//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<int> MaxPotentialValuesIterations(
    "attributor-max-potential-values-iterations", cl::Hidden,
    cl::desc(
        "Maximum number of iterations we keep dismantling potential values."),
    cl::init(64));

static cl::opt<unsigned> MaxInterferingAccesses(
    "attributor-max-interfering-accesses", cl::Hidden,
    cl::desc("Maximum number of interfering accesses to "
             "check before assuming all might interfere."),
    cl::init(6));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking, overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
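// For illustration (hypothetical attribute name and conditions), the separate
// form looks like:
//  STATS_DECL(returned, Arguments,
//             BUILD_STAT_MSG_IR_ATTR(arguments, returned))
//  if (...)
//    STATS_TRACK(returned, Arguments) // First increment site.
//  else
//    STATS_TRACK(returned, Arguments) // Second increment site.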
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
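
// As a concrete expansion example, STATS_DECLTRACK_ARG_ATTR(nofree) declares
// and increments the statistic counter `NumIRArguments_nofree` with the
// message "Number of arguments marked 'nofree'".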

// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AAInstanceInfo)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialConstantValues)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Checks if a type could have padding bytes.
static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!Ty->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, the type size is 80 bits but the alloc size
  // is 128 bits.
  if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  // For array types, check for padding within members.
  if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  if (!isa<StructType>(Ty))
    return true;

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(Ty);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
    Type *ElTy = StructTy->getElementType(I);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(I))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}
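
// Illustrative example (assuming a typical 64-bit data layout): a struct
// `{ i32, i8 }` is not densely packed since its alloc size is 8 bytes but
// only 5 bytes carry data, leaving 3 tail padding bytes. A struct
// `{ i32, i32 }`, in contrast, is densely packed.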

/// Get the pointer operand of a memory accessing instruction. If \p I is not
/// a memory accessing instruction, return nullptr. If \p AllowVolatile is
/// false and the instruction is volatile, return nullptr as well.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}
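
// For example, for `store i32 0, i32* %q` this returns `%q`, while for a
// volatile load it returns nullptr unless AllowVolatile is set.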

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
                                                Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
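
// As an illustrative sketch of the above: for `%p` with natural type
// `{ i32, [4 x i16] }` and Offset 6, DL.getGEPIndicesForOffset yields the
// indices (0, 1, 1), so the emitted IR is roughly
//   %p.0.1.1 = getelementptr { i32, [4 x i16] }, ptr %p, i32 0, i32 1, i32 1
// with no byte-wise remainder; any remainder would be added via an i8 GEP.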

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallSetVector<Value *, 8> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI,
                                     bool &UsedAssumedInformation,
                                     AA::ValueScope S,
                                     SmallPtrSetImpl<Value *> *SeenObjects) {
  SmallPtrSet<Value *, 8> LocalSeenObjects;
  if (!SeenObjects)
    SeenObjects = &LocalSeenObjects;

  SmallVector<AA::ValueAndContext> Values;
  if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), &QueryingAA, Values,
                                    S, UsedAssumedInformation)) {
    Objects.insert(const_cast<Value *>(&Ptr));
    return true;
  }

  for (auto &VAC : Values) {
    Value *UO = getUnderlyingObject(VAC.getValue());
    if (UO && UO != VAC.getValue() && SeenObjects->insert(UO).second) {
      if (!getAssumedUnderlyingObjects(A, *UO, Objects, QueryingAA,
                                       VAC.getCtxI(), UsedAssumedInformation, S,
                                       SeenObjects))
        return false;
      continue;
    }
    Objects.insert(VAC.getValue());
  }
  return true;
}

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    if (Range.isFullSet())
      return false;

    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all those that exist.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}
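
// For example (illustrative): when clamping AANonNull states over a function
// with two returned values %a and %b, the joined state is `nonnull` only if
// both %a and %b are assumed nonnull; if not all returned values can be
// inspected, the state is pessimistically fixed instead.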

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments and we
  // want to join (IntegerState::operator&) the states of all those that exist.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos
                    << ", CB arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - The underlying use.
/// I - The user of \p U.
/// State - The state to be updated with information derived from the use.
/// Returns true if the value should be tracked transitively.
///
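/// As an illustrative sketch only (assuming a DataLayout DL is at hand), a
/// dereferenceability deduction could implement it like this:
///
///   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                        StateType &State) {
///     if (auto *LI = dyn_cast<LoadInst>(I))
///       if (LI->getPointerOperand() == U->get())
///         State.takeKnownDerefBytesMaximum(
///             DL.getTypeStoreSize(LI->getType()).getFixedSize());
///     return true; // Keep following transitive uses.
///   }
///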
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if(a)
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  ~State() {
    // We do not delete the Accesses objects, as they are allocated in the
    // Attributor's bump allocator, but we still need to run their destructors.
    for (auto &It : AccessBins)
      It.second->~Accesses();
  }

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
    SIS.AccessBins.clear();
  }

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs->size() != RAccs->size())
        return false;
      for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
        if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
          return false;
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  struct Accesses {
    SmallVector<AAPointerInfo::Access, 4> Accesses;
    DenseMap<const Instruction *, unsigned> Map;

    unsigned size() const { return Accesses.size(); }

    using vec_iterator = decltype(Accesses)::iterator;
    vec_iterator begin() { return Accesses.begin(); }
    vec_iterator end() { return Accesses.end(); }

    using iterator = decltype(Map)::const_iterator;
    iterator find(AAPointerInfo::Access &Acc) {
      return Map.find(Acc.getRemoteInst());
    }
    iterator find_end() { return Map.end(); }

    AAPointerInfo::Access &get(iterator &It) {
      return Accesses[It->getSecond()];
    }

    void insert(AAPointerInfo::Access &Acc) {
      Map[Acc.getRemoteInst()] = Accesses.size();
      Accesses.push_back(Acc);
    }
  };

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \returns CHANGED if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
                         Instruction &I, Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
    if (!Bin)
      Bin = new (A.Allocator) Accesses;
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin; if not,
    // simply add it.
    auto It = Bin->find(Acc);
    if (It == Bin->find_end()) {
      Bin->insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access &Current = Bin->get(It);
    AAPointerInfo::Access Before = Current;
    // The new one will be combined with the existing one.
    Current &= Acc;
    return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : *It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }
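
  // For illustration: a query for offset 4 with size 4 overlaps both a bin at
  // [0, 8) and one at [4, 12), so the callback runs on all accesses in either
  // bin, with IsExact == false since neither bin matches the query exactly.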
905 
906   /// See AAPointerInfo::forallInterferingAccesses.
907   bool forallInterferingAccesses(
908       Instruction &I,
909       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
910     if (!isValidState())
911       return false;
912 
913     // First find the offset and size of I.
914     AAPointerInfo::OffsetAndSize OAS(-1, -1);
915     for (auto &It : AccessBins) {
916       for (auto &Access : *It.getSecond()) {
917         if (Access.getRemoteInst() == &I) {
918           OAS = It.getFirst();
919           break;
920         }
921       }
922       if (OAS.getSize() != -1)
923         break;
924     }
925     // No access for I was found, we are done.
926     if (OAS.getSize() == -1)
927       return true;
928 
929     // Now that we have an offset and size, find all overlapping ones and use
930     // the callback on the accesses.
931     return forallInterferingAccesses(OAS, CB);
932   }
933 
934 private:
935   /// State to track fixpoint and validity.
936   BooleanState BS;
937 };
938 
939 namespace {
940 struct AAPointerInfoImpl
941     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
942   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
943   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
944 
945   /// See AbstractAttribute::getAsStr().
946   const std::string getAsStr() const override {
947     return std::string("PointerInfo ") +
948            (isValidState() ? (std::string("#") +
949                               std::to_string(AccessBins.size()) + " bins")
950                            : "<invalid>");
951   }
952 
953   /// See AbstractAttribute::manifest(...).
954   ChangeStatus manifest(Attributor &A) override {
955     return AAPointerInfo::manifest(A);
956   }
957 
958   bool forallInterferingAccesses(
959       OffsetAndSize OAS,
960       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
961       const override {
962     return State::forallInterferingAccesses(OAS, CB);
963   }
964 
965   bool
966   forallInterferingAccesses(Attributor &A, const AbstractAttribute &QueryingAA,
967                             Instruction &I,
968                             function_ref<bool(const Access &, bool)> UserCB,
969                             bool &HasBeenWrittenTo) const override {
970     HasBeenWrittenTo = false;
971 
972     SmallPtrSet<const Access *, 8> DominatingWrites;
973     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
974 
975     Function &Scope = *I.getFunction();
976     const auto &NoSyncAA = A.getAAFor<AANoSync>(
977         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
978     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
979         IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
980     const bool NoSync = NoSyncAA.isAssumedNoSync();
981 
982     // Helper to determine if we need to consider threading, which we cannot
983     // right now. However, if the function is (assumed) nosync or the thread
984     // executing all instructions is the main thread only we can ignore
985     // threading.
986     auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
987       if (NoSync)
988         return true;
989       if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
990         return true;
991       return false;
992     };
993 
994     // Helper to determine if the access is executed by the same thread as the
995     // load, for now it is sufficient to avoid any potential threading effects
996     // as we cannot deal with them anyway.
997     auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
998       return CanIgnoreThreading(*Acc.getLocalInst());
999     };
1000 
1001     // TODO: Use inter-procedural reachability and dominance.
1002     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1003         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1004 
1005     const bool FindInterferingWrites = I.mayReadFromMemory();
1006     const bool FindInterferingReads = I.mayWriteToMemory();
1007     const bool UseDominanceReasoning =
1008         FindInterferingWrites && NoRecurseAA.isKnownNoRecurse();
1009     const bool CanUseCFGResoning = CanIgnoreThreading(I);
1010     InformationCache &InfoCache = A.getInfoCache();
1011     const DominatorTree *DT =
1012         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(Scope);
1013 
1014     enum GPUAddressSpace : unsigned {
1015       Generic = 0,
1016       Global = 1,
1017       Shared = 3,
1018       Constant = 4,
1019       Local = 5,
1020     };
1021 
1022     // Helper to check if a value has "kernel lifetime", that is it will not
1023     // outlive a GPU kernel. This is true for shared, constant, and local
1024     // globals on AMD and NVIDIA GPUs.
1025     auto HasKernelLifetime = [&](Value *V, Module &M) {
1026       Triple T(M.getTargetTriple());
1027       if (!(T.isAMDGPU() || T.isNVPTX()))
1028         return false;
1029       switch (V->getType()->getPointerAddressSpace()) {
1030       case GPUAddressSpace::Shared:
1031       case GPUAddressSpace::Constant:
1032       case GPUAddressSpace::Local:
1033         return true;
1034       default:
1035         return false;
1036       };
1037     };
1038 
1039     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1040     // to determine if we should look at reachability from the callee. For
1041     // certain pointers we know the lifetime and we do not have to step into the
1042     // callee to determine reachability as the pointer would be dead in the
1043     // callee. See the conditional initialization below.
1044     std::function<bool(const Function &)> IsLiveInCalleeCB;
1045 
1046     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1047       // If the alloca containing function is not recursive the alloca
1048       // must be dead in the callee.
1049       const Function *AIFn = AI->getFunction();
1050       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1051           *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1052       if (NoRecurseAA.isAssumedNoRecurse()) {
1053         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1054       }
1055     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1056       // If the global has kernel lifetime we can stop if we reach a kernel
1057       // as it is "dead" in the (unknown) callees.
1058       if (HasKernelLifetime(GV, *GV->getParent()))
1059         IsLiveInCalleeCB = [](const Function &Fn) {
1060           return !Fn.hasFnAttribute("kernel");
1061         };
1062     }
1063 
1064     auto AccessCB = [&](const Access &Acc, bool Exact) {
1065       if ((!FindInterferingWrites || !Acc.isWrite()) &&
1066           (!FindInterferingReads || !Acc.isRead()))
1067         return true;
1068 
1069       bool Dominates = DT && Exact && Acc.isMustAccess() &&
1070                        (Acc.getLocalInst()->getFunction() == &Scope) &&
1071                        DT->dominates(Acc.getRemoteInst(), &I);
1072       if (FindInterferingWrites && Dominates)
1073         HasBeenWrittenTo = true;
1074 
1075       // For now we only filter accesses based on CFG reasoning which does not
1076       // work yet if we have threading effects, or the access is complicated.
1077       if (CanUseCFGResoning && Dominates && UseDominanceReasoning &&
1078           IsSameThreadAsLoad(Acc))
1079         DominatingWrites.insert(&Acc);
1080 
1081       InterferingAccesses.push_back({&Acc, Exact});
1082       return true;
1083     };
1084     if (!State::forallInterferingAccesses(I, AccessCB))
1085       return false;
1086 
1087     if (HasBeenWrittenTo) {
1088       const Function *ScopePtr = &Scope;
1089       IsLiveInCalleeCB = [ScopePtr](const Function &Fn) {
1090         return ScopePtr != &Fn;
1091       };
1092     }
1093 
1094     // Helper to determine if we can skip a specific write access. This is in
1095     // the worst case quadratic as we are looking for another write that will
1096     // hide the effect of this one.
1097     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1098       if ((!Acc.isWrite() ||
1099            !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1100                                        IsLiveInCalleeCB)) &&
1101           (!Acc.isRead() ||
1102            !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1103                                        IsLiveInCalleeCB)))
1104         return true;
1105 
1106       if (!DT || !UseDominanceReasoning)
1107         return false;
1108       if (!IsSameThreadAsLoad(Acc))
1109         return false;
1110       if (!DominatingWrites.count(&Acc))
1111         return false;
1112       for (const Access *DomAcc : DominatingWrites) {
1113         assert(Acc.getLocalInst()->getFunction() ==
1114                    DomAcc->getLocalInst()->getFunction() &&
1115                "Expected dominating writes to be in the same function!");
1116 
1117         if (DomAcc != &Acc &&
1118             DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1119           return true;
1120         }
1121       }
1122       return false;
1123     };
1124 
1125     // Run the user callback on all accesses we cannot skip and return if that
1126     // succeeded for all or not.
1127     unsigned NumInterferingAccesses = InterferingAccesses.size();
1128     for (auto &It : InterferingAccesses) {
1129       if (NumInterferingAccesses > MaxInterferingAccesses ||
1130           !CanSkipAccess(*It.first, It.second)) {
1131         if (!UserCB(*It.first, It.second))
1132           return false;
1133       }
1134     }
1135     return true;
1136   }
1137 
1138   ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1139                                     int64_t Offset, CallBase &CB,
1140                                     bool FromCallee = false) {
1141     using namespace AA::PointerInfo;
1142     if (!OtherAA.getState().isValidState() || !isValidState())
1143       return indicatePessimisticFixpoint();
1144 
1145     const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1146     bool IsByval =
1147         FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1148 
1149     // Combine the accesses bin by bin.
1150     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1151     for (auto &It : OtherAAImpl.getState()) {
1152       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1153       if (Offset != OffsetAndSize::Unknown)
1154         OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
1155       Accesses *Bin = AccessBins.lookup(OAS);
1156       for (const AAPointerInfo::Access &RAcc : *It.second) {
1157         if (IsByval && !RAcc.isRead())
1158           continue;
1159         bool UsedAssumedInformation = false;
1160         AccessKind AK = RAcc.getKind();
1161         Optional<Value *> Content = RAcc.getContent();
1162         if (FromCallee) {
1163           Content = A.translateArgumentToCallSiteContent(
1164               RAcc.getContent(), CB, *this, UsedAssumedInformation);
1165           AK =
1166               AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
1167           AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
1168         }
1169         Changed =
1170             Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1171                                 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1172       }
1173     }
1174     return Changed;
1175   }
1176 
1177   /// Statistic tracking for all AAPointerInfo implementations.
1178   /// See AbstractAttribute::trackStatistics().
1179   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1180 
1181   /// Dump the state into \p O.
1182   void dumpState(raw_ostream &O) {
1183     for (auto &It : AccessBins) {
1184       O << "[" << It.first.getOffset() << "-"
1185         << It.first.getOffset() + It.first.getSize()
1186         << "] : " << It.getSecond()->size() << "\n";
1187       for (auto &Acc : *It.getSecond()) {
1188         O << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
1189         if (Acc.getLocalInst() != Acc.getRemoteInst())
1190           O << "     -->                         " << *Acc.getRemoteInst()
1191             << "\n";
1192         if (!Acc.isWrittenValueYetUndetermined()) {
1193           if (Acc.getWrittenValue())
1194             O << "       - c: " << *Acc.getWrittenValue() << "\n";
1195           else
1196             O << "       - c: <unknown>\n";
1197         }
1198       }
1199     }
1200   }
1201 };
1202 
1203 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1204   using AccessKind = AAPointerInfo::AccessKind;
1205   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1206       : AAPointerInfoImpl(IRP, A) {}
1207 
1208   /// Deal with an access and signal if it was handled successfully.
1209   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1210                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1211                     ChangeStatus &Changed, Type *Ty,
1212                     int64_t Size = OffsetAndSize::Unknown) {
1213     using namespace AA::PointerInfo;
1214     // No need to find a size if one is given or the offset is unknown.
1215     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1216         Ty) {
1217       const DataLayout &DL = A.getDataLayout();
1218       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1219       if (!AccessSize.isScalable())
1220         Size = AccessSize.getFixedSize();
1221     }
1222     Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1223     return true;
1224   };
1225 
1226   /// Helper struct, will support ranges eventually.
1227   struct OffsetInfo {
1228     int64_t Offset = OffsetAndSize::Unknown;
1229 
1230     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1231   };
1232 
1233   /// See AbstractAttribute::updateImpl(...).
1234   ChangeStatus updateImpl(Attributor &A) override {
1235     using namespace AA::PointerInfo;
1236     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1237     Value &AssociatedValue = getAssociatedValue();
1238 
1239     const DataLayout &DL = A.getDataLayout();
1240     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1241     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1242 
1243     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1244                                      bool &Follow) {
1245       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1246       UsrOI = PtrOI;
1247       Follow = true;
1248       return true;
1249     };
1250 
1251     const auto *TLI = getAnchorScope()
1252                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1253                                 *getAnchorScope())
1254                           : nullptr;
1255     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1256       Value *CurPtr = U.get();
1257       User *Usr = U.getUser();
1258       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1259                         << *Usr << "\n");
1260       assert(OffsetInfoMap.count(CurPtr) &&
1261              "The current pointer offset should have been seeded!");
1262 
1263       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1264         if (CE->isCast())
1265           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1266         if (CE->isCompare())
1267           return true;
1268         if (!isa<GEPOperator>(CE)) {
1269           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1270                             << "\n");
1271           return false;
1272         }
1273       }
1274       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1275         // Note the order here, the Usr access might change the map, CurPtr is
1276         // already in it though.
1277         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1278         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1279         UsrOI = PtrOI;
1280 
1281         // TODO: Use range information.
1282         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1283             !GEP->hasAllConstantIndices()) {
1284           UsrOI.Offset = OffsetAndSize::Unknown;
1285           Follow = true;
1286           return true;
1287         }
1288 
1289         SmallVector<Value *, 8> Indices;
1290         for (Use &Idx : GEP->indices()) {
1291           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1292             Indices.push_back(CIdx);
1293             continue;
1294           }
1295 
1296           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1297                             << " : " << *Idx << "\n");
1298           return false;
1299         }
1300         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1301                                           GEP->getSourceElementType(), Indices);
1302         Follow = true;
1303         return true;
1304       }
1305       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr) || isa<ReturnInst>(Usr))
1306         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1307 
1308       // For PHIs we need to take care of the recurrence explicitly as the value
1309       // might change while we iterate through a loop. For now, we give up if
1310       // the PHI is not invariant.
1311       if (isa<PHINode>(Usr)) {
1312         // Note the order here, the Usr access might change the map, CurPtr is
1313         // already in it though.
1314         bool IsFirstPHIUser = !OffsetInfoMap.count(Usr);
1315         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1316         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1317         // Check if the PHI is invariant (so far).
1318         if (UsrOI == PtrOI)
1319           return true;
1320 
1321         // Check if the PHI operand has already an unknown offset as we can't
1322         // improve on that anymore.
1323         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1324           UsrOI = PtrOI;
1325           Follow = true;
1326           return true;
1327         }
1328 
1329         // Check if the PHI operand is not dependent on the PHI itself.
1330         APInt Offset(
1331             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1332             0);
1333         Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets(
1334             DL, Offset, /* AllowNonInbounds */ true);
1335         auto It = OffsetInfoMap.find(CurPtrBase);
1336         if (It != OffsetInfoMap.end()) {
1337           Offset += It->getSecond().Offset;
1338           if (IsFirstPHIUser || Offset == UsrOI.Offset)
1339             return HandlePassthroughUser(Usr, PtrOI, Follow);
1340           LLVM_DEBUG(dbgs()
1341                      << "[AAPointerInfo] PHI operand pointer offset mismatch "
1342                      << *CurPtr << " in " << *Usr << "\n");
1343         } else {
1344           LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1345                             << *CurPtr << " in " << *Usr << "\n");
1346         }
1347 
1348         // TODO: Approximate in case we know the direction of the recurrence.
1349         UsrOI = PtrOI;
1350         UsrOI.Offset = OffsetAndSize::Unknown;
1351         Follow = true;
1352         return true;
1353       }
1354 
1355       if (auto *LoadI = dyn_cast<LoadInst>(Usr)) {
1356         // If the access is to a pointer that may or may not be the associated
1357         // value, e.g. due to a PHI, we cannot assume it will be read.
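        // E.g., if CurPtr is `select i1 %c, ptr %obj, ptr %other`
        // (illustrative), a load of it may, but need not, read %obj.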
1358         AccessKind AK = AccessKind::AK_R;
1359         if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1360           AK = AccessKind(AK | AccessKind::AK_MUST);
1361         else
1362           AK = AccessKind(AK | AccessKind::AK_MAY);
1363         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr, AK,
1364                             OffsetInfoMap[CurPtr].Offset, Changed,
1365                             LoadI->getType());
1366       }
1367 
1368       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1369         if (StoreI->getValueOperand() == CurPtr) {
1370           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1371                             << *StoreI << "\n");
1372           return false;
1373         }
1374         // If the access is to a pointer that may or may not be the associated
1375         // value, e.g. due to a PHI, we cannot assume it will be written.
1376         AccessKind AK = AccessKind::AK_W;
1377         if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1378           AK = AccessKind(AK | AccessKind::AK_MUST);
1379         else
1380           AK = AccessKind(AK | AccessKind::AK_MAY);
1381         bool UsedAssumedInformation = false;
1382         Optional<Value *> Content =
1383             A.getAssumedSimplified(*StoreI->getValueOperand(), *this,
1384                                    UsedAssumedInformation, AA::Interprocedural);
1385         return handleAccess(A, *StoreI, *CurPtr, Content, AK,
1386                             OffsetInfoMap[CurPtr].Offset, Changed,
1387                             StoreI->getValueOperand()->getType());
1388       }
1389       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1390         if (CB->isLifetimeStartOrEnd())
1391           return true;
1392         if (getFreedOperand(CB, TLI) == U)
1393           return true;
1394         if (CB->isArgOperand(&U)) {
1395           unsigned ArgNo = CB->getArgOperandNo(&U);
1396           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1397               *this, IRPosition::callsite_argument(*CB, ArgNo),
1398               DepClassTy::REQUIRED);
1399           Changed = translateAndAddState(A, CSArgPI,
1400                                          OffsetInfoMap[CurPtr].Offset, *CB) |
1401                     Changed;
1402           return isValidState();
1403         }
1404         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1405                           << "\n");
1406         // TODO: Allow some call uses
1407         return false;
1408       }
1409 
1410       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1411       return false;
1412     };
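    // Uses the use-walk considers equivalent must agree on their offset info:
    // propagate the info to new uses and fail on a mismatch.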
1413     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1414       if (OffsetInfoMap.count(NewU)) {
1415         LLVM_DEBUG({
1416           if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
1417             dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
1418                    << OffsetInfoMap[NewU].Offset << " vs "
1419                    << OffsetInfoMap[OldU].Offset << "\n";
1420           }
1421         });
1422         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1423       }
1424       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1425       return true;
1426     };
1427     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1428                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1429                            /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
1430       LLVM_DEBUG(
1431           dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
1432       return indicatePessimisticFixpoint();
1433     }
1434 
1435     LLVM_DEBUG({
1436       dbgs() << "Accesses by bin after update:\n";
1437       dumpState(dbgs());
1438     });
1439 
1440     return Changed;
1441   }
1442 
1443   /// See AbstractAttribute::trackStatistics()
1444   void trackStatistics() const override {
1445     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1446   }
1447 };
1448 
1449 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1450   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1451       : AAPointerInfoImpl(IRP, A) {}
1452 
1453   /// See AbstractAttribute::updateImpl(...).
1454   ChangeStatus updateImpl(Attributor &A) override {
1455     return indicatePessimisticFixpoint();
1456   }
1457 
1458   /// See AbstractAttribute::trackStatistics()
1459   void trackStatistics() const override {
1460     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1461   }
1462 };
1463 
1464 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1465   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1466       : AAPointerInfoFloating(IRP, A) {}
1467 
1468   /// See AbstractAttribute::initialize(...).
1469   void initialize(Attributor &A) override {
1470     AAPointerInfoFloating::initialize(A);
1471     if (getAnchorScope()->isDeclaration())
1472       indicatePessimisticFixpoint();
1473   }
1474 
1475   /// See AbstractAttribute::trackStatistics()
1476   void trackStatistics() const override {
1477     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1478   }
1479 };
1480 
1481 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1482   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1483       : AAPointerInfoFloating(IRP, A) {}
1484 
1485   /// See AbstractAttribute::updateImpl(...).
1486   ChangeStatus updateImpl(Attributor &A) override {
1487     using namespace AA::PointerInfo;
1488     // We handle memory intrinsics explicitly; at least the first
1489     // (= destination) and second (= source) arguments are handled, as we
1490     // know how they are accessed.
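    // E.g., for `llvm.memcpy(ptr %dst, ptr %src, i64 32, i1 false)`
    // (illustrative, intrinsic signature abridged) we record a must-write of
    // 32 bytes at offset 0 for %dst and a must-read of 32 bytes at offset 0
    // for %src.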
1491     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1492       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1493       int64_t LengthVal = OffsetAndSize::Unknown;
1494       if (Length)
1495         LengthVal = Length->getSExtValue();
1496       Value &Ptr = getAssociatedValue();
1497       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1498       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1499       if (ArgNo == 0) {
1500         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_MUST_WRITE, 0,
1501                      Changed, nullptr, LengthVal);
1502       } else if (ArgNo == 1) {
1503         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_MUST_READ, 0, Changed,
1504                      nullptr, LengthVal);
1505       } else {
1506         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1507                           << *MI << "\n");
1508         return indicatePessimisticFixpoint();
1509       }
1510 
1511       LLVM_DEBUG({
1512         dbgs() << "Accesses by bin after update:\n";
1513         dumpState(dbgs());
1514       });
1515 
1516       return Changed;
1517     }
1518 
1519     // TODO: Once we have call site specific value information we can provide
1520     //       call site specific liveness information and then it makes
1521     //       sense to specialize attributes for call site arguments instead of
1522     //       redirecting requests to the callee argument.
1523     Argument *Arg = getAssociatedArgument();
1524     if (!Arg)
1525       return indicatePessimisticFixpoint();
1526     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1527     auto &ArgAA =
1528         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1529     return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
1530                                 /* FromCallee */ true);
1531   }
1532 
1533   /// See AbstractAttribute::trackStatistics()
1534   void trackStatistics() const override {
1535     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1536   }
1537 };
1538 
1539 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1540   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1541       : AAPointerInfoFloating(IRP, A) {}
1542 
1543   /// See AbstractAttribute::trackStatistics()
1544   void trackStatistics() const override {
1545     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1546   }
1547 };
1548 } // namespace
1549 
1550 /// -----------------------NoUnwind Function Attribute--------------------------
1551 
1552 namespace {
1553 struct AANoUnwindImpl : AANoUnwind {
1554   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1555 
1556   const std::string getAsStr() const override {
1557     return getAssumed() ? "nounwind" : "may-unwind";
1558   }
1559 
1560   /// See AbstractAttribute::updateImpl(...).
1561   ChangeStatus updateImpl(Attributor &A) override {
1562     auto Opcodes = {
1563         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1564         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1565         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
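    // These opcodes are the instruction kinds that may unwind or propagate
    // exceptional control flow; e.g., an `invoke` of a callee that is not
    // (assumed) nounwind keeps this function may-unwind.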
1566 
1567     auto CheckForNoUnwind = [&](Instruction &I) {
1568       if (!I.mayThrow())
1569         return true;
1570 
1571       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1572         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1573             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1574         return NoUnwindAA.isAssumedNoUnwind();
1575       }
1576       return false;
1577     };
1578 
1579     bool UsedAssumedInformation = false;
1580     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1581                                    UsedAssumedInformation))
1582       return indicatePessimisticFixpoint();
1583 
1584     return ChangeStatus::UNCHANGED;
1585   }
1586 };
1587 
1588 struct AANoUnwindFunction final : public AANoUnwindImpl {
1589   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1590       : AANoUnwindImpl(IRP, A) {}
1591 
1592   /// See AbstractAttribute::trackStatistics()
1593   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1594 };
1595 
1596 /// NoUnwind attribute deduction for a call site.
1597 struct AANoUnwindCallSite final : AANoUnwindImpl {
1598   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1599       : AANoUnwindImpl(IRP, A) {}
1600 
1601   /// See AbstractAttribute::initialize(...).
1602   void initialize(Attributor &A) override {
1603     AANoUnwindImpl::initialize(A);
1604     Function *F = getAssociatedFunction();
1605     if (!F || F->isDeclaration())
1606       indicatePessimisticFixpoint();
1607   }
1608 
1609   /// See AbstractAttribute::updateImpl(...).
1610   ChangeStatus updateImpl(Attributor &A) override {
1611     // TODO: Once we have call site specific value information we can provide
1612     //       call site specific liveness information and then it makes
1613     //       sense to specialize attributes for call sites instead of
1614     //       redirecting requests to the callee.
1615     Function *F = getAssociatedFunction();
1616     const IRPosition &FnPos = IRPosition::function(*F);
1617     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1618     return clampStateAndIndicateChange(getState(), FnAA.getState());
1619   }
1620 
1621   /// See AbstractAttribute::trackStatistics()
1622   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1623 };
1624 } // namespace
1625 
1626 /// --------------------- Function Return Values -------------------------------
1627 
1628 namespace {
1629 /// "Attribute" that collects all potential returned values and the return
1630 /// instructions that they arise from.
1631 ///
1632 /// If there is a unique returned value R, the manifest method will:
1633 ///   - mark R with the "returned" attribute, if R is an argument.
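/// E.g., for (illustrative IR)
///   define ptr @id(ptr %p) { ret ptr %p }
/// the unique returned value is %p, and the argument is annotated `returned`.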
1634 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1635 
1636   /// Mapping of values potentially returned by the associated function to the
1637   /// return instructions that might return them.
1638   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1639 
1640   /// State flags
1641   ///
1642   ///{
1643   bool IsFixed = false;
1644   bool IsValidState = true;
1645   ///}
1646 
1647 public:
1648   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1649       : AAReturnedValues(IRP, A) {}
1650 
1651   /// See AbstractAttribute::initialize(...).
1652   void initialize(Attributor &A) override {
1653     // Reset the state.
1654     IsFixed = false;
1655     IsValidState = true;
1656     ReturnedValues.clear();
1657 
1658     Function *F = getAssociatedFunction();
1659     if (!F || F->isDeclaration()) {
1660       indicatePessimisticFixpoint();
1661       return;
1662     }
1663     assert(!F->getReturnType()->isVoidTy() &&
1664            "Did not expect a void return type!");
1665 
1666     // The map from instruction opcodes to those instructions in the function.
1667     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1668 
1669     // Look through all arguments, if one is marked as returned we are done.
1670     for (Argument &Arg : F->args()) {
1671       if (Arg.hasReturnedAttr()) {
1672         auto &ReturnInstSet = ReturnedValues[&Arg];
1673         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1674           for (Instruction *RI : *Insts)
1675             ReturnInstSet.insert(cast<ReturnInst>(RI));
1676 
1677         indicateOptimisticFixpoint();
1678         return;
1679       }
1680     }
1681 
1682     if (!A.isFunctionIPOAmendable(*F))
1683       indicatePessimisticFixpoint();
1684   }
1685 
1686   /// See AbstractAttribute::manifest(...).
1687   ChangeStatus manifest(Attributor &A) override;
1688 
1689   /// See AbstractAttribute::getState(...).
1690   AbstractState &getState() override { return *this; }
1691 
1692   /// See AbstractAttribute::getState(...).
1693   const AbstractState &getState() const override { return *this; }
1694 
1695   /// See AbstractAttribute::updateImpl(Attributor &A).
1696   ChangeStatus updateImpl(Attributor &A) override;
1697 
1698   llvm::iterator_range<iterator> returned_values() override {
1699     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1700   }
1701 
1702   llvm::iterator_range<const_iterator> returned_values() const override {
1703     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1704   }
1705 
1706   /// Return the number of potential return values, -1 if unknown.
1707   size_t getNumReturnValues() const override {
1708     return isValidState() ? ReturnedValues.size() : -1;
1709   }
1710 
1711   /// Return an assumed unique return value if a single candidate is found. If
1712   /// there cannot be one, return nullptr. If it is not clear yet, return
1713   /// None.
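  /// E.g., if every return is `ret i32 %x` the unique value is %x; if some
  /// paths return %x and others %y, nullptr results (illustrative).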
1714   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1715 
1716   /// See AbstractState::checkForAllReturnedValues(...).
1717   bool checkForAllReturnedValuesAndReturnInsts(
1718       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1719       const override;
1720 
1721   /// Pretty print the attribute similar to the IR representation.
1722   const std::string getAsStr() const override;
1723 
1724   /// See AbstractState::isAtFixpoint().
1725   bool isAtFixpoint() const override { return IsFixed; }
1726 
1727   /// See AbstractState::isValidState().
1728   bool isValidState() const override { return IsValidState; }
1729 
1730   /// See AbstractState::indicateOptimisticFixpoint(...).
1731   ChangeStatus indicateOptimisticFixpoint() override {
1732     IsFixed = true;
1733     return ChangeStatus::UNCHANGED;
1734   }
1735 
1736   ChangeStatus indicatePessimisticFixpoint() override {
1737     IsFixed = true;
1738     IsValidState = false;
1739     return ChangeStatus::CHANGED;
1740   }
1741 };
1742 
1743 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1744   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1745 
1746   // Bookkeeping.
1747   assert(isValidState());
1748   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1749                   "Number of functions with known return values");
1750 
1751   // Check if we have an assumed unique return value that we could manifest.
1752   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1753 
1754   if (!UniqueRV || !UniqueRV.value())
1755     return Changed;
1756 
1757   // Bookkeeping.
1758   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1759                   "Number of functions with a unique return value");
1760   // If the assumed unique return value is an argument, annotate it.
1761   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) {
1762     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1763             getAssociatedFunction()->getReturnType())) {
1764       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1765       Changed = IRAttribute::manifest(A);
1766     }
1767   }
1768   return Changed;
1769 }
1770 
1771 const std::string AAReturnedValuesImpl::getAsStr() const {
1772   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1773          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1774 }
1775 
1776 Optional<Value *>
1777 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1778   // If checkForAllReturnedValues provides a unique value, ignoring potential
1779   // undef values that can also be present, it is assumed to be the actual
1780   // return value and forwarded to the caller of this method. If there are
1781   // multiple, a nullptr is returned indicating there cannot be a unique
1782   // returned value.
1783   Optional<Value *> UniqueRV;
1784   Type *Ty = getAssociatedFunction()->getReturnType();
1785 
1786   auto Pred = [&](Value &RV) -> bool {
1787     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1788     return UniqueRV != Optional<Value *>(nullptr);
1789   };
1790 
1791   if (!A.checkForAllReturnedValues(Pred, *this))
1792     UniqueRV = nullptr;
1793 
1794   return UniqueRV;
1795 }
1796 
1797 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1798     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1799     const {
1800   if (!isValidState())
1801     return false;
1802 
1803   // Check all returned values but ignore call sites as long as we have not
1804   // encountered an overdefined one during an update.
1805   for (auto &It : ReturnedValues) {
1806     Value *RV = It.first;
1807     if (!Pred(*RV, It.second))
1808       return false;
1809   }
1810 
1811   return true;
1812 }
1813 
1814 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1815   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1816 
1817   SmallVector<AA::ValueAndContext> Values;
1818   bool UsedAssumedInformation = false;
1819   auto ReturnInstCB = [&](Instruction &I) {
1820     ReturnInst &Ret = cast<ReturnInst>(I);
1821     Values.clear();
1822     if (!A.getAssumedSimplifiedValues(IRPosition::value(*Ret.getReturnValue()),
1823                                       *this, Values, AA::Intraprocedural,
1824                                       UsedAssumedInformation))
1825       Values.push_back({*Ret.getReturnValue(), Ret});
1826 
1827     for (auto &VAC : Values) {
1828       assert(AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) &&
1829              "Assumed returned value should be valid in function scope!");
1830       if (ReturnedValues[VAC.getValue()].insert(&Ret))
1831         Changed = ChangeStatus::CHANGED;
1832     }
1833     return true;
1834   };
1835 
1836   // Discover returned values from all live return instructions in the
1837   // associated function.
1838   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1839                                  UsedAssumedInformation))
1840     return indicatePessimisticFixpoint();
1841   return Changed;
1842 }
1843 
1844 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1845   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1846       : AAReturnedValuesImpl(IRP, A) {}
1847 
1848   /// See AbstractAttribute::trackStatistics()
1849   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1850 };
1851 
1852 /// Returned values information for a call site.
1853 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1854   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1855       : AAReturnedValuesImpl(IRP, A) {}
1856 
1857   /// See AbstractAttribute::initialize(...).
1858   void initialize(Attributor &A) override {
1859     // TODO: Once we have call site specific value information we can provide
1860     //       call site specific liveness information and then it makes
1861     //       sense to specialize attributes for call sites instead of
1862     //       redirecting requests to the callee.
1863     llvm_unreachable("Abstract attributes for returned values are not "
1864                      "supported for call sites yet!");
1865   }
1866 
1867   /// See AbstractAttribute::updateImpl(...).
1868   ChangeStatus updateImpl(Attributor &A) override {
1869     return indicatePessimisticFixpoint();
1870   }
1871 
1872   /// See AbstractAttribute::trackStatistics()
1873   void trackStatistics() const override {}
1874 };
1875 } // namespace
1876 
1877 /// ------------------------ NoSync Function Attribute -------------------------
1878 
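/// Note: an atomic operation counts as "non-relaxed" here if its ordering is
/// stronger than monotonic/unordered; e.g., a `seq_cst` load qualifies while
/// a `monotonic` one does not (illustrative).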
1879 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1880   if (!I->isAtomic())
1881     return false;
1882 
1883   if (auto *FI = dyn_cast<FenceInst>(I))
1884     // All legal orderings for fence are stronger than monotonic.
1885     return FI->getSyncScopeID() != SyncScope::SingleThread;
1886   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1887     // Unordered is not a legal ordering for cmpxchg.
1888     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1889             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1890   }
1891 
1892   AtomicOrdering Ordering;
1893   switch (I->getOpcode()) {
1894   case Instruction::AtomicRMW:
1895     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1896     break;
1897   case Instruction::Store:
1898     Ordering = cast<StoreInst>(I)->getOrdering();
1899     break;
1900   case Instruction::Load:
1901     Ordering = cast<LoadInst>(I)->getOrdering();
1902     break;
1903   default:
1904     llvm_unreachable(
1905         "New atomic operations need to be known in the attributor.");
1906   }
1907 
1908   return (Ordering != AtomicOrdering::Unordered &&
1909           Ordering != AtomicOrdering::Monotonic);
1910 }
1911 
1912 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1913 /// which would be nosync except that they have a volatile flag.  All other
1914 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
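/// E.g., a non-volatile `llvm.memcpy` is nosync by this rule, while a
/// volatile one is not (illustrative).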
1915 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
1916   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1917     return !MI->isVolatile();
1918   return false;
1919 }
1920 
1921 namespace {
1922 struct AANoSyncImpl : AANoSync {
1923   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1924 
1925   const std::string getAsStr() const override {
1926     return getAssumed() ? "nosync" : "may-sync";
1927   }
1928 
1929   /// See AbstractAttribute::updateImpl(...).
1930   ChangeStatus updateImpl(Attributor &A) override;
1931 };
1932 
1933 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1934 
1935   auto CheckRWInstForNoSync = [&](Instruction &I) {
1936     return AA::isNoSyncInst(A, I, *this);
1937   };
1938 
1939   auto CheckForNoSync = [&](Instruction &I) {
1940     // At this point we handled all read/write effects and they are all
1941     // nosync, so they can be skipped.
1942     if (I.mayReadOrWriteMemory())
1943       return true;
1944 
1945     // Non-convergent and readnone imply nosync.
1946     return !cast<CallBase>(I).isConvergent();
1947   };
1948 
1949   bool UsedAssumedInformation = false;
1950   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1951                                           UsedAssumedInformation) ||
1952       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1953                                          UsedAssumedInformation))
1954     return indicatePessimisticFixpoint();
1955 
1956   return ChangeStatus::UNCHANGED;
1957 }
1958 
1959 struct AANoSyncFunction final : public AANoSyncImpl {
1960   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1961       : AANoSyncImpl(IRP, A) {}
1962 
1963   /// See AbstractAttribute::trackStatistics()
1964   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1965 };
1966 
1967 /// NoSync attribute deduction for a call site.
1968 struct AANoSyncCallSite final : AANoSyncImpl {
1969   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1970       : AANoSyncImpl(IRP, A) {}
1971 
1972   /// See AbstractAttribute::initialize(...).
1973   void initialize(Attributor &A) override {
1974     AANoSyncImpl::initialize(A);
1975     Function *F = getAssociatedFunction();
1976     if (!F || F->isDeclaration())
1977       indicatePessimisticFixpoint();
1978   }
1979 
1980   /// See AbstractAttribute::updateImpl(...).
1981   ChangeStatus updateImpl(Attributor &A) override {
1982     // TODO: Once we have call site specific value information we can provide
1983     //       call site specific liveness information and then it makes
1984     //       sense to specialize attributes for call sites instead of
1985     //       redirecting requests to the callee.
1986     Function *F = getAssociatedFunction();
1987     const IRPosition &FnPos = IRPosition::function(*F);
1988     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1989     return clampStateAndIndicateChange(getState(), FnAA.getState());
1990   }
1991 
1992   /// See AbstractAttribute::trackStatistics()
1993   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1994 };
1995 } // namespace
1996 
1997 /// ------------------------ No-Free Attributes ----------------------------
1998 
1999 namespace {
2000 struct AANoFreeImpl : public AANoFree {
2001   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2002 
2003   /// See AbstractAttribute::updateImpl(...).
2004   ChangeStatus updateImpl(Attributor &A) override {
2005     auto CheckForNoFree = [&](Instruction &I) {
2006       const auto &CB = cast<CallBase>(I);
2007       if (CB.hasFnAttr(Attribute::NoFree))
2008         return true;
2009 
2010       const auto &NoFreeAA = A.getAAFor<AANoFree>(
2011           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2012       return NoFreeAA.isAssumedNoFree();
2013     };
2014 
2015     bool UsedAssumedInformation = false;
2016     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2017                                            UsedAssumedInformation))
2018       return indicatePessimisticFixpoint();
2019     return ChangeStatus::UNCHANGED;
2020   }
2021 
2022   /// See AbstractAttribute::getAsStr().
2023   const std::string getAsStr() const override {
2024     return getAssumed() ? "nofree" : "may-free";
2025   }
2026 };
2027 
2028 struct AANoFreeFunction final : public AANoFreeImpl {
2029   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2030       : AANoFreeImpl(IRP, A) {}
2031 
2032   /// See AbstractAttribute::trackStatistics()
2033   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2034 };
2035 
2036 /// NoFree attribute deduction for a call site.
2037 struct AANoFreeCallSite final : AANoFreeImpl {
2038   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2039       : AANoFreeImpl(IRP, A) {}
2040 
2041   /// See AbstractAttribute::initialize(...).
2042   void initialize(Attributor &A) override {
2043     AANoFreeImpl::initialize(A);
2044     Function *F = getAssociatedFunction();
2045     if (!F || F->isDeclaration())
2046       indicatePessimisticFixpoint();
2047   }
2048 
2049   /// See AbstractAttribute::updateImpl(...).
2050   ChangeStatus updateImpl(Attributor &A) override {
2051     // TODO: Once we have call site specific value information we can provide
2052     //       call site specific liveness information and then it makes
2053     //       sense to specialize attributes for call sites instead of
2054     //       redirecting requests to the callee.
2055     Function *F = getAssociatedFunction();
2056     const IRPosition &FnPos = IRPosition::function(*F);
2057     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2058     return clampStateAndIndicateChange(getState(), FnAA.getState());
2059   }
2060 
2061   /// See AbstractAttribute::trackStatistics()
2062   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2063 };
2064 
2065 /// NoFree attribute for floating values.
2066 struct AANoFreeFloating : AANoFreeImpl {
2067   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2068       : AANoFreeImpl(IRP, A) {}
2069 
2070   /// See AbstractAttribute::trackStatistics()
2071   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
2072 
2073   /// See AbstractAttribute::updateImpl(...).
2074   ChangeStatus updateImpl(Attributor &A) override {
2075     const IRPosition &IRP = getIRPosition();
2076 
2077     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2078         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2079     if (NoFreeAA.isAssumedNoFree())
2080       return ChangeStatus::UNCHANGED;
2081 
2082     Value &AssociatedValue = getIRPosition().getAssociatedValue();
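    // Walk all (transitive) uses of the associated value; a use is harmless
    // if the position it flows into is itself (assumed) nofree, e.g., a call
    // argument deduced nofree, or a plain load/store/return user.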
2083     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2084       Instruction *UserI = cast<Instruction>(U.getUser());
2085       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2086         if (CB->isBundleOperand(&U))
2087           return false;
2088         if (!CB->isArgOperand(&U))
2089           return true;
2090         unsigned ArgNo = CB->getArgOperandNo(&U);
2091 
2092         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2093             *this, IRPosition::callsite_argument(*CB, ArgNo),
2094             DepClassTy::REQUIRED);
2095         return NoFreeArg.isAssumedNoFree();
2096       }
2097 
2098       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2099           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2100         Follow = true;
2101         return true;
2102       }
2103       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2104           isa<ReturnInst>(UserI))
2105         return true;
2106 
2107       // Unknown user.
2108       return false;
2109     };
2110     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2111       return indicatePessimisticFixpoint();
2112 
2113     return ChangeStatus::UNCHANGED;
2114   }
2115 };
2116 
2117 /// NoFree attribute for a function argument.
2118 struct AANoFreeArgument final : AANoFreeFloating {
2119   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2120       : AANoFreeFloating(IRP, A) {}
2121 
2122   /// See AbstractAttribute::trackStatistics()
2123   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2124 };
2125 
2126 /// NoFree attribute for a call site argument.
2127 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2128   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2129       : AANoFreeFloating(IRP, A) {}
2130 
2131   /// See AbstractAttribute::updateImpl(...).
2132   ChangeStatus updateImpl(Attributor &A) override {
2133     // TODO: Once we have call site specific value information we can provide
2134     //       call site specific liveness information and then it makes
2135     //       sense to specialize attributes for call site arguments instead of
2136     //       redirecting requests to the callee argument.
2137     Argument *Arg = getAssociatedArgument();
2138     if (!Arg)
2139       return indicatePessimisticFixpoint();
2140     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2141     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2142     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2143   }
2144 
2145   /// See AbstractAttribute::trackStatistics()
2146   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
2147 };
2148 
2149 /// NoFree attribute for function return value.
2150 struct AANoFreeReturned final : AANoFreeFloating {
2151   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2152       : AANoFreeFloating(IRP, A) {
2153     llvm_unreachable("NoFree is not applicable to function returns!");
2154   }
2155 
2156   /// See AbstractAttribute::initialize(...).
2157   void initialize(Attributor &A) override {
2158     llvm_unreachable("NoFree is not applicable to function returns!");
2159   }
2160 
2161   /// See AbstractAttribute::updateImpl(...).
2162   ChangeStatus updateImpl(Attributor &A) override {
2163     llvm_unreachable("NoFree is not applicable to function returns!");
2164   }
2165 
2166   /// See AbstractAttribute::trackStatistics()
2167   void trackStatistics() const override {}
2168 };
2169 
2170 /// NoFree attribute deduction for a call site return value.
2171 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2172   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2173       : AANoFreeFloating(IRP, A) {}
2174 
2175   ChangeStatus manifest(Attributor &A) override {
2176     return ChangeStatus::UNCHANGED;
2177   }
2178   /// See AbstractAttribute::trackStatistics()
2179   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2180 };
2181 } // namespace
2182 
2183 /// ------------------------ NonNull Argument Attribute ------------------------
2184 namespace {
2185 static int64_t getKnownNonNullAndDerefBytesForUse(
2186     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2187     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2188   TrackUse = false;
2189 
2190   const Value *UseV = U->get();
2191   if (!UseV->getType()->isPointerTy())
2192     return 0;
2193 
2194   // We need to follow common pointer manipulation uses to the accesses they
2195   // feed into. We can try to be smart to avoid looking through things we do not
2196   // like for now, e.g., non-inbounds GEPs.
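  // E.g., a use as the pointer operand of a cast or GEP (illustrative) is
  // tracked onward to the memory access it eventually feeds.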
2197   if (isa<CastInst>(I)) {
2198     TrackUse = true;
2199     return 0;
2200   }
2201 
2202   if (isa<GetElementPtrInst>(I)) {
2203     TrackUse = true;
2204     return 0;
2205   }
2206 
2207   Type *PtrTy = UseV->getType();
2208   const Function *F = I->getFunction();
2209   bool NullPointerIsDefined =
2210       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2211   const DataLayout &DL = A.getInfoCache().getDL();
2212   if (const auto *CB = dyn_cast<CallBase>(I)) {
2213     if (CB->isBundleOperand(U)) {
2214       if (RetainedKnowledge RK = getKnowledgeFromUse(
2215               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2216         IsNonNull |=
2217             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2218         return RK.ArgValue;
2219       }
2220       return 0;
2221     }
2222 
2223     if (CB->isCallee(U)) {
2224       IsNonNull |= !NullPointerIsDefined;
2225       return 0;
2226     }
2227 
2228     unsigned ArgNo = CB->getArgOperandNo(U);
2229     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2230     // As long as we only use known information there is no need to track
2231     // dependences here.
2232     auto &DerefAA =
2233         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2234     IsNonNull |= DerefAA.isKnownNonNull();
2235     return DerefAA.getKnownDereferenceableBytes();
2236   }
2237 
2238   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2239   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2240     return 0;
2241 
2242   int64_t Offset;
2243   const Value *Base =
2244       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2245   if (Base && Base == &AssociatedValue) {
2246     int64_t DerefBytes = Loc->Size.getValue() + Offset;
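    // E.g., a 4-byte access at constant offset 8 from the associated value
    // proves at least 12 dereferenceable bytes (illustrative).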
2247     IsNonNull |= !NullPointerIsDefined;
2248     return std::max(int64_t(0), DerefBytes);
2249   }
2250 
2251   // Corner case when the offset is 0.
2252   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2253                                           /*AllowNonInbounds*/ true);
2254   if (Base && Base == &AssociatedValue && Offset == 0) {
2255     int64_t DerefBytes = Loc->Size.getValue();
2256     IsNonNull |= !NullPointerIsDefined;
2257     return std::max(int64_t(0), DerefBytes);
2258   }
2259 
2260   return 0;
2261 }
2262 
2263 struct AANonNullImpl : AANonNull {
2264   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2265       : AANonNull(IRP, A),
2266         NullIsDefined(NullPointerIsDefined(
2267             getAnchorScope(),
2268             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2269 
2270   /// See AbstractAttribute::initialize(...).
2271   void initialize(Attributor &A) override {
2272     Value &V = *getAssociatedValue().stripPointerCasts();
2273     if (!NullIsDefined &&
2274         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2275                 /* IgnoreSubsumingPositions */ false, &A)) {
2276       indicateOptimisticFixpoint();
2277       return;
2278     }
2279 
2280     if (isa<ConstantPointerNull>(V)) {
2281       indicatePessimisticFixpoint();
2282       return;
2283     }
2284 
2285     AANonNull::initialize(A);
2286 
2287     bool CanBeNull, CanBeFreed;
2288     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2289                                          CanBeFreed)) {
2290       if (!CanBeNull) {
2291         indicateOptimisticFixpoint();
2292         return;
2293       }
2294     }
2295 
2296     if (isa<GlobalValue>(V)) {
2297       indicatePessimisticFixpoint();
2298       return;
2299     }
2300 
2301     if (Instruction *CtxI = getCtxI())
2302       followUsesInMBEC(*this, A, getState(), *CtxI);
2303   }
2304 
2305   /// See followUsesInMBEC
2306   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2307                        AANonNull::StateType &State) {
2308     bool IsNonNull = false;
2309     bool TrackUse = false;
2310     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2311                                        IsNonNull, TrackUse);
2312     State.setKnown(IsNonNull);
2313     return TrackUse;
2314   }
2315 
2316   /// See AbstractAttribute::getAsStr().
2317   const std::string getAsStr() const override {
2318     return getAssumed() ? "nonnull" : "may-null";
2319   }
2320 
2321   /// Flag to determine if the underlying value can be null and still allow
2322   /// valid accesses.
2323   const bool NullIsDefined;
2324 };
2325 
2326 /// NonNull attribute for a floating value.
2327 struct AANonNullFloating : public AANonNullImpl {
2328   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2329       : AANonNullImpl(IRP, A) {}
2330 
2331   /// See AbstractAttribute::updateImpl(...).
2332   ChangeStatus updateImpl(Attributor &A) override {
2333     const DataLayout &DL = A.getDataLayout();
2334 
2335     bool Stripped;
2336     bool UsedAssumedInformation = false;
2337     SmallVector<AA::ValueAndContext> Values;
2338     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
2339                                       AA::AnyScope, UsedAssumedInformation)) {
2340       Values.push_back({getAssociatedValue(), getCtxI()});
2341       Stripped = false;
2342     } else {
2343       Stripped = Values.size() != 1 ||
2344                  Values.front().getValue() != &getAssociatedValue();
2345     }
2346 
2347     DominatorTree *DT = nullptr;
2348     AssumptionCache *AC = nullptr;
2349     InformationCache &InfoCache = A.getInfoCache();
2350     if (const Function *Fn = getAnchorScope()) {
2351       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2352       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2353     }
2354 
2355     AANonNull::StateType T;
2356     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
2357       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2358                                              DepClassTy::REQUIRED);
2359       if (!Stripped && this == &AA) {
2360         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2361           T.indicatePessimisticFixpoint();
2362       } else {
2363         // Use abstract attribute information.
2364         const AANonNull::StateType &NS = AA.getState();
2365         T ^= NS;
2366       }
2367       return T.isValidState();
2368     };
2369 
2370     for (const auto &VAC : Values)
2371       if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
2372         return indicatePessimisticFixpoint();
2373 
2374     return clampStateAndIndicateChange(getState(), T);
2375   }
2376 
2377   /// See AbstractAttribute::trackStatistics()
2378   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2379 };
2380 
2381 /// NonNull attribute for function return value.
2382 struct AANonNullReturned final
2383     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2384   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2385       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2386 
2387   /// See AbstractAttribute::getAsStr().
2388   const std::string getAsStr() const override {
2389     return getAssumed() ? "nonnull" : "may-null";
2390   }
2391 
2392   /// See AbstractAttribute::trackStatistics()
2393   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2394 };
2395 
2396 /// NonNull attribute for function argument.
2397 struct AANonNullArgument final
2398     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2399   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2400       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2401 
2402   /// See AbstractAttribute::trackStatistics()
2403   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2404 };
2405 
2406 struct AANonNullCallSiteArgument final : AANonNullFloating {
2407   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2408       : AANonNullFloating(IRP, A) {}
2409 
2410   /// See AbstractAttribute::trackStatistics()
2411   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2412 };
2413 
2414 /// NonNull attribute for a call site return position.
2415 struct AANonNullCallSiteReturned final
2416     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2417   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2418       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2419 
2420   /// See AbstractAttribute::trackStatistics()
2421   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2422 };
2423 } // namespace
2424 
2425 /// ------------------------ No-Recurse Attributes ----------------------------
2426 
2427 namespace {
2428 struct AANoRecurseImpl : public AANoRecurse {
2429   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2430 
2431   /// See AbstractAttribute::getAsStr()
2432   const std::string getAsStr() const override {
2433     return getAssumed() ? "norecurse" : "may-recurse";
2434   }
2435 };
2436 
2437 struct AANoRecurseFunction final : AANoRecurseImpl {
2438   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2439       : AANoRecurseImpl(IRP, A) {}
2440 
2441   /// See AbstractAttribute::updateImpl(...).
2442   ChangeStatus updateImpl(Attributor &A) override {
2443 
2444     // If all live call sites are known to be no-recurse, we are as well.
2445     auto CallSitePred = [&](AbstractCallSite ACS) {
2446       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2447           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2448           DepClassTy::NONE);
2449       return NoRecurseAA.isKnownNoRecurse();
2450     };
2451     bool UsedAssumedInformation = false;
2452     if (A.checkForAllCallSites(CallSitePred, *this, true,
2453                                UsedAssumedInformation)) {
2454       // If we know all call sites and all are known no-recurse, we are done.
2455       // If all known call sites, which might not be all that exist, are known
2456       // to be no-recurse, we are not done but we can continue to assume
2457       // no-recurse. If one of the call sites we have not visited will become
2458       // live, another update is triggered.
2459       if (!UsedAssumedInformation)
2460         indicateOptimisticFixpoint();
2461       return ChangeStatus::UNCHANGED;
2462     }
2463 
2464     const AAFunctionReachability &EdgeReachability =
2465         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2466                                            DepClassTy::REQUIRED);
2467     if (EdgeReachability.canReach(A, *getAnchorScope()))
2468       return indicatePessimisticFixpoint();
2469     return ChangeStatus::UNCHANGED;
2470   }
2471 
2472   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2473 };
2474 
2475 /// NoRecurse attribute deduction for a call site.
2476 struct AANoRecurseCallSite final : AANoRecurseImpl {
2477   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2478       : AANoRecurseImpl(IRP, A) {}
2479 
2480   /// See AbstractAttribute::initialize(...).
2481   void initialize(Attributor &A) override {
2482     AANoRecurseImpl::initialize(A);
2483     Function *F = getAssociatedFunction();
2484     if (!F || F->isDeclaration())
2485       indicatePessimisticFixpoint();
2486   }
2487 
2488   /// See AbstractAttribute::updateImpl(...).
2489   ChangeStatus updateImpl(Attributor &A) override {
2490     // TODO: Once we have call site specific value information we can provide
2491     //       call site specific liveness information and then it makes
2492     //       sense to specialize attributes for call sites instead of
2493     //       redirecting requests to the callee.
2494     Function *F = getAssociatedFunction();
2495     const IRPosition &FnPos = IRPosition::function(*F);
2496     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2497     return clampStateAndIndicateChange(getState(), FnAA.getState());
2498   }
2499 
2500   /// See AbstractAttribute::trackStatistics()
2501   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2502 };
2503 } // namespace
2504 
2505 /// -------------------- Undefined-Behavior Attributes ------------------------
2506 
2507 namespace {
2508 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2509   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2510       : AAUndefinedBehavior(IRP, A) {}
2511 
2512   /// See AbstractAttribute::updateImpl(...).
2513   // Covers memory accesses through a pointer as well as branches etc.
2514   ChangeStatus updateImpl(Attributor &A) override {
2515     const size_t UBPrevSize = KnownUBInsts.size();
2516     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2517 
2518     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2519       // The LangRef now states that volatile stores are not UB, so skip them.
2520       if (I.isVolatile() && I.mayWriteToMemory())
2521         return true;
2522 
2523       // Skip instructions that are already saved.
2524       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2525         return true;
2526 
2527       // If we reach here, we know we have an instruction
2528       // that accesses memory through a pointer operand,
2529       // which getPointerOperand() should return for us.
2530       Value *PtrOp =
2531           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2532       assert(PtrOp &&
2533              "Expected pointer operand of memory accessing instruction");
2534 
2535       // Either we stopped and the appropriate action was taken,
2536       // or we got back a simplified value to continue.
2537       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2538       if (!SimplifiedPtrOp || !SimplifiedPtrOp.value())
2539         return true;
2540       const Value *PtrOpVal = SimplifiedPtrOp.value();
2541 
2542       // A memory access through a pointer is considered UB
2543       // only if the pointer has constant null value.
2544       // TODO: Expand it to not only check constant values.
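      // E.g., `store i32 0, ptr null` (illustrative) is flagged below when
      // null is not a defined address for the target, while a store through
      // a merely maybe-null pointer is not.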
2545       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2546         AssumedNoUBInsts.insert(&I);
2547         return true;
2548       }
2549       const Type *PtrTy = PtrOpVal->getType();
2550 
2551       // Because we only consider instructions inside functions,
2552       // assume that a parent function exists.
2553       const Function *F = I.getFunction();
2554 
2555       // A memory access using constant null pointer is only considered UB
2556       // if null pointer is _not_ defined for the target platform.
2557       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2558         AssumedNoUBInsts.insert(&I);
2559       else
2560         KnownUBInsts.insert(&I);
2561       return true;
2562     };
2563 
2564     auto InspectBrInstForUB = [&](Instruction &I) {
2565       // A conditional branch instruction is considered UB if it has an
2566       // `undef` condition.
2567 
2568       // Skip instructions that are already saved.
2569       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2570         return true;
2571 
2572       // We know we have a branch instruction.
2573       auto *BrInst = cast<BranchInst>(&I);
2574 
2575       // Unconditional branches are never considered UB.
2576       if (BrInst->isUnconditional())
2577         return true;
2578 
2579       // Either we stopped and the appropriate action was taken,
2580       // or we got back a simplified value to continue.
2581       Optional<Value *> SimplifiedCond =
2582           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2583       if (!SimplifiedCond || !*SimplifiedCond)
2584         return true;
2585       AssumedNoUBInsts.insert(&I);
2586       return true;
2587     };
2588 
2589     auto InspectCallSiteForUB = [&](Instruction &I) {
2590       // Check whether a call site always causes UB or not.
2591 
2592       // Skip instructions that are already saved.
2593       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2594         return true;
2595 
2596       // Check for nonnull and noundef argument attribute violations at each
2597       // call site.
2598       CallBase &CB = cast<CallBase>(I);
2599       Function *Callee = CB.getCalledFunction();
2600       if (!Callee)
2601         return true;
2602       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2603         // If the current argument is known to be simplified to a null pointer
2604         // and the corresponding argument position is known to have the nonnull
2605         // attribute, the argument is poison. Furthermore, if the argument is
2606         // poison and the position is known to have the noundef attribute, this
2607         // call site is considered UB.
2608         if (idx >= Callee->arg_size())
2609           break;
2610         Value *ArgVal = CB.getArgOperand(idx);
2611         if (!ArgVal)
2612           continue;
2613         // Here, we handle three cases.
2614         //   (1) Not having a value means it is dead. (we can replace the value
2615         //       with undef)
2616         //   (2) Simplified to undef. The argument violates the noundef attribute.
2617         //   (3) Simplified to a null pointer where known to be nonnull. The
2618         //       argument is a poison value and violates the noundef attribute.
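        // E.g., passing `null` to a `nonnull noundef` parameter makes the
        // call site UB (case 3, illustrative).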
2619         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2620         auto &NoUndefAA =
2621             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2622         if (!NoUndefAA.isKnownNoUndef())
2623           continue;
2624         bool UsedAssumedInformation = false;
2625         Optional<Value *> SimplifiedVal =
2626             A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
2627                                    UsedAssumedInformation, AA::Interprocedural);
2628         if (UsedAssumedInformation)
2629           continue;
2630         if (SimplifiedVal && !SimplifiedVal.value())
2631           return true;
2632         if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) {
2633           KnownUBInsts.insert(&I);
2634           continue;
2635         }
2636         if (!ArgVal->getType()->isPointerTy() ||
2637             !isa<ConstantPointerNull>(*SimplifiedVal.value()))
2638           continue;
2639         auto &NonNullAA =
2640             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2641         if (NonNullAA.isKnownNonNull())
2642           KnownUBInsts.insert(&I);
2643       }
2644       return true;
2645     };
2646 
2647     auto InspectReturnInstForUB = [&](Instruction &I) {
2648       auto &RI = cast<ReturnInst>(I);
2649       // Either we stopped and the appropriate action was taken,
2650       // or we got back a simplified return value to continue.
2651       Optional<Value *> SimplifiedRetValue =
2652           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2653       if (!SimplifiedRetValue || !*SimplifiedRetValue)
2654         return true;
2655 
2656       // Check if a return instruction always causes UB or not.
2657       // Note: It is guaranteed that the returned position of the anchor
2658       //       scope has noundef attribute when this is called.
2659       //       We also ensure the return position is not "assumed dead"
2660       //       because the returned value was then potentially simplified to
2661       //       `undef` in AAReturnedValues without removing the `noundef`
2662       //       attribute yet.
2663 
2664       // When the returned position has the noundef attribute, UB occurs in the
2665       // following cases.
2666       //   (1) Returned value is known to be undef.
2667       //   (2) The value is known to be a null pointer and the returned
2668       //       position has nonnull attribute (because the returned value is
2669       //       poison).
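      // E.g., `ret ptr null` from a function whose return position is
      // `nonnull noundef` is UB (case 2, illustrative).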
2670       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2671         auto &NonNullAA = A.getAAFor<AANonNull>(
2672             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2673         if (NonNullAA.isKnownNonNull())
2674           KnownUBInsts.insert(&I);
2675       }
2676 
2677       return true;
2678     };
2679 
2680     bool UsedAssumedInformation = false;
2681     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2682                               {Instruction::Load, Instruction::Store,
2683                                Instruction::AtomicCmpXchg,
2684                                Instruction::AtomicRMW},
2685                               UsedAssumedInformation,
2686                               /* CheckBBLivenessOnly */ true);
2687     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2688                               UsedAssumedInformation,
2689                               /* CheckBBLivenessOnly */ true);
2690     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2691                                       UsedAssumedInformation);
2692 
2693     // If the returned position of the anchor scope has noundef attribute, check
2694     // all return instructions.
2695     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2696       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2697       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2698         auto &RetPosNoUndefAA =
2699             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2700         if (RetPosNoUndefAA.isKnownNoUndef())
2701           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2702                                     {Instruction::Ret}, UsedAssumedInformation,
2703                                     /* CheckBBLivenessOnly */ true);
2704       }
2705     }
2706 
2707     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2708         UBPrevSize != KnownUBInsts.size())
2709       return ChangeStatus::CHANGED;
2710     return ChangeStatus::UNCHANGED;
2711   }
2712 
2713   bool isKnownToCauseUB(Instruction *I) const override {
2714     return KnownUBInsts.count(I);
2715   }
2716 
2717   bool isAssumedToCauseUB(Instruction *I) const override {
2718     // In simple words, if an instruction is not in the set of instructions
2719     // assumed to _not_ cause UB, then it is assumed to cause UB (that
2720     // includes those in the KnownUBInsts set). The rest of the code is
2721     // boilerplate to ensure that it is one of the instructions we test
2722     // for UB.
2723 
2724     switch (I->getOpcode()) {
2725     case Instruction::Load:
2726     case Instruction::Store:
2727     case Instruction::AtomicCmpXchg:
2728     case Instruction::AtomicRMW:
2729       return !AssumedNoUBInsts.count(I);
2730     case Instruction::Br: {
2731       auto *BrInst = cast<BranchInst>(I);
2732       if (BrInst->isUnconditional())
2733         return false;
2734       return !AssumedNoUBInsts.count(I);
2735     }
2736     default:
2737       return false;
2738     }
2739     return false;
2740   }
2741 
2742   ChangeStatus manifest(Attributor &A) override {
2743     if (KnownUBInsts.empty())
2744       return ChangeStatus::UNCHANGED;
2745     for (Instruction *I : KnownUBInsts)
2746       A.changeToUnreachableAfterManifest(I);
2747     return ChangeStatus::CHANGED;
2748   }
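
       // Illustrative sketch (not part of the original source; @g is
       // hypothetical): manifesting conceptually rewrites a known-UB call site
       // such as
       //
       //   call void @g(ptr noundef nonnull null)
       //
       // into `unreachable` via changeToUnreachableAfterManifest, letting later
       // passes fold away the rest of the block.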
2749 
2750   /// See AbstractAttribute::getAsStr()
2751   const std::string getAsStr() const override {
2752     return getAssumed() ? "undefined-behavior" : "no-ub";
2753   }
2754 
2755   /// Note: The correctness of this analysis depends on the fact that the
2756   /// following 2 sets will stop changing after some point.
2757   /// "Change" here means that their size changes.
2758   /// The size of each set is monotonically increasing
2759   /// (we only add items to them) and it is upper bounded by the number of
2760   /// instructions in the processed function (we can never save more
2761   /// elements in either set than this number). Hence, at some point,
2762   /// they will stop increasing.
2763   /// Consequently, at some point, both sets will have stopped
2764   /// changing, effectively making the analysis reach a fixpoint.
2765 
2766   /// Note: These 2 sets are disjoint and an instruction can be considered
2767   /// one of 3 things:
2768   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2769   ///    the KnownUBInsts set.
2770   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2771   ///    has a reason to assume it).
2772   /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2773   ///    could not find a reason to assume or prove that it can cause UB,
2774   ///    hence it assumes it doesn't. We have a set for these instructions
2775   ///    so that we don't reprocess them in every update.
2776   ///    Note however that instructions in this set may cause UB.
2777 
2778 protected:
2779   /// A set of all live instructions _known_ to cause UB.
2780   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2781 
2782 private:
2783   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2784   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2785 
2786   // Should be called during updates: if we're processing an instruction
2787   // \p I that depends on a value \p V, one of the following has to happen:
2788   // - If the value is assumed, then stop.
2789   // - If the value is known but undef, then consider it UB.
2790   // - Otherwise, do specific processing with the simplified value.
2791   // We return None in the first 2 cases to signify that an appropriate
2792   // action was taken and the caller should stop.
2793   // Otherwise, we return the simplified value that the caller should
2794   // use for specific processing.
2795   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2796                                          Instruction *I) {
2797     bool UsedAssumedInformation = false;
2798     Optional<Value *> SimplifiedV =
2799         A.getAssumedSimplified(IRPosition::value(*V), *this,
2800                                UsedAssumedInformation, AA::Interprocedural);
2801     if (!UsedAssumedInformation) {
2802       // Don't depend on assumed values.
2803       if (!SimplifiedV) {
2804         // If it is known (which we tested above) but it doesn't have a value,
2805         // then we can assume `undef` and hence the instruction is UB.
2806         KnownUBInsts.insert(I);
2807         return llvm::None;
2808       }
2809       if (!*SimplifiedV)
2810         return nullptr;
2811       V = *SimplifiedV;
2812     }
2813     if (isa<UndefValue>(V)) {
2814       KnownUBInsts.insert(I);
2815       return llvm::None;
2816     }
2817     return V;
2818   }
2819 };
2820 
2821 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2822   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2823       : AAUndefinedBehaviorImpl(IRP, A) {}
2824 
2825   /// See AbstractAttribute::trackStatistics()
2826   void trackStatistics() const override {
2827     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2828                "Number of instructions known to have UB");
2829     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2830         KnownUBInsts.size();
2831   }
2832 };
2833 } // namespace
2834 
2835 /// ------------------------ Will-Return Attributes ----------------------------
2836 
2837 namespace {
2838 // Helper function that checks whether a function has any cycle that we do
2839 // not know to be bounded. Loops with a known maximum trip count are
2840 // considered bounded; any other cycle is not.
2841 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2842   ScalarEvolution *SE =
2843       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2844   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2845   // If either SCEV or LoopInfo is not available for the function then we
2846   // assume any cycle to be an unbounded cycle.
2847   // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2848   // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2849   if (!SE || !LI) {
2850     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2851       if (SCCI.hasCycle())
2852         return true;
2853     return false;
2854   }
2855 
2856   // If there's irreducible control, the function may contain non-loop cycles.
2857   if (mayContainIrreducibleControl(F, LI))
2858     return true;
2859 
2860   // Any loop that does not have a max trip count is considered an unbounded cycle.
2861   for (auto *L : LI->getLoopsInPreorder()) {
2862     if (!SE->getSmallConstantMaxTripCount(L))
2863       return true;
2864   }
2865   return false;
2866 }
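
     // Illustrative sketch (not part of the original source): with SCEV
     // available, a counted loop like
     //
     //   loop:
     //     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
     //     %i.next = add nuw nsw i64 %i, 1
     //     %cmp = icmp ult i64 %i.next, 128
     //     br i1 %cmp, label %loop, label %exit
     //
     // has a small constant max trip count and counts as bounded, while a loop
     // whose exit depends on loaded data typically has no max trip count and
     // counts as an unbounded cycle.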
2867 
2868 struct AAWillReturnImpl : public AAWillReturn {
2869   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2870       : AAWillReturn(IRP, A) {}
2871 
2872   /// See AbstractAttribute::initialize(...).
2873   void initialize(Attributor &A) override {
2874     AAWillReturn::initialize(A);
2875 
2876     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2877       indicateOptimisticFixpoint();
2878       return;
2879     }
2880   }
2881 
2882   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2883   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2884     // Check for `mustprogress` in the scope and the associated function which
2885     // might be different if this is a call site.
2886     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2887         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2888       return false;
2889 
2890     bool IsKnown;
2891     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2892       return IsKnown || !KnownOnly;
2893     return false;
2894   }
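
       // Illustrative sketch (not part of the original source; @sum is
       // hypothetical): a function like
       //
       //   define i32 @sum(ptr %p) mustprogress readonly { ... }
       //
       // cannot satisfy `mustprogress` through side effects (it is `readonly`),
       // so it must eventually return, which is exactly `willreturn`.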
2895 
2896   /// See AbstractAttribute::updateImpl(...).
2897   ChangeStatus updateImpl(Attributor &A) override {
2898     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2899       return ChangeStatus::UNCHANGED;
2900 
2901     auto CheckForWillReturn = [&](Instruction &I) {
2902       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2903       const auto &WillReturnAA =
2904           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2905       if (WillReturnAA.isKnownWillReturn())
2906         return true;
2907       if (!WillReturnAA.isAssumedWillReturn())
2908         return false;
2909       const auto &NoRecurseAA =
2910           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2911       return NoRecurseAA.isAssumedNoRecurse();
2912     };
2913 
2914     bool UsedAssumedInformation = false;
2915     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2916                                            UsedAssumedInformation))
2917       return indicatePessimisticFixpoint();
2918 
2919     return ChangeStatus::UNCHANGED;
2920   }
2921 
2922   /// See AbstractAttribute::getAsStr()
2923   const std::string getAsStr() const override {
2924     return getAssumed() ? "willreturn" : "may-noreturn";
2925   }
2926 };
2927 
2928 struct AAWillReturnFunction final : AAWillReturnImpl {
2929   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2930       : AAWillReturnImpl(IRP, A) {}
2931 
2932   /// See AbstractAttribute::initialize(...).
2933   void initialize(Attributor &A) override {
2934     AAWillReturnImpl::initialize(A);
2935 
2936     Function *F = getAnchorScope();
2937     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2938       indicatePessimisticFixpoint();
2939   }
2940 
2941   /// See AbstractAttribute::trackStatistics()
2942   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2943 };
2944 
2945 /// WillReturn attribute deduction for a call site.
2946 struct AAWillReturnCallSite final : AAWillReturnImpl {
2947   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2948       : AAWillReturnImpl(IRP, A) {}
2949 
2950   /// See AbstractAttribute::initialize(...).
2951   void initialize(Attributor &A) override {
2952     AAWillReturnImpl::initialize(A);
2953     Function *F = getAssociatedFunction();
2954     if (!F || !A.isFunctionIPOAmendable(*F))
2955       indicatePessimisticFixpoint();
2956   }
2957 
2958   /// See AbstractAttribute::updateImpl(...).
2959   ChangeStatus updateImpl(Attributor &A) override {
2960     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2961       return ChangeStatus::UNCHANGED;
2962 
2963     // TODO: Once we have call site specific value information we can provide
2964     //       call site specific liveness information and then it makes
2965     //       sense to specialize attributes for call site arguments instead of
2966     //       redirecting requests to the callee argument.
2967     Function *F = getAssociatedFunction();
2968     const IRPosition &FnPos = IRPosition::function(*F);
2969     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2970     return clampStateAndIndicateChange(getState(), FnAA.getState());
2971   }
2972 
2973   /// See AbstractAttribute::trackStatistics()
2974   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2975 };
2976 } // namespace
2977 
2978 /// -------------------AAReachability Attribute--------------------------
2979 
2980 namespace {
2981 struct AAReachabilityImpl : AAReachability {
2982   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2983       : AAReachability(IRP, A) {}
2984 
2985   const std::string getAsStr() const override {
2986     // TODO: Return the number of reachable queries.
2987     return "reachable";
2988   }
2989 
2990   /// See AbstractAttribute::updateImpl(...).
2991   ChangeStatus updateImpl(Attributor &A) override {
2992     return ChangeStatus::UNCHANGED;
2993   }
2994 };
2995 
2996 struct AAReachabilityFunction final : public AAReachabilityImpl {
2997   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2998       : AAReachabilityImpl(IRP, A) {}
2999 
3000   /// See AbstractAttribute::trackStatistics()
3001   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3002 };
3003 } // namespace
3004 
3005 /// ------------------------ NoAlias Argument Attribute ------------------------
3006 
3007 namespace {
3008 struct AANoAliasImpl : AANoAlias {
3009   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3010     assert(getAssociatedType()->isPointerTy() &&
3011            "Noalias is a pointer attribute");
3012   }
3013 
3014   const std::string getAsStr() const override {
3015     return getAssumed() ? "noalias" : "may-alias";
3016   }
3017 };
3018 
3019 /// NoAlias attribute for a floating value.
3020 struct AANoAliasFloating final : AANoAliasImpl {
3021   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3022       : AANoAliasImpl(IRP, A) {}
3023 
3024   /// See AbstractAttribute::initialize(...).
3025   void initialize(Attributor &A) override {
3026     AANoAliasImpl::initialize(A);
3027     Value *Val = &getAssociatedValue();
3028     do {
3029       CastInst *CI = dyn_cast<CastInst>(Val);
3030       if (!CI)
3031         break;
3032       Value *Base = CI->getOperand(0);
3033       if (!Base->hasOneUse())
3034         break;
3035       Val = Base;
3036     } while (true);
3037 
3038     if (!Val->getType()->isPointerTy()) {
3039       indicatePessimisticFixpoint();
3040       return;
3041     }
3042 
3043     if (isa<AllocaInst>(Val))
3044       indicateOptimisticFixpoint();
3045     else if (isa<ConstantPointerNull>(Val) &&
3046              !NullPointerIsDefined(getAnchorScope(),
3047                                    Val->getType()->getPointerAddressSpace()))
3048       indicateOptimisticFixpoint();
3049     else if (Val != &getAssociatedValue()) {
3050       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3051           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3052       if (ValNoAliasAA.isKnownNoAlias())
3053         indicateOptimisticFixpoint();
3054     }
3055   }
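
       // Illustrative sketch (not part of the original source): after stripping
       // a chain of single-use casts, a value such as
       //
       //   %a = alloca i32    ; a fresh stack object, trivially noalias
       //
       // or a `null` pointer (in an address space where null is not defined)
       // reaches an optimistic fixpoint immediately.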
3056 
3057   /// See AbstractAttribute::updateImpl(...).
3058   ChangeStatus updateImpl(Attributor &A) override {
3059     // TODO: Implement this.
3060     return indicatePessimisticFixpoint();
3061   }
3062 
3063   /// See AbstractAttribute::trackStatistics()
3064   void trackStatistics() const override {
3065     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3066   }
3067 };
3068 
3069 /// NoAlias attribute for an argument.
3070 struct AANoAliasArgument final
3071     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3072   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3073   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3074 
3075   /// See AbstractAttribute::initialize(...).
3076   void initialize(Attributor &A) override {
3077     Base::initialize(A);
3078     // See callsite argument attribute and callee argument attribute.
3079     if (hasAttr({Attribute::ByVal}))
3080       indicateOptimisticFixpoint();
3081   }
3082 
3083   /// See AbstractAttribute::update(...).
3084   ChangeStatus updateImpl(Attributor &A) override {
3085     // We have to make sure no-alias on the argument does not break
3086     // synchronization when this is a callback argument, see also [1] below.
3087     // If synchronization cannot be affected, we delegate to the base updateImpl
3088     // function, otherwise we give up for now.
3089 
3090     // If the function is no-sync, no-alias cannot break synchronization.
3091     const auto &NoSyncAA =
3092         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3093                              DepClassTy::OPTIONAL);
3094     if (NoSyncAA.isAssumedNoSync())
3095       return Base::updateImpl(A);
3096 
3097     // If the argument is read-only, no-alias cannot break synchronization.
3098     bool IsKnown;
3099     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3100       return Base::updateImpl(A);
3101 
3102     // If the argument is never passed through callbacks, no-alias cannot break
3103     // synchronization.
3104     bool UsedAssumedInformation = false;
3105     if (A.checkForAllCallSites(
3106             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3107             true, UsedAssumedInformation))
3108       return Base::updateImpl(A);
3109 
3110     // TODO: add no-alias but make sure it doesn't break synchronization by
3111     // introducing fake uses. See:
3112     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3113     //     International Workshop on OpenMP 2018,
3114     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3115 
3116     return indicatePessimisticFixpoint();
3117   }
3118 
3119   /// See AbstractAttribute::trackStatistics()
3120   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3121 };
3122 
3123 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3124   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3125       : AANoAliasImpl(IRP, A) {}
3126 
3127   /// See AbstractAttribute::initialize(...).
3128   void initialize(Attributor &A) override {
3129     // See callsite argument attribute and callee argument attribute.
3130     const auto &CB = cast<CallBase>(getAnchorValue());
3131     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3132       indicateOptimisticFixpoint();
3133     Value &Val = getAssociatedValue();
3134     if (isa<ConstantPointerNull>(Val) &&
3135         !NullPointerIsDefined(getAnchorScope(),
3136                               Val.getType()->getPointerAddressSpace()))
3137       indicateOptimisticFixpoint();
3138   }
3139 
3140   /// Determine if the underlying value may alias with the call site argument
3141   /// \p OtherArgNo of \p CB (= the underlying call site).
3142   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3143                             const AAMemoryBehavior &MemBehaviorAA,
3144                             const CallBase &CB, unsigned OtherArgNo) {
3145     // We do not need to worry about aliasing with the underlying IRP.
3146     if (this->getCalleeArgNo() == (int)OtherArgNo)
3147       return false;
3148 
3149     // If it is not a pointer or pointer vector, there is no aliasing.
3150     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3151     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3152       return false;
3153 
3154     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3155         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3156 
3157     // If the argument is readnone, there is no read-write aliasing.
3158     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3159       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3160       return false;
3161     }
3162 
3163     // If the argument is readonly and the underlying value is readonly, there
3164     // is no read-write aliasing.
3165     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3166     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3167       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3168       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3169       return false;
3170     }
3171 
3172     // We have to utilize actual alias analysis queries so we need the object.
3173     if (!AAR)
3174       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3175 
3176     // Try to rule it out at the call site.
3177     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3178     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3179                          "callsite arguments: "
3180                       << getAssociatedValue() << " " << *ArgOp << " => "
3181                       << (IsAliasing ? "" : "no-") << "alias \n");
3182 
3183     return IsAliasing;
3184   }
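
       // Illustrative sketch (not part of the original source; @h is
       // hypothetical): for a call like
       //
       //   call void @h(ptr %p, ptr readonly %q)
       //
       // a read-write conflict between %p and %q is ruled out if both sides are
       // only read; otherwise we fall back to an AAResults::isNoAlias query.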
3185 
3186   bool
3187   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3188                                          const AAMemoryBehavior &MemBehaviorAA,
3189                                          const AANoAlias &NoAliasAA) {
3190     // We can deduce "noalias" if the following conditions hold.
3191     // (i)   Associated value is assumed to be noalias in the definition.
3192     // (ii)  Associated value is assumed to be no-capture in all the uses
3193     //       possibly executed before this callsite.
3194     // (iii) There is no other pointer argument which could alias with the
3195     //       value.
3196 
3197     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3198     if (!AssociatedValueIsNoAliasAtDef) {
3199       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3200                         << " is not no-alias at the definition\n");
3201       return false;
3202     }
3203 
3204     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3205       const auto &DerefAA = A.getAAFor<AADereferenceable>(
3206           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3207       return DerefAA.getAssumedDereferenceableBytes();
3208     };
3209 
3210     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3211 
3212     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3213     const Function *ScopeFn = VIRP.getAnchorScope();
3214     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3215     // Check whether the value is captured in the scope using AANoCapture.
3216     // Look at CFG and check only uses possibly executed before this
3217     // callsite.
3218     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3219       Instruction *UserI = cast<Instruction>(U.getUser());
3220 
3221       // If UserI is the current instruction and there is a single potential
3222       // use of the value in UserI, we allow the use.
3223       // TODO: We should inspect the operands and allow those that cannot alias
3224       //       with the value.
3225       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3226         return true;
3227 
3228       if (ScopeFn) {
3229         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3230           if (CB->isArgOperand(&U)) {
3231 
3232             unsigned ArgNo = CB->getArgOperandNo(&U);
3233 
3234             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3235                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3236                 DepClassTy::OPTIONAL);
3237 
3238             if (NoCaptureAA.isAssumedNoCapture())
3239               return true;
3240           }
3241         }
3242 
3243         if (!AA::isPotentiallyReachable(
3244                 A, *UserI, *getCtxI(), *this,
3245                 [ScopeFn](const Function &Fn) { return &Fn != ScopeFn; }))
3246           return true;
3247       }
3248 
3249       // TODO: We should track the capturing uses in AANoCapture but the problem
3250       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3251       //       a value in the module slice.
3252       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3253       case UseCaptureKind::NO_CAPTURE:
3254         return true;
3255       case UseCaptureKind::MAY_CAPTURE:
3256         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3257                           << "\n");
3258         return false;
3259       case UseCaptureKind::PASSTHROUGH:
3260         Follow = true;
3261         return true;
3262       }
3263       llvm_unreachable("unknown UseCaptureKind");
3264     };
3265 
3266     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3267       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3268         LLVM_DEBUG(
3269             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3270                    << " cannot be noalias as it is potentially captured\n");
3271         return false;
3272       }
3273     }
3274     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3275 
3276     // Check there is no other pointer argument which could alias with the
3277     // value passed at this call site.
3278     // TODO: AbstractCallSite
3279     const auto &CB = cast<CallBase>(getAnchorValue());
3280     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3281       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3282         return false;
3283 
3284     return true;
3285   }
3286 
3287   /// See AbstractAttribute::updateImpl(...).
3288   ChangeStatus updateImpl(Attributor &A) override {
3289     // If the argument is readnone we are done as there are no accesses via the
3290     // argument.
3291     auto &MemBehaviorAA =
3292         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3293     if (MemBehaviorAA.isAssumedReadNone()) {
3294       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3295       return ChangeStatus::UNCHANGED;
3296     }
3297 
3298     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3299     const auto &NoAliasAA =
3300         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3301 
3302     AAResults *AAR = nullptr;
3303     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3304                                                NoAliasAA)) {
3305       LLVM_DEBUG(
3306           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3307       return ChangeStatus::UNCHANGED;
3308     }
3309 
3310     return indicatePessimisticFixpoint();
3311   }
3312 
3313   /// See AbstractAttribute::trackStatistics()
3314   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3315 };
3316 
3317 /// NoAlias attribute for function return value.
3318 struct AANoAliasReturned final : AANoAliasImpl {
3319   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3320       : AANoAliasImpl(IRP, A) {}
3321 
3322   /// See AbstractAttribute::initialize(...).
3323   void initialize(Attributor &A) override {
3324     AANoAliasImpl::initialize(A);
3325     Function *F = getAssociatedFunction();
3326     if (!F || F->isDeclaration())
3327       indicatePessimisticFixpoint();
3328   }
3329 
3330   /// See AbstractAttribute::updateImpl(...).
3331   ChangeStatus updateImpl(Attributor &A) override {
3332 
3333     auto CheckReturnValue = [&](Value &RV) -> bool {
3334       if (Constant *C = dyn_cast<Constant>(&RV))
3335         if (C->isNullValue() || isa<UndefValue>(C))
3336           return true;
3337 
3338       /// For now, we can only deduce noalias if we have call sites.
3339       /// FIXME: add more support.
3340       if (!isa<CallBase>(&RV))
3341         return false;
3342 
3343       const IRPosition &RVPos = IRPosition::value(RV);
3344       const auto &NoAliasAA =
3345           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3346       if (!NoAliasAA.isAssumedNoAlias())
3347         return false;
3348 
3349       const auto &NoCaptureAA =
3350           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3351       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3352     };
3353 
3354     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3355       return indicatePessimisticFixpoint();
3356 
3357     return ChangeStatus::UNCHANGED;
3358   }
3359 
3360   /// See AbstractAttribute::trackStatistics()
3361   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3362 };
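
     // Illustrative sketch (not part of the original source; @wrapper is
     // hypothetical): the deduction above covers returns of the form
     //
     //   define ptr @wrapper(i64 %n) {
     //     %m = call noalias ptr @malloc(i64 %n)
     //     ret ptr %m
     //   }
     //
     // where the returned call site is assumed noalias and its result is at
     // most "no-capture-maybe-returned", so the wrapper's return value is
     // noalias too.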
3363 
3364 /// NoAlias attribute deduction for a call site return value.
3365 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3366   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3367       : AANoAliasImpl(IRP, A) {}
3368 
3369   /// See AbstractAttribute::initialize(...).
3370   void initialize(Attributor &A) override {
3371     AANoAliasImpl::initialize(A);
3372     Function *F = getAssociatedFunction();
3373     if (!F || F->isDeclaration())
3374       indicatePessimisticFixpoint();
3375   }
3376 
3377   /// See AbstractAttribute::updateImpl(...).
3378   ChangeStatus updateImpl(Attributor &A) override {
3379     // TODO: Once we have call site specific value information we can provide
3380     //       call site specific liveness information and then it makes
3381     //       sense to specialize attributes for call site arguments instead of
3382     //       redirecting requests to the callee argument.
3383     Function *F = getAssociatedFunction();
3384     const IRPosition &FnPos = IRPosition::returned(*F);
3385     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3386     return clampStateAndIndicateChange(getState(), FnAA.getState());
3387   }
3388 
3389   /// See AbstractAttribute::trackStatistics()
3390   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3391 };
3392 } // namespace
3393 
3394 /// -------------------AAIsDead Function Attribute-----------------------
3395 
3396 namespace {
3397 struct AAIsDeadValueImpl : public AAIsDead {
3398   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3399 
3400   /// See AbstractAttribute::initialize(...).
3401   void initialize(Attributor &A) override {
3402     if (auto *Scope = getAnchorScope())
3403       if (!A.isRunOn(*Scope))
3404         indicatePessimisticFixpoint();
3405   }
3406 
3407   /// See AAIsDead::isAssumedDead().
3408   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3409 
3410   /// See AAIsDead::isKnownDead().
3411   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3412 
3413   /// See AAIsDead::isAssumedDead(BasicBlock *).
3414   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3415 
3416   /// See AAIsDead::isKnownDead(BasicBlock *).
3417   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3418 
3419   /// See AAIsDead::isAssumedDead(Instruction *I).
3420   bool isAssumedDead(const Instruction *I) const override {
3421     return I == getCtxI() && isAssumedDead();
3422   }
3423 
3424   /// See AAIsDead::isKnownDead(Instruction *I).
3425   bool isKnownDead(const Instruction *I) const override {
3426     return isAssumedDead(I) && isKnownDead();
3427   }
3428 
3429   /// See AbstractAttribute::getAsStr().
3430   const std::string getAsStr() const override {
3431     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3432   }
3433 
3434   /// Check if all uses are assumed dead.
3435   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3436     // Callers might not check the type; void values have no uses.
3437     if (V.getType()->isVoidTy() || V.use_empty())
3438       return true;
3439 
3440     // If we replace a value with a constant there are no uses left afterwards.
3441     if (!isa<Constant>(V)) {
3442       if (auto *I = dyn_cast<Instruction>(&V))
3443         if (!A.isRunOn(*I->getFunction()))
3444           return false;
3445       bool UsedAssumedInformation = false;
3446       Optional<Constant *> C =
3447           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3448       if (!C || *C)
3449         return true;
3450     }
3451 
3452     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3453     // Explicitly set the dependence class to required because we want a long
3454     // chain of N dependent instructions to be considered live as soon as one is
3455     // without going through N update cycles. This is not required for
3456     // correctness.
3457     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3458                              DepClassTy::REQUIRED,
3459                              /* IgnoreDroppableUses */ false);
3460   }
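
       // Illustrative sketch (not part of the original source; @f is
       // hypothetical): for
       //
       //   %r = call i32 @f()
       //   %u = add i32 %r, 1    ; sole user, itself assumed dead
       //
       // all uses of %r are assumed dead once %u is, and the REQUIRED
       // dependence above avoids N update cycles for a chain of N dependent
       // instructions.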
3461 
3462   /// Determine if \p I is assumed to be side-effect free.
3463   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3464     if (!I || wouldInstructionBeTriviallyDead(I))
3465       return true;
3466 
3467     auto *CB = dyn_cast<CallBase>(I);
3468     if (!CB || isa<IntrinsicInst>(CB))
3469       return false;
3470 
3471     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3472     const auto &NoUnwindAA =
3473         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3474     if (!NoUnwindAA.isAssumedNoUnwind())
3475       return false;
3476     if (!NoUnwindAA.isKnownNoUnwind())
3477       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3478 
3479     bool IsKnown;
3480     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3481   }
3482 };
3483 
3484 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3485   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3486       : AAIsDeadValueImpl(IRP, A) {}
3487 
3488   /// See AbstractAttribute::initialize(...).
3489   void initialize(Attributor &A) override {
3490     AAIsDeadValueImpl::initialize(A);
3491 
3492     if (isa<UndefValue>(getAssociatedValue())) {
3493       indicatePessimisticFixpoint();
3494       return;
3495     }
3496 
3497     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3498     if (!isAssumedSideEffectFree(A, I)) {
3499       if (!isa_and_nonnull<StoreInst>(I))
3500         indicatePessimisticFixpoint();
3501       else
3502         removeAssumedBits(HAS_NO_EFFECT);
3503     }
3504   }
3505 
3506   bool isDeadStore(Attributor &A, StoreInst &SI) {
3507     // The LangRef now states that a volatile store is not UB/dead, so skip them.
3508     if (SI.isVolatile())
3509       return false;
3510 
3511     bool UsedAssumedInformation = false;
3512     SmallSetVector<Value *, 4> PotentialCopies;
3513     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3514                                              UsedAssumedInformation))
3515       return false;
3516     return llvm::all_of(PotentialCopies, [&](Value *V) {
3517       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3518                              UsedAssumedInformation);
3519     });
3520   }
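
       // Illustrative sketch (not part of the original source): a store such as
       //
       //   %a = alloca i32
       //   store i32 42, ptr %a
       //
       // is a dead store if every potential copy of the stored value (here, any
       // load of %a) is itself assumed dead.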
3521 
3522   /// See AbstractAttribute::getAsStr().
3523   const std::string getAsStr() const override {
3524     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3525     if (isa_and_nonnull<StoreInst>(I))
3526       if (isValidState())
3527         return "assumed-dead-store";
3528     return AAIsDeadValueImpl::getAsStr();
3529   }
3530 
3531   /// See AbstractAttribute::updateImpl(...).
3532   ChangeStatus updateImpl(Attributor &A) override {
3533     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3534     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3535       if (!isDeadStore(A, *SI))
3536         return indicatePessimisticFixpoint();
3537     } else {
3538       if (!isAssumedSideEffectFree(A, I))
3539         return indicatePessimisticFixpoint();
3540       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3541         return indicatePessimisticFixpoint();
3542     }
3543     return ChangeStatus::UNCHANGED;
3544   }
3545 
3546   bool isRemovableStore() const override {
3547     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3548   }
3549 
3550   /// See AbstractAttribute::manifest(...).
3551   ChangeStatus manifest(Attributor &A) override {
3552     Value &V = getAssociatedValue();
3553     if (auto *I = dyn_cast<Instruction>(&V)) {
3554       // If we get here we basically know the users are all dead. We check if
3555       // isAssumedSideEffectFree returns true again here because that might not
3556       // be the case: only the users may be dead while the instruction (= call)
3557       // is still needed.
3558       if (isa<StoreInst>(I) ||
3559           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3560         A.deleteAfterManifest(*I);
3561         return ChangeStatus::CHANGED;
3562       }
3563     }
3564     return ChangeStatus::UNCHANGED;
3565   }
3566 
3567   /// See AbstractAttribute::trackStatistics()
3568   void trackStatistics() const override {
3569     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3570   }
3571 };
3572 
3573 struct AAIsDeadArgument : public AAIsDeadFloating {
3574   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3575       : AAIsDeadFloating(IRP, A) {}
3576 
3577   /// See AbstractAttribute::initialize(...).
3578   void initialize(Attributor &A) override {
3579     AAIsDeadFloating::initialize(A);
3580     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3581       indicatePessimisticFixpoint();
3582   }
3583 
3584   /// See AbstractAttribute::manifest(...).
3585   ChangeStatus manifest(Attributor &A) override {
3586     Argument &Arg = *getAssociatedArgument();
3587     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3588       if (A.registerFunctionSignatureRewrite(
3589               Arg, /* ReplacementTypes */ {},
3590               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3591               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3592         return ChangeStatus::CHANGED;
3593       }
3594     return ChangeStatus::UNCHANGED;
3595   }
3596 
3597   /// See AbstractAttribute::trackStatistics()
3598   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3599 };
3600 
3601 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3602   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3603       : AAIsDeadValueImpl(IRP, A) {}
3604 
3605   /// See AbstractAttribute::initialize(...).
3606   void initialize(Attributor &A) override {
3607     AAIsDeadValueImpl::initialize(A);
3608     if (isa<UndefValue>(getAssociatedValue()))
3609       indicatePessimisticFixpoint();
3610   }
3611 
3612   /// See AbstractAttribute::updateImpl(...).
3613   ChangeStatus updateImpl(Attributor &A) override {
3614     // TODO: Once we have call site specific value information we can provide
3615     //       sense to specialize attributes for call site arguments instead of
3616     //       sense to specialize attributes for call sites arguments instead of
3617     //       redirecting requests to the callee argument.
3618     Argument *Arg = getAssociatedArgument();
3619     if (!Arg)
3620       return indicatePessimisticFixpoint();
3621     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3622     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3623     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3624   }
3625 
3626   /// See AbstractAttribute::manifest(...).
3627   ChangeStatus manifest(Attributor &A) override {
3628     CallBase &CB = cast<CallBase>(getAnchorValue());
3629     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3630     assert(!isa<UndefValue>(U.get()) &&
3631            "Expected undef values to be filtered out!");
3632     UndefValue &UV = *UndefValue::get(U->getType());
3633     if (A.changeUseAfterManifest(U, UV))
3634       return ChangeStatus::CHANGED;
3635     return ChangeStatus::UNCHANGED;
3636   }
3637 
3638   /// See AbstractAttribute::trackStatistics()
3639   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3640 };
3641 
3642 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3643   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3644       : AAIsDeadFloating(IRP, A) {}
3645 
3646   /// See AAIsDead::isAssumedDead().
3647   bool isAssumedDead() const override {
3648     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3649   }
3650 
3651   /// See AbstractAttribute::initialize(...).
3652   void initialize(Attributor &A) override {
3653     AAIsDeadFloating::initialize(A);
3654     if (isa<UndefValue>(getAssociatedValue())) {
3655       indicatePessimisticFixpoint();
3656       return;
3657     }
3658 
3659     // We track this separately as a secondary state.
3660     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3661   }
3662 
3663   /// See AbstractAttribute::updateImpl(...).
3664   ChangeStatus updateImpl(Attributor &A) override {
3665     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3666     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3667       IsAssumedSideEffectFree = false;
3668       Changed = ChangeStatus::CHANGED;
3669     }
3670     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3671       return indicatePessimisticFixpoint();
3672     return Changed;
3673   }
3674 
3675   /// See AbstractAttribute::trackStatistics()
3676   void trackStatistics() const override {
3677     if (IsAssumedSideEffectFree)
3678       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3679     else
3680       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3681   }
3682 
3683   /// See AbstractAttribute::getAsStr().
3684   const std::string getAsStr() const override {
3685     return isAssumedDead()
3686                ? "assumed-dead"
3687                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3688   }
3689 
3690 private:
3691   bool IsAssumedSideEffectFree = true;
3692 };
3693 
3694 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3695   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3696       : AAIsDeadValueImpl(IRP, A) {}
3697 
3698   /// See AbstractAttribute::updateImpl(...).
3699   ChangeStatus updateImpl(Attributor &A) override {
3700 
3701     bool UsedAssumedInformation = false;
3702     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3703                               {Instruction::Ret}, UsedAssumedInformation);
3704 
3705     auto PredForCallSite = [&](AbstractCallSite ACS) {
3706       if (ACS.isCallbackCall() || !ACS.getInstruction())
3707         return false;
3708       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3709     };
3710 
3711     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3712                                 UsedAssumedInformation))
3713       return indicatePessimisticFixpoint();
3714 
3715     return ChangeStatus::UNCHANGED;
3716   }
3717 
3718   /// See AbstractAttribute::manifest(...).
3719   ChangeStatus manifest(Attributor &A) override {
3720     // TODO: Rewrite the signature to return void?
3721     bool AnyChange = false;
3722     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3723     auto RetInstPred = [&](Instruction &I) {
3724       ReturnInst &RI = cast<ReturnInst>(I);
3725       if (!isa<UndefValue>(RI.getReturnValue()))
3726         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3727       return true;
3728     };
3729     bool UsedAssumedInformation = false;
3730     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3731                               UsedAssumedInformation);
3732     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3733   }
3734 
3735   /// See AbstractAttribute::trackStatistics()
3736   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3737 };
3738 
3739 struct AAIsDeadFunction : public AAIsDead {
3740   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3741 
3742   /// See AbstractAttribute::initialize(...).
3743   void initialize(Attributor &A) override {
3744     Function *F = getAnchorScope();
3745     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3746       indicatePessimisticFixpoint();
3747       return;
3748     }
3749     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3750     assumeLive(A, F->getEntryBlock());
3751   }
3752 
3753   /// See AbstractAttribute::getAsStr().
3754   const std::string getAsStr() const override {
3755     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3756            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3757            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3758            std::to_string(KnownDeadEnds.size()) + "]";
3759   }
3760 
3761   /// See AbstractAttribute::manifest(...).
3762   ChangeStatus manifest(Attributor &A) override {
3763     assert(getState().isValidState() &&
3764            "Attempted to manifest an invalid state!");
3765 
3766     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3767     Function &F = *getAnchorScope();
3768 
3769     if (AssumedLiveBlocks.empty()) {
3770       A.deleteAfterManifest(F);
3771       return ChangeStatus::CHANGED;
3772     }
3773 
3774     // Flag to determine if we can change an invoke to a call assuming the
3775     // callee is nounwind. This is not possible if the personality of the
3776     // function allows catching asynchronous exceptions.
3777     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3778 
3779     KnownDeadEnds.set_union(ToBeExploredFrom);
3780     for (const Instruction *DeadEndI : KnownDeadEnds) {
3781       auto *CB = dyn_cast<CallBase>(DeadEndI);
3782       if (!CB)
3783         continue;
3784       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3785           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3786       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3787       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3788         continue;
3789 
3790       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3791         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3792       else
3793         A.changeToUnreachableAfterManifest(
3794             const_cast<Instruction *>(DeadEndI->getNextNode()));
3795       HasChanged = ChangeStatus::CHANGED;
3796     }
3797 
3798     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3799     for (BasicBlock &BB : F)
3800       if (!AssumedLiveBlocks.count(&BB)) {
3801         A.deleteAfterManifest(BB);
3802         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3803         HasChanged = ChangeStatus::CHANGED;
3804       }
3805 
3806     return HasChanged;
3807   }
3808 
3809   /// See AbstractAttribute::updateImpl(...).
3810   ChangeStatus updateImpl(Attributor &A) override;
3811 
3812   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3813     assert(From->getParent() == getAnchorScope() &&
3814            To->getParent() == getAnchorScope() &&
3815            "Used AAIsDead of the wrong function");
3816     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3817   }
3818 
3819   /// See AbstractAttribute::trackStatistics()
3820   void trackStatistics() const override {}
3821 
3822   /// Returns true if the function is assumed dead.
3823   bool isAssumedDead() const override { return false; }
3824 
3825   /// See AAIsDead::isKnownDead().
3826   bool isKnownDead() const override { return false; }
3827 
3828   /// See AAIsDead::isAssumedDead(BasicBlock *).
3829   bool isAssumedDead(const BasicBlock *BB) const override {
3830     assert(BB->getParent() == getAnchorScope() &&
3831            "BB must be in the same anchor scope function.");
3832 
3833     if (!getAssumed())
3834       return false;
3835     return !AssumedLiveBlocks.count(BB);
3836   }
3837 
3838   /// See AAIsDead::isKnownDead(BasicBlock *).
3839   bool isKnownDead(const BasicBlock *BB) const override {
3840     return getKnown() && isAssumedDead(BB);
3841   }
3842 
3843   /// See AAIsDead::isAssumedDead(Instruction *I).
3844   bool isAssumedDead(const Instruction *I) const override {
3845     assert(I->getParent()->getParent() == getAnchorScope() &&
3846            "Instruction must be in the same anchor scope function.");
3847 
3848     if (!getAssumed())
3849       return false;
3850 
3851     // If it is not in AssumedLiveBlocks then it is for sure dead.
3852     // Otherwise, it can still be after a noreturn call in a live block.
3853     if (!AssumedLiveBlocks.count(I->getParent()))
3854       return true;
3855 
3856     // If it is not after a liveness barrier it is live.
3857     const Instruction *PrevI = I->getPrevNode();
3858     while (PrevI) {
3859       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3860         return true;
3861       PrevI = PrevI->getPrevNode();
3862     }
3863     return false;
3864   }
3865 
3866   /// See AAIsDead::isKnownDead(Instruction *I).
3867   bool isKnownDead(const Instruction *I) const override {
3868     return getKnown() && isAssumedDead(I);
3869   }
3870 
3871   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3872   /// that internal functions called from \p BB should now be looked at.
3873   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3874     if (!AssumedLiveBlocks.insert(&BB).second)
3875       return false;
3876 
3877     // We assume that all of BB is (probably) live now and if there are calls to
3878     // internal functions we will assume that those are now live as well. This
3879     // is a performance optimization for blocks with calls to a lot of internal
3880     // functions. It can however cause dead functions to be treated as live.
3881     for (const Instruction &I : BB)
3882       if (const auto *CB = dyn_cast<CallBase>(&I))
3883         if (const Function *F = CB->getCalledFunction())
3884           if (F->hasLocalLinkage())
3885             A.markLiveInternalFunction(*F);
3886     return true;
3887   }
3888 
3889   /// Collection of instructions that need to be explored again, e.g., we
3890   /// did assume they do not transfer control to (one of their) successors.
3891   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3892 
3893   /// Collection of instructions that are known to not transfer control.
3894   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3895 
3896   /// Collection of all assumed live edges
3897   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3898 
3899   /// Collection of all assumed live BasicBlocks.
3900   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3901 };
3902 
3903 static bool
3904 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3905                         AbstractAttribute &AA,
3906                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3907   const IRPosition &IPos = IRPosition::callsite_function(CB);
3908 
3909   const auto &NoReturnAA =
3910       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3911   if (NoReturnAA.isAssumedNoReturn())
3912     return !NoReturnAA.isKnownNoReturn();
3913   if (CB.isTerminator())
3914     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3915   else
3916     AliveSuccessors.push_back(CB.getNextNode());
3917   return false;
3918 }
3919 
3920 static bool
3921 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3922                         AbstractAttribute &AA,
3923                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3924   bool UsedAssumedInformation =
3925       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3926 
3927   // First, determine if we can change an invoke to a call assuming the
3928   // callee is nounwind. This is not possible if the personality of the
3929   // function allows catching asynchronous exceptions.
3930   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3931     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3932   } else {
3933     const IRPosition &IPos = IRPosition::callsite_function(II);
3934     const auto &AANoUnw =
3935         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3936     if (AANoUnw.isAssumedNoUnwind()) {
3937       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3938     } else {
3939       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3940     }
3941   }
3942   return UsedAssumedInformation;
3943 }
3944 
3945 static bool
3946 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3947                         AbstractAttribute &AA,
3948                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3949   bool UsedAssumedInformation = false;
3950   if (BI.getNumSuccessors() == 1) {
3951     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3952   } else {
3953     Optional<Constant *> C =
3954         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3955     if (!C || isa_and_nonnull<UndefValue>(*C)) {
3956       // No value yet, assume both edges are dead.
3957     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3958       const BasicBlock *SuccBB =
3959           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3960       AliveSuccessors.push_back(&SuccBB->front());
3961     } else {
3962       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3963       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3964       UsedAssumedInformation = false;
3965     }
3966   }
3967   return UsedAssumedInformation;
3968 }
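
     // Illustrative sketch (not part of the original source): if the condition
     // of
     //
     //   br i1 %c, label %then, label %else
     //
     // is assumed to simplify to `true`, only the first instruction of %then is
     // recorded as alive and, since assumed information was used, the branch
     // will be revisited in later updates.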
3969 
3970 static bool
3971 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3972                         AbstractAttribute &AA,
3973                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3974   bool UsedAssumedInformation = false;
3975   Optional<Constant *> C =
3976       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3977   if (!C || isa_and_nonnull<UndefValue>(C.value())) {
3978     // No value yet, assume all edges are dead.
3979   } else if (isa_and_nonnull<ConstantInt>(C.value())) {
3980     for (auto &CaseIt : SI.cases()) {
3981       if (CaseIt.getCaseValue() == C.value()) {
3982         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3983         return UsedAssumedInformation;
3984       }
3985     }
3986     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3987     return UsedAssumedInformation;
3988   } else {
3989     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3990       AliveSuccessors.push_back(&SuccBB->front());
3991   }
3992   return UsedAssumedInformation;
3993 }
3994 
3995 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3996   ChangeStatus Change = ChangeStatus::UNCHANGED;
3997 
3998   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3999                     << getAnchorScope()->size() << "] BBs and "
4000                     << ToBeExploredFrom.size() << " exploration points and "
4001                     << KnownDeadEnds.size() << " known dead ends\n");
4002 
4003   // Copy and clear the list of instructions we need to explore from. It is
4004   // refilled with instructions the next update has to look at.
4005   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4006                                                ToBeExploredFrom.end());
4007   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4008 
4009   SmallVector<const Instruction *, 8> AliveSuccessors;
4010   while (!Worklist.empty()) {
4011     const Instruction *I = Worklist.pop_back_val();
4012     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4013 
4014     // Fast forward for uninteresting instructions. We could look for UB here
4015     // though.
4016     while (!I->isTerminator() && !isa<CallBase>(I))
4017       I = I->getNextNode();
4018 
4019     AliveSuccessors.clear();
4020 
4021     bool UsedAssumedInformation = false;
4022     switch (I->getOpcode()) {
4023     // TODO: look for (assumed) UB to backwards propagate "deadness".
4024     default:
4025       assert(I->isTerminator() &&
4026              "Expected non-terminators to be handled already!");
4027       for (const BasicBlock *SuccBB : successors(I->getParent()))
4028         AliveSuccessors.push_back(&SuccBB->front());
4029       break;
4030     case Instruction::Call:
4031       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4032                                                        *this, AliveSuccessors);
4033       break;
4034     case Instruction::Invoke:
4035       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4036                                                        *this, AliveSuccessors);
4037       break;
4038     case Instruction::Br:
4039       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4040                                                        *this, AliveSuccessors);
4041       break;
4042     case Instruction::Switch:
4043       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4044                                                        *this, AliveSuccessors);
4045       break;
4046     }
4047 
4048     if (UsedAssumedInformation) {
4049       NewToBeExploredFrom.insert(I);
4050     } else if (AliveSuccessors.empty() ||
4051                (I->isTerminator() &&
4052                 AliveSuccessors.size() < I->getNumSuccessors())) {
4053       if (KnownDeadEnds.insert(I))
4054         Change = ChangeStatus::CHANGED;
4055     }
4056 
4057     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4058                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4059                       << UsedAssumedInformation << "\n");
4060 
4061     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4062       if (!I->isTerminator()) {
4063         assert(AliveSuccessors.size() == 1 &&
4064                "Non-terminator expected to have a single successor!");
4065         Worklist.push_back(AliveSuccessor);
4066       } else {
4067         // Record the assumed live edge.
4068         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4069         if (AssumedLiveEdges.insert(Edge).second)
4070           Change = ChangeStatus::CHANGED;
4071         if (assumeLive(A, *AliveSuccessor->getParent()))
4072           Worklist.push_back(AliveSuccessor);
4073       }
4074     }
4075   }
4076 
4077   // Check if the content of ToBeExploredFrom changed, ignore the order.
4078   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4079       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4080         return !ToBeExploredFrom.count(I);
4081       })) {
4082     Change = ChangeStatus::CHANGED;
4083     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4084   }
4085 
4086   // If we know everything is live there is no need to query for liveness.
4087   // Instead, indicating a pessimistic fixpoint will cause the state to be
4088   // "invalid" and all queries to be answered conservatively without lookups.
4089   // To be in this state we have to (1) finish the exploration, (2) not
4090   // discover any non-trivial dead end, and (3) not rule unreachable code
4091   // dead.
4092   if (ToBeExploredFrom.empty() &&
4093       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4094       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4095         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4096       }))
4097     return indicatePessimisticFixpoint();
4098   return Change;
4099 }
4100 
4101 /// Liveness information for a call site.
4102 struct AAIsDeadCallSite final : AAIsDeadFunction {
4103   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4104       : AAIsDeadFunction(IRP, A) {}
4105 
4106   /// See AbstractAttribute::initialize(...).
4107   void initialize(Attributor &A) override {
4108     // TODO: Once we have call site specific value information we can provide
4109     //       call site specific liveness information and then it makes
4110     //       sense to specialize attributes for call sites instead of
4111     //       redirecting requests to the callee.
4112     llvm_unreachable("Abstract attributes for liveness are not "
4113                      "supported for call sites yet!");
4114   }
4115 
4116   /// See AbstractAttribute::updateImpl(...).
4117   ChangeStatus updateImpl(Attributor &A) override {
4118     return indicatePessimisticFixpoint();
4119   }
4120 
4121   /// See AbstractAttribute::trackStatistics()
4122   void trackStatistics() const override {}
4123 };
4124 } // namespace
4125 
4126 /// -------------------- Dereferenceable Argument Attribute --------------------
4127 
4128 namespace {
4129 struct AADereferenceableImpl : AADereferenceable {
4130   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4131       : AADereferenceable(IRP, A) {}
4132   using StateType = DerefState;
4133 
4134   /// See AbstractAttribute::initialize(...).
4135   void initialize(Attributor &A) override {
4136     Value &V = *getAssociatedValue().stripPointerCasts();
4137     SmallVector<Attribute, 4> Attrs;
4138     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4139              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4140     for (const Attribute &Attr : Attrs)
4141       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4142 
4143     const IRPosition &IRP = this->getIRPosition();
4144     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4145 
4146     bool CanBeNull, CanBeFreed;
4147     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4148         A.getDataLayout(), CanBeNull, CanBeFreed));
4149 
4150     bool IsFnInterface = IRP.isFnInterfaceKind();
4151     Function *FnScope = IRP.getAnchorScope();
4152     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4153       indicatePessimisticFixpoint();
4154       return;
4155     }
4156 
4157     if (Instruction *CtxI = getCtxI())
4158       followUsesInMBEC(*this, A, getState(), *CtxI);
4159   }
4160 
4161   /// See AbstractAttribute::getState()
4162   /// {
4163   StateType &getState() override { return *this; }
4164   const StateType &getState() const override { return *this; }
4165   /// }
4166 
4167   /// Helper function for collecting accessed bytes in must-be-executed-context
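       /// E.g., a precise 4-byte load at constant offset 8 from the associated
       /// value records 4 accessed bytes at offset 8.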
4168   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4169                               DerefState &State) {
4170     const Value *UseV = U->get();
4171     if (!UseV->getType()->isPointerTy())
4172       return;
4173 
4174     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4175     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4176       return;
4177 
4178     int64_t Offset;
4179     const Value *Base = GetPointerBaseWithConstantOffset(
4180         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4181     if (Base && Base == &getAssociatedValue())
4182       State.addAccessedBytes(Offset, Loc->Size.getValue());
4183   }
4184 
4185   /// See followUsesInMBEC
4186   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4187                        AADereferenceable::StateType &State) {
4188     bool IsNonNull = false;
4189     bool TrackUse = false;
4190     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4191         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4192     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4193                       << " for instruction " << *I << "\n");
4194 
4195     addAccessedBytesForUse(A, U, I, State);
4196     State.takeKnownDerefBytesMaximum(DerefBytes);
4197     return TrackUse;
4198   }
4199 
4200   /// See AbstractAttribute::manifest(...).
4201   ChangeStatus manifest(Attributor &A) override {
4202     ChangeStatus Change = AADereferenceable::manifest(A);
4203     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4204       removeAttrs({Attribute::DereferenceableOrNull});
4205       return ChangeStatus::CHANGED;
4206     }
4207     return Change;
4208   }
4209 
4210   void getDeducedAttributes(LLVMContext &Ctx,
4211                             SmallVectorImpl<Attribute> &Attrs) const override {
4212     // TODO: Add *_globally support
4213     if (isAssumedNonNull())
4214       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4215           Ctx, getAssumedDereferenceableBytes()));
4216     else
4217       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4218           Ctx, getAssumedDereferenceableBytes()));
4219   }
4220 
4221   /// See AbstractAttribute::getAsStr().
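       /// E.g., "dereferenceable_or_null<4-8>" encodes 4 known and 8 assumed
       /// dereferenceable bytes for a pointer that may be null.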
4222   const std::string getAsStr() const override {
4223     if (!getAssumedDereferenceableBytes())
4224       return "unknown-dereferenceable";
4225     return std::string("dereferenceable") +
4226            (isAssumedNonNull() ? "" : "_or_null") +
4227            (isAssumedGlobal() ? "_globally" : "") + "<" +
4228            std::to_string(getKnownDereferenceableBytes()) + "-" +
4229            std::to_string(getAssumedDereferenceableBytes()) + ">";
4230   }
4231 };
4232 
4233 /// Dereferenceable attribute for a floating value.
4234 struct AADereferenceableFloating : AADereferenceableImpl {
4235   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4236       : AADereferenceableImpl(IRP, A) {}
4237 
4238   /// See AbstractAttribute::updateImpl(...).
4239   ChangeStatus updateImpl(Attributor &A) override {
4240 
4241     bool Stripped;
4242     bool UsedAssumedInformation = false;
4243     SmallVector<AA::ValueAndContext> Values;
4244     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
4245                                       AA::AnyScope, UsedAssumedInformation)) {
4246       Values.push_back({getAssociatedValue(), getCtxI()});
4247       Stripped = false;
4248     } else {
4249       Stripped = Values.size() != 1 ||
4250                  Values.front().getValue() != &getAssociatedValue();
4251     }
4252 
4253     const DataLayout &DL = A.getDataLayout();
4254     DerefState T;
4255 
4256     auto VisitValueCB = [&](const Value &V) -> bool {
4257       unsigned IdxWidth =
4258           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4259       APInt Offset(IdxWidth, 0);
4260       const Value *Base = stripAndAccumulateOffsets(
4261           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4262           /* AllowNonInbounds */ true);
4263 
4264       const auto &AA = A.getAAFor<AADereferenceable>(
4265           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4266       int64_t DerefBytes = 0;
4267       if (!Stripped && this == &AA) {
4268         // Use IR information if we did not strip anything.
4269         // TODO: track globally.
4270         bool CanBeNull, CanBeFreed;
4271         DerefBytes =
4272             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4273         T.GlobalState.indicatePessimisticFixpoint();
4274       } else {
4275         const DerefState &DS = AA.getState();
4276         DerefBytes = DS.DerefBytesState.getAssumed();
4277         T.GlobalState &= DS.GlobalState;
4278       }
4279 
4280       // For now we do not try to "increase" dereferenceability due to negative
4281       // indices as we first have to come up with code to deal with loops and
4282       // for overflows of the dereferenceable bytes.
4283       int64_t OffsetSExt = Offset.getSExtValue();
4284       if (OffsetSExt < 0)
4285         OffsetSExt = 0;
4286 
4287       T.takeAssumedDerefBytesMinimum(
4288           std::max(int64_t(0), DerefBytes - OffsetSExt));
4289 
4290       if (this == &AA) {
4291         if (!Stripped) {
4292           // If nothing was stripped IR information is all we got.
4293           T.takeKnownDerefBytesMaximum(
4294               std::max(int64_t(0), DerefBytes - OffsetSExt));
4295           T.indicatePessimisticFixpoint();
4296         } else if (OffsetSExt > 0) {
4297           // If something was stripped but there is circular reasoning, we look
4298           // at the offset. If it is positive, we basically decrease the
4299           // dereferenceable bytes in a circular loop, which would drive them
4300           // down to the known value very slowly; indicating a fixpoint here
4301           // accelerates that.
4302           T.indicatePessimisticFixpoint();
4303         }
4304       }
4305 
4306       return T.isValidState();
4307     };
4308 
4309     for (const auto &VAC : Values)
4310       if (!VisitValueCB(*VAC.getValue()))
4311         return indicatePessimisticFixpoint();
4312 
4313     return clampStateAndIndicateChange(getState(), T);
4314   }
4315 
4316   /// See AbstractAttribute::trackStatistics()
4317   void trackStatistics() const override {
4318     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4319   }
4320 };
4321 
4322 /// Dereferenceable attribute for a return value.
4323 struct AADereferenceableReturned final
4324     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4325   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4326       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4327             IRP, A) {}
4328 
4329   /// See AbstractAttribute::trackStatistics()
4330   void trackStatistics() const override {
4331     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4332   }
4333 };
4334 
4335 /// Dereferenceable attribute for an argument.
4336 struct AADereferenceableArgument final
4337     : AAArgumentFromCallSiteArguments<AADereferenceable,
4338                                       AADereferenceableImpl> {
4339   using Base =
4340       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4341   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4342       : Base(IRP, A) {}
4343 
4344   /// See AbstractAttribute::trackStatistics()
4345   void trackStatistics() const override {
4346     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4347   }
4348 };
4349 
4350 /// Dereferenceable attribute for a call site argument.
4351 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4352   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4353       : AADereferenceableFloating(IRP, A) {}
4354 
4355   /// See AbstractAttribute::trackStatistics()
4356   void trackStatistics() const override {
4357     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4358   }
4359 };
4360 
4361 /// Dereferenceable attribute deduction for a call site return value.
4362 struct AADereferenceableCallSiteReturned final
4363     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4364   using Base =
4365       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4366   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4367       : Base(IRP, A) {}
4368 
4369   /// See AbstractAttribute::trackStatistics()
4370   void trackStatistics() const override {
4371     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4372   }
4373 };
4374 } // namespace
4375 
4376 // ------------------------ Align Argument Attribute ------------------------
4377 
4378 namespace {
4379 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4380                                     Value &AssociatedValue, const Use *U,
4381                                     const Instruction *I, bool &TrackUse) {
4382   // We need to follow common pointer manipulation uses to the accesses they
4383   // feed into.
4384   if (isa<CastInst>(I)) {
4385     // Follow all but ptr2int casts.
4386     TrackUse = !isa<PtrToIntInst>(I);
4387     return 0;
4388   }
4389   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4390     if (GEP->hasAllConstantIndices())
4391       TrackUse = true;
4392     return 0;
4393   }
4394 
4395   MaybeAlign MA;
4396   if (const auto *CB = dyn_cast<CallBase>(I)) {
4397     if (CB->isBundleOperand(U) || CB->isCallee(U))
4398       return 0;
4399 
4400     unsigned ArgNo = CB->getArgOperandNo(U);
4401     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4402     // As long as we only use known information there is no need to track
4403     // dependences here.
4404     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4405     MA = MaybeAlign(AlignAA.getKnownAlign());
4406   }
4407 
4408   const DataLayout &DL = A.getDataLayout();
4409   const Value *UseV = U->get();
4410   if (auto *SI = dyn_cast<StoreInst>(I)) {
4411     if (SI->getPointerOperand() == UseV)
4412       MA = SI->getAlign();
4413   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4414     if (LI->getPointerOperand() == UseV)
4415       MA = LI->getAlign();
4416   }
4417 
4418   if (!MA || *MA <= QueryingAA.getKnownAlign())
4419     return 0;
4420 
4421   unsigned Alignment = MA->value();
4422   int64_t Offset;
4423 
4424   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4425     if (Base == &AssociatedValue) {
4426       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4427       // So we can say that the maximum power of two which is a divisor of
4428       // gcd(Offset, Alignment) is an alignment.
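           // E.g., Offset = 20 and Alignment = 8 give gcd(20, 8) = 4, so an
           // alignment of 4 can be assumed for the base pointer.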
4429 
4430       uint32_t gcd =
4431           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4432       Alignment = llvm::PowerOf2Floor(gcd);
4433     }
4434   }
4435 
4436   return Alignment;
4437 }
4438 
4439 struct AAAlignImpl : AAAlign {
4440   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4441 
4442   /// See AbstractAttribute::initialize(...).
4443   void initialize(Attributor &A) override {
4444     SmallVector<Attribute, 4> Attrs;
4445     getAttrs({Attribute::Alignment}, Attrs);
4446     for (const Attribute &Attr : Attrs)
4447       takeKnownMaximum(Attr.getValueAsInt());
4448 
4449     Value &V = *getAssociatedValue().stripPointerCasts();
4450     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4451 
4452     if (getIRPosition().isFnInterfaceKind() &&
4453         (!getAnchorScope() ||
4454          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4455       indicatePessimisticFixpoint();
4456       return;
4457     }
4458 
4459     if (Instruction *CtxI = getCtxI())
4460       followUsesInMBEC(*this, A, getState(), *CtxI);
4461   }
4462 
4463   /// See AbstractAttribute::manifest(...).
4464   ChangeStatus manifest(Attributor &A) override {
4465     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4466 
4467     // Check for users that allow alignment annotations.
4468     Value &AssociatedValue = getAssociatedValue();
4469     for (const Use &U : AssociatedValue.uses()) {
4470       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4471         if (SI->getPointerOperand() == &AssociatedValue)
4472           if (SI->getAlign() < getAssumedAlign()) {
4473             STATS_DECLTRACK(AAAlign, Store,
4474                             "Number of times alignment added to a store");
4475             SI->setAlignment(getAssumedAlign());
4476             LoadStoreChanged = ChangeStatus::CHANGED;
4477           }
4478       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4479         if (LI->getPointerOperand() == &AssociatedValue)
4480           if (LI->getAlign() < getAssumedAlign()) {
4481             LI->setAlignment(getAssumedAlign());
4482             STATS_DECLTRACK(AAAlign, Load,
4483                             "Number of times alignment added to a load");
4484             LoadStoreChanged = ChangeStatus::CHANGED;
4485           }
4486       }
4487     }
4488 
4489     ChangeStatus Changed = AAAlign::manifest(A);
4490 
4491     Align InheritAlign =
4492         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4493     if (InheritAlign >= getAssumedAlign())
4494       return LoadStoreChanged;
4495     return Changed | LoadStoreChanged;
4496   }
4497 
4498   // TODO: Provide a helper to determine the implied ABI alignment, and check
4499   //       that value in the existing manifest method and a new one for AAAlignImpl,
4500   //       to avoid making the alignment explicit if it did not improve.
4501 
4502   /// See AbstractAttribute::getDeducedAttributes
4503   void getDeducedAttributes(LLVMContext &Ctx,
4504                             SmallVectorImpl<Attribute> &Attrs) const override {
4505     if (getAssumedAlign() > 1)
4506       Attrs.emplace_back(
4507           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4508   }
4509 
4510   /// See followUsesInMBEC
4511   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4512                        AAAlign::StateType &State) {
4513     bool TrackUse = false;
4514 
4515     unsigned int KnownAlign =
4516         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4517     State.takeKnownMaximum(KnownAlign);
4518 
4519     return TrackUse;
4520   }
4521 
4522   /// See AbstractAttribute::getAsStr().
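       /// E.g., "align<4-16>" encodes a known alignment of 4 bytes and an
       /// assumed alignment of 16 bytes.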
4523   const std::string getAsStr() const override {
4524     return "align<" + std::to_string(getKnownAlign().value()) + "-" +
4525            std::to_string(getAssumedAlign().value()) + ">";
4526   }
4527 };
4528 
4529 /// Align attribute for a floating value.
4530 struct AAAlignFloating : AAAlignImpl {
4531   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4532 
4533   /// See AbstractAttribute::updateImpl(...).
4534   ChangeStatus updateImpl(Attributor &A) override {
4535     const DataLayout &DL = A.getDataLayout();
4536 
4537     bool Stripped;
4538     bool UsedAssumedInformation = false;
4539     SmallVector<AA::ValueAndContext> Values;
4540     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
4541                                       AA::AnyScope, UsedAssumedInformation)) {
4542       Values.push_back({getAssociatedValue(), getCtxI()});
4543       Stripped = false;
4544     } else {
4545       Stripped = Values.size() != 1 ||
4546                  Values.front().getValue() != &getAssociatedValue();
4547     }
4548 
4549     StateType T;
4550     auto VisitValueCB = [&](Value &V) -> bool {
4551       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4552         return true;
4553       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4554                                            DepClassTy::REQUIRED);
4555       if (!Stripped && this == &AA) {
4556         int64_t Offset;
4557         unsigned Alignment = 1;
4558         if (const Value *Base =
4559                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4560           // TODO: Use AAAlign for the base too.
4561           Align PA = Base->getPointerAlignment(DL);
4562           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4563           // So we can say that the maximum power of two which is a divisor of
4564           // gcd(Offset, Alignment) is an alignment.
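               // E.g., a base alignment of 16 and Offset = 4 give gcd(4, 16) = 4,
               // so only an alignment of 4 can be assumed for V.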
4565 
4566           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4567                                                uint32_t(PA.value()));
4568           Alignment = llvm::PowerOf2Floor(gcd);
4569         } else {
4570           Alignment = V.getPointerAlignment(DL).value();
4571         }
4572         // Use only IR information if we did not strip anything.
4573         T.takeKnownMaximum(Alignment);
4574         T.indicatePessimisticFixpoint();
4575       } else {
4576         // Use abstract attribute information.
4577         const AAAlign::StateType &DS = AA.getState();
4578         T ^= DS;
4579       }
4580       return T.isValidState();
4581     };
4582 
4583     for (const auto &VAC : Values) {
4584       if (!VisitValueCB(*VAC.getValue()))
4585         return indicatePessimisticFixpoint();
4586     }
4587 
4588     //  TODO: If we know we visited all incoming values, thus none are assumed
4589     //  dead, we can take the known information from the state T.
4590     return clampStateAndIndicateChange(getState(), T);
4591   }
4592 
4593   /// See AbstractAttribute::trackStatistics()
4594   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4595 };
4596 
4597 /// Align attribute for function return value.
4598 struct AAAlignReturned final
4599     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4600   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4601   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4602 
4603   /// See AbstractAttribute::initialize(...).
4604   void initialize(Attributor &A) override {
4605     Base::initialize(A);
4606     Function *F = getAssociatedFunction();
4607     if (!F || F->isDeclaration())
4608       indicatePessimisticFixpoint();
4609   }
4610 
4611   /// See AbstractAttribute::trackStatistics()
4612   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4613 };
4614 
4615 /// Align attribute for function argument.
4616 struct AAAlignArgument final
4617     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4618   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4619   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4620 
4621   /// See AbstractAttribute::manifest(...).
4622   ChangeStatus manifest(Attributor &A) override {
4623     // If the associated argument is involved in a must-tail call we give up
4624     // because we would need to keep the argument alignments of caller and
4625     // callee in-sync. Just does not seem worth the trouble right now.
4626     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4627       return ChangeStatus::UNCHANGED;
4628     return Base::manifest(A);
4629   }
4630 
4631   /// See AbstractAttribute::trackStatistics()
4632   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4633 };
4634 
4635 struct AAAlignCallSiteArgument final : AAAlignFloating {
4636   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4637       : AAAlignFloating(IRP, A) {}
4638 
4639   /// See AbstractAttribute::manifest(...).
4640   ChangeStatus manifest(Attributor &A) override {
4641     // If the associated argument is involved in a must-tail call we give up
4642     // because we would need to keep the argument alignments of caller and
4643     // callee in-sync. Just does not seem worth the trouble right now.
4644     if (Argument *Arg = getAssociatedArgument())
4645       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4646         return ChangeStatus::UNCHANGED;
4647     ChangeStatus Changed = AAAlignImpl::manifest(A);
4648     Align InheritAlign =
4649         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4650     if (InheritAlign >= getAssumedAlign())
4651       Changed = ChangeStatus::UNCHANGED;
4652     return Changed;
4653   }
4654 
4655   /// See AbstractAttribute::updateImpl(Attributor &A).
4656   ChangeStatus updateImpl(Attributor &A) override {
4657     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4658     if (Argument *Arg = getAssociatedArgument()) {
4659       // We only take known information from the argument
4660       // so we do not need to track a dependence.
4661       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4662           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4663       takeKnownMaximum(ArgAlignAA.getKnownAlign().value());
4664     }
4665     return Changed;
4666   }
4667 
4668   /// See AbstractAttribute::trackStatistics()
4669   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4670 };
4671 
4672 /// Align attribute deduction for a call site return value.
4673 struct AAAlignCallSiteReturned final
4674     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4675   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4676   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4677       : Base(IRP, A) {}
4678 
4679   /// See AbstractAttribute::initialize(...).
4680   void initialize(Attributor &A) override {
4681     Base::initialize(A);
4682     Function *F = getAssociatedFunction();
4683     if (!F || F->isDeclaration())
4684       indicatePessimisticFixpoint();
4685   }
4686 
4687   /// See AbstractAttribute::trackStatistics()
4688   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4689 };
4690 } // namespace
4691 
4692 /// ------------------ Function No-Return Attribute ----------------------------
4693 namespace {
4694 struct AANoReturnImpl : public AANoReturn {
4695   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4696 
4697   /// See AbstractAttribute::initialize(...).
4698   void initialize(Attributor &A) override {
4699     AANoReturn::initialize(A);
4700     Function *F = getAssociatedFunction();
4701     if (!F || F->isDeclaration())
4702       indicatePessimisticFixpoint();
4703   }
4704 
4705   /// See AbstractAttribute::getAsStr().
4706   const std::string getAsStr() const override {
4707     return getAssumed() ? "noreturn" : "may-return";
4708   }
4709 
4710   /// See AbstractAttribute::updateImpl(Attributor &A).
4711   ChangeStatus updateImpl(Attributor &A) override {
4712     auto CheckForNoReturn = [](Instruction &) { return false; };
4713     bool UsedAssumedInformation = false;
4714     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4715                                    {(unsigned)Instruction::Ret},
4716                                    UsedAssumedInformation))
4717       return indicatePessimisticFixpoint();
4718     return ChangeStatus::UNCHANGED;
4719   }
4720 };
4721 
4722 struct AANoReturnFunction final : AANoReturnImpl {
4723   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4724       : AANoReturnImpl(IRP, A) {}
4725 
4726   /// See AbstractAttribute::trackStatistics()
4727   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4728 };
4729 
4730 /// NoReturn attribute deduction for a call site.
4731 struct AANoReturnCallSite final : AANoReturnImpl {
4732   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4733       : AANoReturnImpl(IRP, A) {}
4734 
4735   /// See AbstractAttribute::initialize(...).
4736   void initialize(Attributor &A) override {
4737     AANoReturnImpl::initialize(A);
4738     if (Function *F = getAssociatedFunction()) {
4739       const IRPosition &FnPos = IRPosition::function(*F);
4740       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4741       if (!FnAA.isAssumedNoReturn())
4742         indicatePessimisticFixpoint();
4743     }
4744   }
4745 
4746   /// See AbstractAttribute::updateImpl(...).
4747   ChangeStatus updateImpl(Attributor &A) override {
4748     // TODO: Once we have call site specific value information we can provide
4749     //       call site specific liveness information and then it makes
4750     //       sense to specialize attributes for call site arguments instead of
4751     //       redirecting requests to the callee argument.
4752     Function *F = getAssociatedFunction();
4753     const IRPosition &FnPos = IRPosition::function(*F);
4754     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4755     return clampStateAndIndicateChange(getState(), FnAA.getState());
4756   }
4757 
4758   /// See AbstractAttribute::trackStatistics()
4759   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4760 };
4761 } // namespace
4762 
4763 /// ----------------------- Instance Info ---------------------------------
4764 
4765 namespace {
4766 /// A class to hold the state of instance-info attributes.
4767 struct AAInstanceInfoImpl : public AAInstanceInfo {
4768   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4769       : AAInstanceInfo(IRP, A) {}
4770 
4771   /// See AbstractAttribute::initialize(...).
4772   void initialize(Attributor &A) override {
4773     Value &V = getAssociatedValue();
4774     if (auto *C = dyn_cast<Constant>(&V)) {
4775       if (C->isThreadDependent())
4776         indicatePessimisticFixpoint();
4777       else
4778         indicateOptimisticFixpoint();
4779       return;
4780     }
4781     if (auto *CB = dyn_cast<CallBase>(&V))
4782       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4783           !CB->mayReadFromMemory()) {
4784         indicateOptimisticFixpoint();
4785         return;
4786       }
4787   }
4788 
4789   /// See AbstractAttribute::updateImpl(...).
4790   ChangeStatus updateImpl(Attributor &A) override {
4791     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4792 
4793     Value &V = getAssociatedValue();
4794     const Function *Scope = nullptr;
4795     if (auto *I = dyn_cast<Instruction>(&V))
4796       Scope = I->getFunction();
4797     if (auto *A = dyn_cast<Argument>(&V)) {
4798       Scope = A->getParent();
4799       if (!Scope->hasLocalLinkage())
4800         return Changed;
4801     }
4802     if (!Scope)
4803       return indicateOptimisticFixpoint();
4804 
4805     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4806         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4807     if (NoRecurseAA.isAssumedNoRecurse())
4808       return Changed;
4809 
4810     auto UsePred = [&](const Use &U, bool &Follow) {
4811       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4812       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4813           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4814         Follow = true;
4815         return true;
4816       }
4817       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4818           (isa<StoreInst>(UserI) &&
4819            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4820         return true;
4821       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4822         // This check does not guarantee uniqueness, but for now it ensures we
4823         // cannot end up with two versions of \p U being treated as one.
4824         if (!CB->getCalledFunction() ||
4825             !CB->getCalledFunction()->hasLocalLinkage())
4826           return true;
4827         if (!CB->isArgOperand(&U))
4828           return false;
4829         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4830             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4831             DepClassTy::OPTIONAL);
4832         if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4833           return false;
4834         // If this call base might reach the scope again we might forward the
4835         // argument back here. This is very conservative.
4836         if (AA::isPotentiallyReachable(
4837                 A, *CB, *Scope, *this,
4838                 [Scope](const Function &Fn) { return &Fn != Scope; }))
4839           return false;
4840         return true;
4841       }
4842       return false;
4843     };
4844 
4845     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4846       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4847         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4848         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4849           return true;
4850         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4851             *SI->getFunction());
4852         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4853           return true;
4854       }
4855       return false;
4856     };
4857 
4858     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4859                            DepClassTy::OPTIONAL,
4860                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4861       return indicatePessimisticFixpoint();
4862 
4863     return Changed;
4864   }
4865 
4866   /// See AbstractState::getAsStr().
4867   const std::string getAsStr() const override {
4868     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4869   }
4870 
4871   /// See AbstractAttribute::trackStatistics()
4872   void trackStatistics() const override {}
4873 };
4874 
4875 /// InstanceInfo attribute for floating values.
4876 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4877   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4878       : AAInstanceInfoImpl(IRP, A) {}
4879 };
4880 
4881 /// InstanceInfo attribute for function arguments.
4882 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4883   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4884       : AAInstanceInfoFloating(IRP, A) {}
4885 };
4886 
4887 /// InstanceInfo attribute for call site arguments.
4888 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4889   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4890       : AAInstanceInfoImpl(IRP, A) {}
4891 
4892   /// See AbstractAttribute::updateImpl(...).
4893   ChangeStatus updateImpl(Attributor &A) override {
4894     // TODO: Once we have call site specific value information we can provide
4895     //       call site specific liveness information and then it makes
4896     //       sense to specialize attributes for call site arguments instead of
4897     //       redirecting requests to the callee argument.
4898     Argument *Arg = getAssociatedArgument();
4899     if (!Arg)
4900       return indicatePessimisticFixpoint();
4901     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4902     auto &ArgAA =
4903         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4904     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4905   }
4906 };
4907 
4908 /// InstanceInfo attribute for function return value.
4909 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4910   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4911       : AAInstanceInfoImpl(IRP, A) {
4912     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4913   }
4914 
4915   /// See AbstractAttribute::initialize(...).
4916   void initialize(Attributor &A) override {
4917     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4918   }
4919 
4920   /// See AbstractAttribute::updateImpl(...).
4921   ChangeStatus updateImpl(Attributor &A) override {
4922     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4923   }
4924 };
4925 
4926 /// InstanceInfo attribute deduction for a call site return value.
4927 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4928   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4929       : AAInstanceInfoFloating(IRP, A) {}
4930 };
4931 } // namespace
4932 
4933 /// ----------------------- Variable Capturing ---------------------------------
4934 
4935 namespace {
4936 /// A class to hold the state of no-capture attributes.
4937 struct AANoCaptureImpl : public AANoCapture {
4938   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4939 
4940   /// See AbstractAttribute::initialize(...).
4941   void initialize(Attributor &A) override {
4942     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4943       indicateOptimisticFixpoint();
4944       return;
4945     }
4946     Function *AnchorScope = getAnchorScope();
4947     if (isFnInterfaceKind() &&
4948         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4949       indicatePessimisticFixpoint();
4950       return;
4951     }
4952 
4953     // You cannot "capture" null in the default address space.
4954     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4955         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4956       indicateOptimisticFixpoint();
4957       return;
4958     }
4959 
4960     const Function *F =
4961         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4962 
4963     // Check what state the associated function can actually capture.
4964     if (F)
4965       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4966     else
4967       indicatePessimisticFixpoint();
4968   }
4969 
4970   /// See AbstractAttribute::updateImpl(...).
4971   ChangeStatus updateImpl(Attributor &A) override;
4972 
4973   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4974   void getDeducedAttributes(LLVMContext &Ctx,
4975                             SmallVectorImpl<Attribute> &Attrs) const override {
4976     if (!isAssumedNoCaptureMaybeReturned())
4977       return;
4978 
4979     if (isArgumentPosition()) {
4980       if (isAssumedNoCapture())
4981         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4982       else if (ManifestInternal)
4983         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4984     }
4985   }
4986 
4987   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4988   /// depending on the ability of the function associated with \p IRP to capture
4989   /// state in memory and through "returning/throwing", respectively.
4990   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4991                                                    const Function &F,
4992                                                    BitIntegerState &State) {
4993     // TODO: Once we have memory behavior attributes we should use them here.
4994 
4995     // If we know we cannot communicate or write to memory, we do not care about
4996     // ptr2int anymore.
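         // E.g., an argument of "void @f(ptr) readonly nounwind" can neither be
         // stored to memory nor escape via a return value or an exception, so
         // it is known not to be captured at all.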
4997     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4998         F.getReturnType()->isVoidTy()) {
4999       State.addKnownBits(NO_CAPTURE);
5000       return;
5001     }
5002 
5003     // A function cannot capture state in memory if it only reads memory; it
5004     // can, however, return/throw state, and that state might be influenced by
5005     // the pointer value, e.g., loading from a returned pointer might reveal a bit.
5006     if (F.onlyReadsMemory())
5007       State.addKnownBits(NOT_CAPTURED_IN_MEM);
5008 
5009     // A function cannot communicate state back if it does not throw
5010     // exceptions and does not return values.
5011     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5012       State.addKnownBits(NOT_CAPTURED_IN_RET);
5013 
5014     // Check existing "returned" attributes.
5015     int ArgNo = IRP.getCalleeArgNo();
5016     if (F.doesNotThrow() && ArgNo >= 0) {
5017       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5018         if (F.hasParamAttribute(u, Attribute::Returned)) {
5019           if (u == unsigned(ArgNo))
5020             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5021           else if (F.onlyReadsMemory())
5022             State.addKnownBits(NO_CAPTURE);
5023           else
5024             State.addKnownBits(NOT_CAPTURED_IN_RET);
5025           break;
5026         }
5027     }
5028   }
5029 
5030   /// See AbstractState::getAsStr().
5031   const std::string getAsStr() const override {
5032     if (isKnownNoCapture())
5033       return "known not-captured";
5034     if (isAssumedNoCapture())
5035       return "assumed not-captured";
5036     if (isKnownNoCaptureMaybeReturned())
5037       return "known not-captured-maybe-returned";
5038     if (isAssumedNoCaptureMaybeReturned())
5039       return "assumed not-captured-maybe-returned";
5040     return "assumed-captured";
5041   }
5042 
5043   /// Check the use \p U and update \p State accordingly. Return true if we
5044   /// should continue to update the state.
5045   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5046                 bool &Follow) {
5047     Instruction *UInst = cast<Instruction>(U.getUser());
5048     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5049                       << *UInst << "\n");
5050 
5051     // Deal with ptr2int by following uses.
5052     if (isa<PtrToIntInst>(UInst)) {
5053       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5054       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5055                           /* Return */ true);
5056     }
5057 
5058     // For stores we already checked if we can follow them; if they make it
5059     // here we give up.
5060     if (isa<StoreInst>(UInst))
5061       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5062                           /* Return */ false);
5063 
5064     // Explicitly catch return instructions.
5065     if (isa<ReturnInst>(UInst)) {
5066       if (UInst->getFunction() == getAnchorScope())
5067         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5068                             /* Return */ true);
5069       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5070                           /* Return */ true);
5071     }
5072 
5073     // For now we only use special logic for call sites. However, the tracker
5074     // itself knows about a lot of other non-capturing cases already.
5075     auto *CB = dyn_cast<CallBase>(UInst);
5076     if (!CB || !CB->isArgOperand(&U))
5077       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5078                           /* Return */ true);
5079 
5080     unsigned ArgNo = CB->getArgOperandNo(&U);
5081     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5082     // If we have an abstract no-capture attribute for the argument we can use
5083     // it to justify a non-capture attribute here. This allows recursion!
5084     auto &ArgNoCaptureAA =
5085         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5086     if (ArgNoCaptureAA.isAssumedNoCapture())
5087       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5088                           /* Return */ false);
5089     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5090       Follow = true;
5091       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5092                           /* Return */ false);
5093     }
5094 
5095     // Lastly, we could not find a reason to assume no-capture, so we don't.
5096     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5097                         /* Return */ true);
5098   }
5099 
5100   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5101   /// \p CapturedInRet, then return true if we should continue updating the
5102   /// state.
5103   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5104                            bool CapturedInInt, bool CapturedInRet) {
5105     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5106                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5107     if (CapturedInMem)
5108       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5109     if (CapturedInInt)
5110       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5111     if (CapturedInRet)
5112       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5113     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5114   }
5115 };
5116 
5117 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5118   const IRPosition &IRP = getIRPosition();
5119   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5120                                   : &IRP.getAssociatedValue();
5121   if (!V)
5122     return indicatePessimisticFixpoint();
5123 
5124   const Function *F =
5125       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5126   assert(F && "Expected a function!");
5127   const IRPosition &FnPos = IRPosition::function(*F);
5128 
5129   AANoCapture::StateType T;
5130 
5131   // Readonly means we cannot capture through memory.
5132   bool IsKnown;
5133   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5134     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5135     if (IsKnown)
5136       addKnownBits(NOT_CAPTURED_IN_MEM);
5137   }
5138 
5139   // Make sure all returned values are different from the underlying value.
5140   // TODO: we could do this in a more sophisticated way inside
5141   //       AAReturnedValues, e.g., track all values that escape through returns
5142   //       directly somehow.
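       // E.g., if the function may "return %arg" for the associated argument,
       // the argument escapes through the return and NOT_CAPTURED_IN_RET
       // cannot be assumed.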
5143   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5144     if (!RVAA.getState().isValidState())
5145       return false;
5146     bool SeenConstant = false;
5147     for (auto &It : RVAA.returned_values()) {
5148       if (isa<Constant>(It.first)) {
5149         if (SeenConstant)
5150           return false;
5151         SeenConstant = true;
5152       } else if (!isa<Argument>(It.first) ||
5153                  It.first == getAssociatedArgument())
5154         return false;
5155     }
5156     return true;
5157   };
5158 
5159   const auto &NoUnwindAA =
5160       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5161   if (NoUnwindAA.isAssumedNoUnwind()) {
5162     bool IsVoidTy = F->getReturnType()->isVoidTy();
5163     const AAReturnedValues *RVAA =
5164         IsVoidTy ? nullptr
5165                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5166 
5167                                                  DepClassTy::OPTIONAL);
5168     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5169       T.addKnownBits(NOT_CAPTURED_IN_RET);
5170       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5171         return ChangeStatus::UNCHANGED;
5172       if (NoUnwindAA.isKnownNoUnwind() &&
5173           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5174         addKnownBits(NOT_CAPTURED_IN_RET);
5175         if (isKnown(NOT_CAPTURED_IN_MEM))
5176           return indicateOptimisticFixpoint();
5177       }
5178     }
5179   }
5180 
5181   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5182     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5183         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5184     return DerefAA.getAssumedDereferenceableBytes();
5185   };
5186 
5187   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5188     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5189     case UseCaptureKind::NO_CAPTURE:
5190       return true;
5191     case UseCaptureKind::MAY_CAPTURE:
5192       return checkUse(A, T, U, Follow);
5193     case UseCaptureKind::PASSTHROUGH:
5194       Follow = true;
5195       return true;
5196     }
5197     llvm_unreachable("Unexpected use capture kind!");
5198   };
5199 
5200   if (!A.checkForAllUses(UseCheck, *this, *V))
5201     return indicatePessimisticFixpoint();
5202 
5203   AANoCapture::StateType &S = getState();
5204   auto Assumed = S.getAssumed();
5205   S.intersectAssumedBits(T.getAssumed());
5206   if (!isAssumedNoCaptureMaybeReturned())
5207     return indicatePessimisticFixpoint();
5208   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5209                                    : ChangeStatus::CHANGED;
5210 }
5211 
5212 /// NoCapture attribute for function arguments.
5213 struct AANoCaptureArgument final : AANoCaptureImpl {
5214   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5215       : AANoCaptureImpl(IRP, A) {}
5216 
5217   /// See AbstractAttribute::trackStatistics()
5218   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5219 };
5220 
5221 /// NoCapture attribute for call site arguments.
5222 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5223   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5224       : AANoCaptureImpl(IRP, A) {}
5225 
5226   /// See AbstractAttribute::initialize(...).
5227   void initialize(Attributor &A) override {
5228     if (Argument *Arg = getAssociatedArgument())
5229       if (Arg->hasByValAttr())
5230         indicateOptimisticFixpoint();
5231     AANoCaptureImpl::initialize(A);
5232   }
5233 
5234   /// See AbstractAttribute::updateImpl(...).
5235   ChangeStatus updateImpl(Attributor &A) override {
5236     // TODO: Once we have call site specific value information we can provide
5237     //       call site specific liveness information and then it makes
5238     //       sense to specialize attributes for call site arguments instead of
5239     //       redirecting requests to the callee argument.
5240     Argument *Arg = getAssociatedArgument();
5241     if (!Arg)
5242       return indicatePessimisticFixpoint();
5243     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5244     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5245     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5246   }
5247 
5248   /// See AbstractAttribute::trackStatistics()
5249   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
5250 };
5251 
5252 /// NoCapture attribute for floating values.
5253 struct AANoCaptureFloating final : AANoCaptureImpl {
5254   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5255       : AANoCaptureImpl(IRP, A) {}
5256 
5257   /// See AbstractAttribute::trackStatistics()
5258   void trackStatistics() const override {
5259     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5260   }
5261 };
5262 
5263 /// NoCapture attribute for function return value.
5264 struct AANoCaptureReturned final : AANoCaptureImpl {
5265   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5266       : AANoCaptureImpl(IRP, A) {
5267     llvm_unreachable("NoCapture is not applicable to function returns!");
5268   }
5269 
5270   /// See AbstractAttribute::initialize(...).
5271   void initialize(Attributor &A) override {
5272     llvm_unreachable("NoCapture is not applicable to function returns!");
5273   }
5274 
5275   /// See AbstractAttribute::updateImpl(...).
5276   ChangeStatus updateImpl(Attributor &A) override {
5277     llvm_unreachable("NoCapture is not applicable to function returns!");
5278   }
5279 
5280   /// See AbstractAttribute::trackStatistics()
5281   void trackStatistics() const override {}
5282 };
5283 
5284 /// NoCapture attribute deduction for a call site return value.
5285 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5286   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5287       : AANoCaptureImpl(IRP, A) {}
5288 
5289   /// See AbstractAttribute::initialize(...).
5290   void initialize(Attributor &A) override {
5291     const Function *F = getAnchorScope();
5292     // Check what state the associated function can actually capture.
5293     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5294   }
5295 
5296   /// See AbstractAttribute::trackStatistics()
5297   void trackStatistics() const override {
5298     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5299   }
5300 };
5301 } // namespace
5302 
5303 /// ------------------ Value Simplify Attribute ----------------------------
5304 
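     // The union below follows the value-simplify lattice (roughly; see
     // AA::combineOptionalValuesInAAValueLatice for the exact, type-aware
     // rules): combining the unset state with a value V yields V, combining
     // equal values keeps them, and combining two different concrete values
     // collapses to nullptr, the invalid state.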
5305 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5306   // FIXME: Add typecast support.
5307   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5308       SimplifiedAssociatedValue, Other, Ty);
5309   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5310     return false;
5311 
5312   LLVM_DEBUG({
5313     if (SimplifiedAssociatedValue)
5314       dbgs() << "[ValueSimplify] is assumed to be "
5315              << **SimplifiedAssociatedValue << "\n";
5316     else
5317       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5318   });
5319   return true;
5320 }
5321 
5322 namespace {
5323 struct AAValueSimplifyImpl : AAValueSimplify {
5324   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5325       : AAValueSimplify(IRP, A) {}
5326 
5327   /// See AbstractAttribute::initialize(...).
5328   void initialize(Attributor &A) override {
5329     if (getAssociatedValue().getType()->isVoidTy())
5330       indicatePessimisticFixpoint();
5331     if (A.hasSimplificationCallback(getIRPosition()))
5332       indicatePessimisticFixpoint();
5333   }
5334 
5335   /// See AbstractAttribute::getAsStr().
5336   const std::string getAsStr() const override {
5337     LLVM_DEBUG({
5338       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5339       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5340         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5341     });
5342     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5343                           : "not-simple";
5344   }
5345 
5346   /// See AbstractAttribute::trackStatistics()
5347   void trackStatistics() const override {}
5348 
5349   /// See AAValueSimplify::getAssumedSimplifiedValue()
5350   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5351     return SimplifiedAssociatedValue;
5352   }
5353 
5354   /// Ensure the return value is \p V with type \p Ty, if not possible return
5355   /// nullptr. If \p Check is true we will only verify such an operation would
5356   /// succeed and return a non-nullptr value if that is the case. No IR is
5357   /// generated or modified.
5358   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5359                            bool Check) {
5360     if (auto *TypedV = AA::getWithType(V, Ty))
5361       return TypedV;
5362     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5363       return Check ? &V
5364                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5365                                                                       "", CtxI);
5366     return nullptr;
5367   }
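  // For illustration (assumed example): if `i8* %p` is requested as `i32*`
  // and AA::getWithType cannot provide it directly, pointer types are
  // losslessly bitcastable, so the non-Check mode materializes
  // `%c = bitcast i8* %p to i32*` right before the context instruction.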
5368 
5369   /// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
5370   /// If \p Check is true we will only verify such an operation would succeed and
5371   /// return a non-nullptr value if that is the case. No IR is generated or
5372   /// modified.
5373   static Value *reproduceInst(Attributor &A,
5374                               const AbstractAttribute &QueryingAA,
5375                               Instruction &I, Type &Ty, Instruction *CtxI,
5376                               bool Check, ValueToValueMapTy &VMap) {
5377     assert(CtxI && "Cannot reproduce an instruction without context!");
5378     if (Check && (I.mayReadFromMemory() ||
5379                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5380                                                 /* TLI */ nullptr)))
5381       return nullptr;
5382     for (Value *Op : I.operands()) {
5383       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5384       if (!NewOp) {
5385         assert(Check && "Manifest of new value unexpectedly failed!");
5386         return nullptr;
5387       }
5388       if (!Check)
5389         VMap[Op] = NewOp;
5390     }
5391     if (Check)
5392       return &I;
5393 
5394     Instruction *CloneI = I.clone();
5395     // TODO: Try to salvage debug information here.
5396     CloneI->setDebugLoc(DebugLoc());
5397     VMap[&I] = CloneI;
5398     CloneI->insertBefore(CtxI);
5399     RemapInstruction(CloneI, VMap);
5400     return CloneI;
5401   }
5402 
5403   /// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
5404   /// If \p Check is true we will only verify such an operation would succeed and
5405   /// return a non-nullptr value if that is the case. No IR is generated or
5406   /// modified.
5407   static Value *reproduceValue(Attributor &A,
5408                                const AbstractAttribute &QueryingAA, Value &V,
5409                                Type &Ty, Instruction *CtxI, bool Check,
5410                                ValueToValueMapTy &VMap) {
5411     if (const auto &NewV = VMap.lookup(&V))
5412       return NewV;
5413     bool UsedAssumedInformation = false;
5414     Optional<Value *> SimpleV = A.getAssumedSimplified(
5415         V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
5416     if (!SimpleV.has_value())
5417       return PoisonValue::get(&Ty);
5418     Value *EffectiveV = &V;
5419     if (SimpleV.value())
5420       EffectiveV = SimpleV.value();
5421     if (auto *C = dyn_cast<Constant>(EffectiveV))
5422       return C;
5423     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5424                                       A.getInfoCache()))
5425       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5426     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5427       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5428         return ensureType(A, *NewV, Ty, CtxI, Check);
5429     return nullptr;
5430   }
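  // For illustration (assumed example): reproducing `%a = add i32 %x, 1` at a
  // call site first reproduces %x there, then clones the add, provided it is
  // speculatable and does not read memory, and remaps its operands via VMap.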
5431 
5432   /// Return a value we can use as replacement for the associated one, or
5433   /// nullptr if we don't have one that makes sense.
5434   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5435     Value *NewV = SimplifiedAssociatedValue
5436                       ? SimplifiedAssociatedValue.value()
5437                       : UndefValue::get(getAssociatedType());
5438     if (NewV && NewV != &getAssociatedValue()) {
5439       ValueToValueMapTy VMap;
5440     // First verify we can reproduce the value with the required type at the
5441       // context location before we actually start modifying the IR.
5442       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5443                          /* CheckOnly */ true, VMap))
5444         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5445                               /* CheckOnly */ false, VMap);
5446     }
5447     return nullptr;
5448   }
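  // Note: the CheckOnly dry run above guarantees the second, IR-modifying
  // call cannot fail halfway through, so no partially cloned instructions are
  // ever left behind in the function.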
5449 
5450   /// Helper function for querying AAValueSimplify and updating the candidate.
5451   /// \param IRP The value position we are trying to unify with SimplifiedValue
5452   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5453                       const IRPosition &IRP, bool Simplify = true) {
5454     bool UsedAssumedInformation = false;
5455     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5456     if (Simplify)
5457       QueryingValueSimplified = A.getAssumedSimplified(
5458           IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
5459     return unionAssumed(QueryingValueSimplified);
5460   }
5461 
5462   /// Returns true if a candidate was found.
5463   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5464     if (!getAssociatedValue().getType()->isIntegerTy())
5465       return false;
5466 
5467     // This will also pass the call base context.
5468     const auto &AA =
5469         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5470 
5471     Optional<Constant *> COpt = AA.getAssumedConstant(A);
5472 
5473     if (!COpt) {
5474       SimplifiedAssociatedValue = llvm::None;
5475       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5476       return true;
5477     }
5478     if (auto *C = *COpt) {
5479       SimplifiedAssociatedValue = C;
5480       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5481       return true;
5482     }
5483     return false;
5484   }
5485 
5486   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5487     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5488       return true;
5489     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5490       return true;
5491     return false;
5492   }
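  // For illustration (assumed example): if `%x = and i32 %y, 1` has the known
  // constant range [0, 1), AAValueConstantRange reports the single constant
  // `i32 0`, which then becomes the simplified value for %x.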
5493 
5494   /// See AbstractAttribute::manifest(...).
5495   ChangeStatus manifest(Attributor &A) override {
5496     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5497     for (auto &U : getAssociatedValue().uses()) {
5498       // Check if we need to adjust the insertion point to make sure the IR is
5499       // valid.
5500       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5501       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5502         IP = PHI->getIncomingBlock(U)->getTerminator();
5503       if (auto *NewV = manifestReplacementValue(A, IP)) {
5504         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5505                           << " -> " << *NewV << " :: " << *this << "\n");
5506         if (A.changeUseAfterManifest(U, *NewV))
5507           Changed = ChangeStatus::CHANGED;
5508       }
5509     }
5510 
5511     return Changed | AAValueSimplify::manifest(A);
5512   }
5513 
5514   /// See AbstractState::indicatePessimisticFixpoint(...).
5515   ChangeStatus indicatePessimisticFixpoint() override {
5516     SimplifiedAssociatedValue = &getAssociatedValue();
5517     return AAValueSimplify::indicatePessimisticFixpoint();
5518   }
5519 };
5520 
5521 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5522   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5523       : AAValueSimplifyImpl(IRP, A) {}
5524 
5525   void initialize(Attributor &A) override {
5526     AAValueSimplifyImpl::initialize(A);
5527     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5528       indicatePessimisticFixpoint();
5529     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5530                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5531                 /* IgnoreSubsumingPositions */ true))
5532       indicatePessimisticFixpoint();
5533   }
5534 
5535   /// See AbstractAttribute::updateImpl(...).
5536   ChangeStatus updateImpl(Attributor &A) override {
5537     // Byval is only replaceable if it is readonly; otherwise we would write into
5538     // the replaced value and not the copy that byval creates implicitly.
5539     Argument *Arg = getAssociatedArgument();
5540     if (Arg->hasByValAttr()) {
5541       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5542       //       there is no race by not copying a constant byval.
5543       bool IsKnown;
5544       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5545         return indicatePessimisticFixpoint();
5546     }
5547 
5548     auto Before = SimplifiedAssociatedValue;
5549 
5550     auto PredForCallSite = [&](AbstractCallSite ACS) {
5551       const IRPosition &ACSArgPos =
5552           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5553       // Check if a corresponding argument was found or if it is not
5554       // associated (which can happen for callback calls).
5555       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5556         return false;
5557 
5558       // Simplify the argument operand explicitly and check if the result is
5559       // valid in the current scope. This avoids referring to simplified values
5560       // in other functions, e.g., we don't want to say an argument in a
5561       // static function is actually an argument in a different function.
5562       bool UsedAssumedInformation = false;
5563       Optional<Constant *> SimpleArgOp =
5564           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5565       if (!SimpleArgOp)
5566         return true;
5567       if (!SimpleArgOp.value())
5568         return false;
5569       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5570         return false;
5571       return unionAssumed(*SimpleArgOp);
5572     };
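    // For illustration (assumed example): if every call site passes the
    // constant `i32 7` here, each PredForCallSite query unions 7 into the
    // assumed state; one call site with a different constant collapses the
    // candidate instead.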
5573 
5574     // Generate an answer specific to a call site context.
5575     bool Success;
5576     bool UsedAssumedInformation = false;
5577     if (hasCallBaseContext() &&
5578         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5579       Success = PredForCallSite(
5580           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5581     else
5582       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5583                                        UsedAssumedInformation);
5584 
5585     if (!Success)
5586       if (!askSimplifiedValueForOtherAAs(A))
5587         return indicatePessimisticFixpoint();
5588 
5589     // If a candidate was found in this update, return CHANGED.
5590     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5591                                                : ChangeStatus::CHANGED;
5592   }
5593 
5594   /// See AbstractAttribute::trackStatistics()
5595   void trackStatistics() const override {
5596     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5597   }
5598 };
5599 
5600 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5601   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5602       : AAValueSimplifyImpl(IRP, A) {}
5603 
5604   /// See AAValueSimplify::getAssumedSimplifiedValue()
5605   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5606     if (!isValidState())
5607       return nullptr;
5608     return SimplifiedAssociatedValue;
5609   }
5610 
5611   /// See AbstractAttribute::updateImpl(...).
5612   ChangeStatus updateImpl(Attributor &A) override {
5613     auto Before = SimplifiedAssociatedValue;
5614 
5615     auto ReturnInstCB = [&](Instruction &I) {
5616       auto &RI = cast<ReturnInst>(I);
5617       return checkAndUpdate(
5618           A, *this,
5619           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5620     };
5621 
5622     bool UsedAssumedInformation = false;
5623     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5624                                    UsedAssumedInformation))
5625       if (!askSimplifiedValueForOtherAAs(A))
5626         return indicatePessimisticFixpoint();
5627 
5628     // If a candidate was found in this update, return CHANGED.
5629     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5630                                                : ChangeStatus::CHANGED;
5631   }
5632 
5633   ChangeStatus manifest(Attributor &A) override {
5634     // We queried AAValueSimplify for the returned values so they will be
5635     // replaced if a simplified form was found. Nothing to do here.
5636     return ChangeStatus::UNCHANGED;
5637   }
5638 
5639   /// See AbstractAttribute::trackStatistics()
5640   void trackStatistics() const override {
5641     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5642   }
5643 };
5644 
5645 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5646   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5647       : AAValueSimplifyImpl(IRP, A) {}
5648 
5649   /// See AbstractAttribute::initialize(...).
5650   void initialize(Attributor &A) override {
5651     AAValueSimplifyImpl::initialize(A);
5652     Value &V = getAnchorValue();
5653 
5654     // TODO: Handle other kinds of values.
5655     if (isa<Constant>(V))
5656       indicatePessimisticFixpoint();
5657   }
5658 
5659   /// See AbstractAttribute::updateImpl(...).
5660   ChangeStatus updateImpl(Attributor &A) override {
5661     auto Before = SimplifiedAssociatedValue;
5662     if (!askSimplifiedValueForOtherAAs(A))
5663       return indicatePessimisticFixpoint();
5664 
5665     // If a candidate was found in this update, return CHANGED.
5666     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5667                                                : ChangeStatus::CHANGED;
5668   }
5669 
5670   /// See AbstractAttribute::trackStatistics()
5671   void trackStatistics() const override {
5672     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5673   }
5674 };
5675 
5676 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5677   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5678       : AAValueSimplifyImpl(IRP, A) {}
5679 
5680   /// See AbstractAttribute::initialize(...).
5681   void initialize(Attributor &A) override {
5682     SimplifiedAssociatedValue = nullptr;
5683     indicateOptimisticFixpoint();
5684   }
5685   /// See AbstractAttribute::updateImpl(...).
5686   ChangeStatus updateImpl(Attributor &A) override {
5687     llvm_unreachable(
5688         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5689   }
5690   /// See AbstractAttribute::trackStatistics()
5691   void trackStatistics() const override {
5692     STATS_DECLTRACK_FN_ATTR(value_simplify)
5693   }
5694 };
5695 
5696 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5697   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5698       : AAValueSimplifyFunction(IRP, A) {}
5699   /// See AbstractAttribute::trackStatistics()
5700   void trackStatistics() const override {
5701     STATS_DECLTRACK_CS_ATTR(value_simplify)
5702   }
5703 };
5704 
5705 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5706   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5707       : AAValueSimplifyImpl(IRP, A) {}
5708 
5709   void initialize(Attributor &A) override {
5710     AAValueSimplifyImpl::initialize(A);
5711     Function *Fn = getAssociatedFunction();
5712     if (!Fn) {
5713       indicatePessimisticFixpoint();
5714       return;
5715     }
5716     for (Argument &Arg : Fn->args()) {
5717       if (Arg.hasReturnedAttr()) {
5718         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5719                                                  Arg.getArgNo());
5720         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5721             checkAndUpdate(A, *this, IRP))
5722           indicateOptimisticFixpoint();
5723         else
5724           indicatePessimisticFixpoint();
5725         return;
5726       }
5727     }
5728   }
5729 
5730   /// See AbstractAttribute::updateImpl(...).
5731   ChangeStatus updateImpl(Attributor &A) override {
5732     auto Before = SimplifiedAssociatedValue;
5733     auto &RetAA = A.getAAFor<AAReturnedValues>(
5734         *this, IRPosition::function(*getAssociatedFunction()),
5735         DepClassTy::REQUIRED);
5736     auto PredForReturned =
5737         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5738           bool UsedAssumedInformation = false;
5739           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5740               &RetVal, *cast<CallBase>(getCtxI()), *this,
5741               UsedAssumedInformation);
5742           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5743               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5744           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5745         };
5746     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5747       if (!askSimplifiedValueForOtherAAs(A))
5748         return indicatePessimisticFixpoint();
5749     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5750                                                : ChangeStatus::CHANGED;
5751   }
5752 
5753   void trackStatistics() const override {
5754     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5755   }
5756 };
5757 
5758 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5759   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5760       : AAValueSimplifyFloating(IRP, A) {}
5761 
5762   /// See AbstractAttribute::manifest(...).
5763   ChangeStatus manifest(Attributor &A) override {
5764     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5765     // TODO: We should avoid simplification duplication to begin with.
5766     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
5767         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
5768     if (FloatAA && FloatAA->getState().isValidState())
5769       return Changed;
5770 
5771     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
5772       Use &U = cast<CallBase>(&getAnchorValue())
5773                    ->getArgOperandUse(getCallSiteArgNo());
5774       if (A.changeUseAfterManifest(U, *NewV))
5775         Changed = ChangeStatus::CHANGED;
5776     }
5777 
5778     return Changed | AAValueSimplify::manifest(A);
5779   }
5780 
5781   void trackStatistics() const override {
5782     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5783   }
5784 };
5785 } // namespace
5786 
5787 /// ----------------------- Heap-To-Stack Conversion ---------------------------
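//
// Illustrative transformation (assumed example IR): a known-size allocation
// whose uses do not escape and whose matching free is always executed becomes
// a stack allocation, and the free call is removed:
//
//   %p = call i8* @malloc(i64 8)        ; becomes: %p = alloca i8, i64 8
//   call void @use(i8* nocapture %p)    ;          call void @use(...)
//   call void @free(i8* %p)             ;          (free call removed)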
5788 namespace {
5789 struct AAHeapToStackFunction final : public AAHeapToStack {
5790 
5791   struct AllocationInfo {
5792     /// The call that allocates the memory.
5793     CallBase *const CB;
5794 
5795     /// The library function id for the allocation.
5796     LibFunc LibraryFunctionId = NotLibFunc;
5797 
5798     /// The status wrt. a rewrite.
5799     enum {
5800       STACK_DUE_TO_USE,
5801       STACK_DUE_TO_FREE,
5802       INVALID,
5803     } Status = STACK_DUE_TO_USE;
5804 
5805     /// Flag to indicate if we encountered a use that might free this allocation
5806     /// but which is not in the deallocation infos.
5807     bool HasPotentiallyFreeingUnknownUses = false;
5808 
5809     /// Flag to indicate that we should place the new alloca in the function
5810     /// entry block rather than where the call site (CB) is.
5811     bool MoveAllocaIntoEntry = true;
5812 
5813     /// The set of free calls that use this allocation.
5814     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
5815   };
5816 
5817   struct DeallocationInfo {
5818     /// The call that deallocates the memory.
5819     CallBase *const CB;
5820     /// The value freed by the call.
5821     Value *FreedOp;
5822 
5823     /// Flag to indicate if we don't know all objects this deallocation might
5824     /// free.
5825     bool MightFreeUnknownObjects = false;
5826 
5827     /// The set of allocation calls that are potentially freed.
5828     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
5829   };
5830 
5831   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5832       : AAHeapToStack(IRP, A) {}
5833 
5834   ~AAHeapToStackFunction() {
5835     // Ensure we call the destructor so we release any memory allocated in the
5836     // sets.
5837     for (auto &It : AllocationInfos)
5838       It.second->~AllocationInfo();
5839     for (auto &It : DeallocationInfos)
5840       It.second->~DeallocationInfo();
5841   }
5842 
5843   void initialize(Attributor &A) override {
5844     AAHeapToStack::initialize(A);
5845 
5846     const Function *F = getAnchorScope();
5847     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5848 
5849     auto AllocationIdentifierCB = [&](Instruction &I) {
5850       CallBase *CB = dyn_cast<CallBase>(&I);
5851       if (!CB)
5852         return true;
5853       if (Value *FreedOp = getFreedOperand(CB, TLI)) {
5854         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB, FreedOp};
5855         return true;
5856       }
5857       // To do heap to stack, we need to know that the allocation itself is
5858       // removable once uses are rewritten, and that we can initialize the
5859       // alloca to the same pattern as the original allocation result.
5860       if (isRemovableAlloc(CB, TLI)) {
5861         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
5862         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
5863           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
5864           AllocationInfos[CB] = AI;
5865           if (TLI)
5866             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5867         }
5868       }
5869       return true;
5870     };
5871 
5872     bool UsedAssumedInformation = false;
5873     bool Success = A.checkForAllCallLikeInstructions(
5874         AllocationIdentifierCB, *this, UsedAssumedInformation,
5875         /* CheckBBLivenessOnly */ false,
5876         /* CheckPotentiallyDead */ true);
5877     (void)Success;
5878     assert(Success && "Did not expect the call base visit callback to fail!");
5879 
5880     Attributor::SimplifictionCallbackTy SCB =
5881         [](const IRPosition &, const AbstractAttribute *,
5882            bool &) -> Optional<Value *> { return nullptr; };
5883     for (const auto &It : AllocationInfos)
5884       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
5885                                        SCB);
5886     for (const auto &It : DeallocationInfos)
5887       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
5888                                        SCB);
5889   }
5890 
5891   const std::string getAsStr() const override {
5892     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5893     for (const auto &It : AllocationInfos) {
5894       if (It.second->Status == AllocationInfo::INVALID)
5895         ++NumInvalidMallocs;
5896       else
5897         ++NumH2SMallocs;
5898     }
5899     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5900            std::to_string(NumInvalidMallocs);
5901   }
5902 
5903   /// See AbstractAttribute::trackStatistics().
5904   void trackStatistics() const override {
5905     STATS_DECL(
5906         MallocCalls, Function,
5907         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5908     for (auto &It : AllocationInfos)
5909       if (It.second->Status != AllocationInfo::INVALID)
5910         ++BUILD_STAT_NAME(MallocCalls, Function);
5911   }
5912 
5913   bool isAssumedHeapToStack(const CallBase &CB) const override {
5914     if (isValidState())
5915       if (AllocationInfo *AI =
5916               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
5917         return AI->Status != AllocationInfo::INVALID;
5918     return false;
5919   }
5920 
5921   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5922     if (!isValidState())
5923       return false;
5924 
5925     for (auto &It : AllocationInfos) {
5926       AllocationInfo &AI = *It.second;
5927       if (AI.Status == AllocationInfo::INVALID)
5928         continue;
5929 
5930       if (AI.PotentialFreeCalls.count(&CB))
5931         return true;
5932     }
5933 
5934     return false;
5935   }
5936 
5937   ChangeStatus manifest(Attributor &A) override {
5938     assert(getState().isValidState() &&
5939            "Attempted to manifest an invalid state!");
5940 
5941     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5942     Function *F = getAnchorScope();
5943     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5944 
5945     for (auto &It : AllocationInfos) {
5946       AllocationInfo &AI = *It.second;
5947       if (AI.Status == AllocationInfo::INVALID)
5948         continue;
5949 
5950       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5951         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5952         A.deleteAfterManifest(*FreeCall);
5953         HasChanged = ChangeStatus::CHANGED;
5954       }
5955 
5956       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5957                         << "\n");
5958 
5959       auto Remark = [&](OptimizationRemark OR) {
5960         LibFunc IsAllocShared;
5961         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5962           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5963             return OR << "Moving globalized variable to the stack.";
5964         return OR << "Moving memory allocation from the heap to the stack.";
5965       };
5966       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5967         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5968       else
5969         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5970 
5971       const DataLayout &DL = A.getInfoCache().getDL();
5972       Value *Size;
5973       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5974       if (SizeAPI) {
5975         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5976       } else {
5977         LLVMContext &Ctx = AI.CB->getContext();
5978         ObjectSizeOpts Opts;
5979         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
5980         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
5981         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
5982                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
5983         Size = SizeOffsetPair.first;
5984       }
5985 
5986       Instruction *IP =
5987           AI.MoveAllocaIntoEntry ? &F->getEntryBlock().front() : AI.CB;
5988 
5989       Align Alignment(1);
5990       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
5991         Alignment = std::max(Alignment, *RetAlign);
5992       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
5993         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
5994         assert(AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 &&
5995                "Expected an alignment during manifest!");
5996         Alignment = std::max(
5997             Alignment, assumeAligned(AlignmentAPI.value().getZExtValue()));
5998       }
5999 
6000       // TODO: Hoist the alloca towards the function entry.
6001       unsigned AS = DL.getAllocaAddrSpace();
6002       Instruction *Alloca =
6003           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
6004                          AI.CB->getName() + ".h2s", IP);
6005 
6006       if (Alloca->getType() != AI.CB->getType())
6007         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6008             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6009 
6010       auto *I8Ty = Type::getInt8Ty(F->getContext());
6011       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6012       assert(InitVal &&
6013              "Must be able to materialize initial memory state of allocation");
6014 
6015       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6016 
6017       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6018         auto *NBB = II->getNormalDest();
6019         BranchInst::Create(NBB, AI.CB->getParent());
6020         A.deleteAfterManifest(*AI.CB);
6021       } else {
6022         A.deleteAfterManifest(*AI.CB);
6023       }
6024 
6025       // Initialize the alloca with the same value as used by the allocation
6026       // function. We can skip undef as the initial value of an alloca is
6027       // undef, and the memset would simply end up being DSEd.
6028       if (!isa<UndefValue>(InitVal)) {
6029         IRBuilder<> Builder(Alloca->getNextNode());
6030         // TODO: Use alignment above if align!=1
6031         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6032       }
6033       HasChanged = ChangeStatus::CHANGED;
6034     }
6035 
6036     return HasChanged;
6037   }
6038 
6039   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6040                            Value &V) {
6041     bool UsedAssumedInformation = false;
6042     Optional<Constant *> SimpleV =
6043         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6044     if (!SimpleV)
6045       return APInt(64, 0);
6046     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.value()))
6047       return CI->getValue();
6048     return llvm::None;
6049   }
6050 
6051   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6052                           AllocationInfo &AI) {
6053     auto Mapper = [&](const Value *V) -> const Value * {
6054       bool UsedAssumedInformation = false;
6055       if (Optional<Constant *> SimpleV =
6056               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6057         if (*SimpleV)
6058           return *SimpleV;
6059       return V;
6060     };
6061 
6062     const Function *F = getAnchorScope();
6063     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6064     return getAllocSize(AI.CB, TLI, Mapper);
6065   }
6066 
6067   /// Collection of all malloc-like calls in a function with associated
6068   /// information.
6069   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6070 
6071   /// Collection of all free-like calls in a function with associated
6072   /// information.
6073   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6074 
6075   ChangeStatus updateImpl(Attributor &A) override;
6076 };
6077 
6078 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6079   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6080   const Function *F = getAnchorScope();
6081   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6082 
6083   const auto &LivenessAA =
6084       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6085 
6086   MustBeExecutedContextExplorer &Explorer =
6087       A.getInfoCache().getMustBeExecutedContextExplorer();
6088 
6089   bool StackIsAccessibleByOtherThreads =
6090       A.getInfoCache().stackIsAccessibleByOtherThreads();
6091 
6092   LoopInfo *LI =
6093       A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F);
6094   Optional<bool> MayContainIrreducibleControl;
6095   auto IsInLoop = [&](BasicBlock &BB) {
6096     if (&F->getEntryBlock() == &BB)
6097       return false;
6098     if (!MayContainIrreducibleControl.has_value())
6099       MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI);
6100     if (MayContainIrreducibleControl.value())
6101       return true;
6102     if (!LI)
6103       return true;
6104     return LI->getLoopFor(&BB) != nullptr;
6105   };
6106 
6107   // Flag to ensure we update our deallocation information at most once per
6108   // updateImpl call and only if we use the free check reasoning.
6109   bool HasUpdatedFrees = false;
6110 
6111   auto UpdateFrees = [&]() {
6112     HasUpdatedFrees = true;
6113 
6114     for (auto &It : DeallocationInfos) {
6115       DeallocationInfo &DI = *It.second;
6116       // For now we cannot use deallocations that have unknown inputs, skip
6117       // them.
6118       if (DI.MightFreeUnknownObjects)
6119         continue;
6120 
6121       // No need to analyze dead calls, ignore them instead.
6122       bool UsedAssumedInformation = false;
6123       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6124                           /* CheckBBLivenessOnly */ true))
6125         continue;
6126 
6127       // Use the non-optimistic version to get the freed object.
6128       Value *Obj = getUnderlyingObject(DI.FreedOp);
6129       if (!Obj) {
6130         LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n");
6131         DI.MightFreeUnknownObjects = true;
6132         continue;
6133       }
6134 
6135       // Free of null and undef can be ignored as no-ops (or UB in the latter
6136       // case).
6137       if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6138         continue;
6139 
6140       CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6141       if (!ObjCB) {
6142         LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj
6143                           << "\n");
6144         DI.MightFreeUnknownObjects = true;
6145         continue;
6146       }
6147 
6148       AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6149       if (!AI) {
6150         LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6151                           << "\n");
6152         DI.MightFreeUnknownObjects = true;
6153         continue;
6154       }
6155 
6156       DI.PotentialAllocationCalls.insert(ObjCB);
6157     }
6158   };
6159 
6160   auto FreeCheck = [&](AllocationInfo &AI) {
6161     // If the stack is not accessible by other threads, the "must-free" logic
6162     // doesn't apply as the pointer could be shared and needs to be placed in
6163     // "shareable" memory.
6164     if (!StackIsAccessibleByOtherThreads) {
6165       auto &NoSyncAA =
6166           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6167       if (!NoSyncAA.isAssumedNoSync()) {
6168         LLVM_DEBUG(
6169             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6170                       "other threads and function is not nosync:\n");
6171         return false;
6172       }
6173     }
6174     if (!HasUpdatedFrees)
6175       UpdateFrees();
6176 
6177     // TODO: Allow multi-exit functions that have different free calls.
6178     if (AI.PotentialFreeCalls.size() != 1) {
6179       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6180                         << AI.PotentialFreeCalls.size() << "\n");
6181       return false;
6182     }
6183     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6184     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6185     if (!DI) {
6186       LLVM_DEBUG(
6187           dbgs() << "[H2S] unique free call was not known as deallocation call "
6188                  << *UniqueFree << "\n");
6189       return false;
6190     }
6191     if (DI->MightFreeUnknownObjects) {
6192       LLVM_DEBUG(
6193           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6194       return false;
6195     }
6196     if (DI->PotentialAllocationCalls.empty())
6197       return true;
6198     if (DI->PotentialAllocationCalls.size() > 1) {
6199       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6200                         << DI->PotentialAllocationCalls.size()
6201                         << " different allocations\n");
6202       return false;
6203     }
6204     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6205       LLVM_DEBUG(
6206           dbgs()
6207           << "[H2S] unique free call not known to free this allocation but "
6208           << **DI->PotentialAllocationCalls.begin() << "\n");
6209       return false;
6210     }
6211     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6212     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6213       LLVM_DEBUG(
6214           dbgs()
6215           << "[H2S] unique free call might not be executed with the allocation "
6216           << *UniqueFree << "\n");
6217       return false;
6218     }
6219     return true;
6220   };
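  // Note: the Explorer query above is what rejects, e.g. (assumed example), a
  // free that only executes on one branch:
  //
  //   %p = call i8* @malloc(i64 8)
  //   br i1 %c, label %do_free, label %exit   ; free is not must-executed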
6221 
6222   auto UsesCheck = [&](AllocationInfo &AI) {
6223     bool ValidUsesOnly = true;
6224 
6225     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6226       Instruction *UserI = cast<Instruction>(U.getUser());
6227       if (isa<LoadInst>(UserI))
6228         return true;
6229       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6230         if (SI->getValueOperand() == U.get()) {
6231           LLVM_DEBUG(dbgs()
6232                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6233           ValidUsesOnly = false;
6234         } else {
6235           // A store into the malloc'ed memory is fine.
6236         }
6237         return true;
6238       }
6239       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6240         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6241           return true;
6242         if (DeallocationInfos.count(CB)) {
6243           AI.PotentialFreeCalls.insert(CB);
6244           return true;
6245         }
6246 
6247         unsigned ArgNo = CB->getArgOperandNo(&U);
6248 
6249         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6250             *this, IRPosition::callsite_argument(*CB, ArgNo),
6251             DepClassTy::OPTIONAL);
6252 
6253         // If a call site argument use is nofree, we are fine.
6254         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6255             *this, IRPosition::callsite_argument(*CB, ArgNo),
6256             DepClassTy::OPTIONAL);
6257 
6258         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6259         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6260         if (MaybeCaptured ||
6261             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6262              MaybeFreed)) {
6263           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6264 
6265           // Emit a missed remark if this is missed OpenMP globalization.
6266           auto Remark = [&](OptimizationRemarkMissed ORM) {
6267             return ORM
6268                    << "Could not move globalized variable to the stack. "
6269                       "Variable is potentially captured in call. Mark "
6270                       "parameter as `__attribute__((noescape))` to override.";
6271           };
6272 
6273           if (ValidUsesOnly &&
6274               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6275             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6276 
6277           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6278           ValidUsesOnly = false;
6279         }
6280         return true;
6281       }
6282 
6283       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6284           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6285         Follow = true;
6286         return true;
6287       }
6288       // Unknown user for which we cannot track uses further (in a way that
6289       // makes sense).
6290       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6291       ValidUsesOnly = false;
6292       return true;
6293     };
6294     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6295       return false;
6296     return ValidUsesOnly;
6297   };
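  // For illustration (assumed example), a use that stores the pointer itself
  // to memory escapes it and makes UsesCheck fail:
  //
  //   %p = call i8* @malloc(i64 8)
  //   store i8* %p, i8** @global   ; escaping store of %p, not convertible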
6298 
6299   // The actual update starts here. We look at all allocations and depending on
6300   // their status perform the appropriate check(s).
6301   for (auto &It : AllocationInfos) {
6302     AllocationInfo &AI = *It.second;
6303     if (AI.Status == AllocationInfo::INVALID)
6304       continue;
6305 
6306     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6307       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6308       if (!APAlign) {
6309         // Can't generate an alloca which respects the required alignment
6310         // on the allocation.
6311         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6312                           << "\n");
6313         AI.Status = AllocationInfo::INVALID;
6314         Changed = ChangeStatus::CHANGED;
6315         continue;
6316       }
6317       if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6318           !APAlign->isPowerOf2()) {
6319         LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6320                           << "\n");
6321         AI.Status = AllocationInfo::INVALID;
6322         Changed = ChangeStatus::CHANGED;
6323         continue;
6324       }
6325     }
6326 
6327     Optional<APInt> Size = getSize(A, *this, AI);
6328     if (MaxHeapToStackSize != -1) {
6329       if (!Size || Size.value().ugt(MaxHeapToStackSize)) {
6330         LLVM_DEBUG({
6331           if (!Size)
6332             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6333           else
6334             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6335                    << MaxHeapToStackSize << "\n";
6336         });
6337 
6338         AI.Status = AllocationInfo::INVALID;
6339         Changed = ChangeStatus::CHANGED;
6340         continue;
6341       }
6342     }
6343 
6344     switch (AI.Status) {
6345     case AllocationInfo::STACK_DUE_TO_USE:
6346       if (UsesCheck(AI))
6347         break;
6348       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6349       LLVM_FALLTHROUGH;
6350     case AllocationInfo::STACK_DUE_TO_FREE:
6351       if (FreeCheck(AI))
6352         break;
6353       AI.Status = AllocationInfo::INVALID;
6354       Changed = ChangeStatus::CHANGED;
6355       break;
6356     case AllocationInfo::INVALID:
6357       llvm_unreachable("Invalid allocations should never reach this point!");
6358     };
6359 
6360     // Check if we still think we can move it into the entry block.
6361     if (AI.MoveAllocaIntoEntry &&
6362         (!Size.has_value() || IsInLoop(*AI.CB->getParent())))
6363       AI.MoveAllocaIntoEntry = false;
6364   }
6365 
6366   return Changed;
6367 }
6368 } // namespace
6369 
6370 /// ----------------------- Privatizable Pointers ------------------------------
6371 namespace {
6372 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6373   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6374       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6375 
6376   ChangeStatus indicatePessimisticFixpoint() override {
6377     AAPrivatizablePtr::indicatePessimisticFixpoint();
6378     PrivatizableType = nullptr;
6379     return ChangeStatus::CHANGED;
6380   }
6381 
6382   /// Identify the type we can choose for a private copy of the underlying
6383   /// argument. None means it is not clear yet, nullptr means there is none.
6384   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6385 
6386   /// Return a privatizable type that encloses both T0 and T1.
6387   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6388   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6389     if (!T0)
6390       return T1;
6391     if (!T1)
6392       return T0;
6393     if (T0 == T1)
6394       return T0;
6395     return nullptr;
6396   }
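  // For illustration: combineTypes(None, i32) == i32, combineTypes(i32, i32)
  // == i32, and combineTypes(i32, i64) == nullptr, i.e., there is no common
  // privatizable type for mismatching call sites yet.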
6397 
6398   Optional<Type *> getPrivatizableType() const override {
6399     return PrivatizableType;
6400   }
6401 
6402   const std::string getAsStr() const override {
6403     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6404   }
6405 
6406 protected:
6407   Optional<Type *> PrivatizableType;
6408 };
6409 
6410 // TODO: Do this for call site arguments (probably also other values) as well.
6411 
6412 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6413   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6414       : AAPrivatizablePtrImpl(IRP, A) {}
6415 
6416   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6417   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6418     // If this is a byval argument and we know all the call sites (so we can
6419     // rewrite them), there is no need to check them explicitly.
6420     bool UsedAssumedInformation = false;
6421     SmallVector<Attribute, 1> Attrs;
6422     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6423     if (!Attrs.empty() &&
6424         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6425                                true, UsedAssumedInformation))
6426       return Attrs[0].getValueAsType();
6427 
6428     Optional<Type *> Ty;
6429     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6430 
6431     // Make sure the associated call site argument has the same type at all call
6432     // sites and it is an allocation we know is safe to privatize; for now that
6433     // means we only allow alloca instructions.
6434     // TODO: We can additionally analyze the accesses in the callee to create
6435     //       the type from that information instead. That is a little more
6436     //       involved and will be done in a follow up patch.
6437     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6438       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6439       // Check if a corresponding argument was found or if it is one not
6440       // associated (which can happen for callback calls).
6441       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6442         return false;
6443 
6444       // Check that all call sites agree on a type.
6445       auto &PrivCSArgAA =
6446           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6447       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6448 
6449       LLVM_DEBUG({
6450         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6451         if (CSTy && CSTy.value())
6452           CSTy.value()->print(dbgs());
6453         else if (CSTy)
6454           dbgs() << "<nullptr>";
6455         else
6456           dbgs() << "<none>";
6457       });
6458 
6459       Ty = combineTypes(Ty, CSTy);
6460 
6461       LLVM_DEBUG({
6462         dbgs() << " : New Type: ";
6463         if (Ty && Ty.value())
6464           Ty.value()->print(dbgs());
6465         else if (Ty)
6466           dbgs() << "<nullptr>";
6467         else
6468           dbgs() << "<none>";
6469         dbgs() << "\n";
6470       });
6471 
6472       return !Ty || Ty.value();
6473     };
6474 
6475     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6476                                 UsedAssumedInformation))
6477       return nullptr;
6478     return Ty;
6479   }
6480 
6481   /// See AbstractAttribute::updateImpl(...).
6482   ChangeStatus updateImpl(Attributor &A) override {
6483     PrivatizableType = identifyPrivatizableType(A);
6484     if (!PrivatizableType)
6485       return ChangeStatus::UNCHANGED;
6486     if (!PrivatizableType.value())
6487       return indicatePessimisticFixpoint();
6488 
6489     // The dependence is optional so we don't give up once we give up on the
6490     // alignment.
6491     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6492                         DepClassTy::OPTIONAL);
6493 
6494     // Avoid arguments with padding for now.
6495     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6496         !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
6497       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6498       return indicatePessimisticFixpoint();
6499     }
6500 
6501     // Collect the types that will replace the privatizable type in the function
6502     // signature.
6503     SmallVector<Type *, 16> ReplacementTypes;
6504     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6505 
6506     // Verify callee and caller agree on how the promoted argument would be
6507     // passed.
6508     Function &Fn = *getIRPosition().getAnchorScope();
6509     const auto *TTI =
6510         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6511     if (!TTI) {
6512       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6513                         << Fn.getName() << "\n");
6514       return indicatePessimisticFixpoint();
6515     }
6516 
6517     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6518       CallBase *CB = ACS.getInstruction();
6519       return TTI->areTypesABICompatible(
6520           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6521     };
6522     bool UsedAssumedInformation = false;
6523     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6524                                 UsedAssumedInformation)) {
6525       LLVM_DEBUG(
6526           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6527                  << Fn.getName() << "\n");
6528       return indicatePessimisticFixpoint();
6529     }
6530 
6531     // Register a rewrite of the argument.
6532     Argument *Arg = getAssociatedArgument();
6533     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6534       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6535       return indicatePessimisticFixpoint();
6536     }
6537 
6538     unsigned ArgNo = Arg->getArgNo();
6539 
6540     // Helper to check whether, for the given call site, the associated argument
6541     // is passed to a callback where the privatization would be different.
6542     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6543       SmallVector<const Use *, 4> CallbackUses;
6544       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6545       for (const Use *U : CallbackUses) {
6546         AbstractCallSite CBACS(U);
6547         assert(CBACS && CBACS.isCallbackCall());
6548         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6549           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6550 
6551           LLVM_DEBUG({
6552             dbgs()
6553                 << "[AAPrivatizablePtr] Argument " << *Arg
6554                 << " check if it can be privatized in the context of its parent ("
6555                 << Arg->getParent()->getName()
6556                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6557                    "callback ("
6558                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6559                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6560                 << CBACS.getCallArgOperand(CBArg) << " vs "
6561                 << CB.getArgOperand(ArgNo) << "\n"
6562                 << "[AAPrivatizablePtr] " << CBArg << " : "
6563                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6564           });
6565 
6566           if (CBArgNo != int(ArgNo))
6567             continue;
6568           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6569               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6570           if (CBArgPrivAA.isValidState()) {
6571             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6572             if (!CBArgPrivTy)
6573               continue;
6574             if (CBArgPrivTy.value() == PrivatizableType)
6575               continue;
6576           }
6577 
6578           LLVM_DEBUG({
6579             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6580                    << " cannot be privatized in the context of its parent ("
6581                    << Arg->getParent()->getName()
6582                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6583                       "callback ("
6584                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6585                    << ").\n[AAPrivatizablePtr] for which the argument "
6586                       "privatization is not compatible.\n";
6587           });
6588           return false;
6589         }
6590       }
6591       return true;
6592     };
6593 
6594     // Helper to check whether, for the given call site, the associated argument
6595     // is passed to a direct call where the privatization would be different.
6596     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6597       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6598       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6599       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6600              "Expected a direct call operand for callback call operand");
6601 
6602       LLVM_DEBUG({
6603         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6604                << " check if it can be privatized in the context of its parent ("
6605                << Arg->getParent()->getName()
6606                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6607                   "direct call of ("
6608                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6609                << ").\n";
6610       });
6611 
6612       Function *DCCallee = DC->getCalledFunction();
6613       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6614         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6615             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6616             DepClassTy::REQUIRED);
6617         if (DCArgPrivAA.isValidState()) {
6618           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6619           if (!DCArgPrivTy)
6620             return true;
6621           if (DCArgPrivTy.value() == PrivatizableType)
6622             return true;
6623         }
6624       }
6625 
6626       LLVM_DEBUG({
6627         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6628                << " cannot be privatized in the context of its parent ("
6629                << Arg->getParent()->getName()
6630                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6631                   "direct call of ("
6632                << ACS.getInstruction()->getCalledFunction()->getName()
6633                << ").\n[AAPrivatizablePtr] for which the argument "
6634                   "privatization is not compatible.\n";
6635       });
6636       return false;
6637     };
6638 
6639     // Helper to check if the associated argument is used at the given abstract
6640     // call site in a way that is incompatible with the privatization assumed
6641     // here.
6642     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6643       if (ACS.isDirectCall())
6644         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6645       if (ACS.isCallbackCall())
6646         return IsCompatiblePrivArgOfDirectCS(ACS);
6647       return false;
6648     };
6649 
6650     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6651                                 UsedAssumedInformation))
6652       return indicatePessimisticFixpoint();
6653 
6654     return ChangeStatus::UNCHANGED;
6655   }
6656 
6657   /// Given a type to privatize \p PrivType, collect the constituents (which are
6658   /// used) in \p ReplacementTypes.
6659   static void
6660   identifyReplacementTypes(Type *PrivType,
6661                            SmallVectorImpl<Type *> &ReplacementTypes) {
6662     // TODO: For now we expand the privatization type to the fullest which can
6663     //       lead to dead arguments that need to be removed later.
6664     assert(PrivType && "Expected privatizable type!");
6665 
6666     // Traverse the type, extract constituent types on the outermost level.
6667     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6668       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6669         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6670     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6671       ReplacementTypes.append(PrivArrayType->getNumElements(),
6672                               PrivArrayType->getElementType());
6673     } else {
6674       ReplacementTypes.push_back(PrivType);
6675     }
6676   }
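  // For illustration (assumed example): `{ i32, i64 }` expands to the
  // replacement types [i32, i64], `[4 x float]` expands to four `float`
  // entries, and any other type is passed through unchanged.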
6677 
6678   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6679   /// The values needed are taken from the arguments of \p F starting at
6680   /// position \p ArgNo.
6681   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6682                                    unsigned ArgNo, Instruction &IP) {
6683     assert(PrivType && "Expected privatizable type!");
6684 
6685     IRBuilder<NoFolder> IRB(&IP);
6686     const DataLayout &DL = F.getParent()->getDataLayout();
6687 
6688     // Traverse the type, build GEPs and stores.
6689     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6690       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6691       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6692         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6693         Value *Ptr =
6694             constructPointer(PointeeTy, PrivType, &Base,
6695                              PrivStructLayout->getElementOffset(u), IRB, DL);
6696         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6697       }
6698     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6699       Type *PointeeTy = PrivArrayType->getElementType();
6700       Type *PointeePtrTy = PointeeTy->getPointerTo();
6701       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6702       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6703         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6704                                       u * PointeeTySize, IRB, DL);
6705         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6706       }
6707     } else {
6708       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6709     }
6710   }
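       // Illustrative IR sketch (hypothetical names) for PrivType == {i32, i64},
       // assuming the usual data layout:
       //   %f0 = ...                   ; constructPointer at offset 0
       //   store i32 %arg0, i32* %f0
       //   %f1 = ...                   ; constructPointer at offset 8
       //   store i64 %arg1, i64* %f1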
6711 
6712   /// Extract values from \p Base according to the type \p PrivType at the
6713   /// call position \p ACS. The values are appended to \p ReplacementValues.
6714   void createReplacementValues(Align Alignment, Type *PrivType,
6715                                AbstractCallSite ACS, Value *Base,
6716                                SmallVectorImpl<Value *> &ReplacementValues) {
6717     assert(Base && "Expected base value!");
6718     assert(PrivType && "Expected privatizable type!");
6719     Instruction *IP = ACS.getInstruction();
6720 
6721     IRBuilder<NoFolder> IRB(IP);
6722     const DataLayout &DL = IP->getModule()->getDataLayout();
6723 
6724     Type *PrivPtrType = PrivType->getPointerTo();
6725     if (Base->getType() != PrivPtrType)
6726       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6727           Base, PrivPtrType, "", ACS.getInstruction());
6728 
6729     // Traverse the type, build GEPs and loads.
6730     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6731       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6732       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6733         Type *PointeeTy = PrivStructType->getElementType(u);
6734         Value *Ptr =
6735             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6736                              PrivStructLayout->getElementOffset(u), IRB, DL);
6737         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6738         L->setAlignment(Alignment);
6739         ReplacementValues.push_back(L);
6740       }
6741     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6742       Type *PointeeTy = PrivArrayType->getElementType();
6743       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6744       Type *PointeePtrTy = PointeeTy->getPointerTo();
6745       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6746         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6747                                       u * PointeeTySize, IRB, DL);
6748         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6749         L->setAlignment(Alignment);
6750         ReplacementValues.push_back(L);
6751       }
6752     } else {
6753       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6754       L->setAlignment(Alignment);
6755       ReplacementValues.push_back(L);
6756     }
6757   }
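       // Illustrative IR sketch (hypothetical names) emitted right before the
       // call, again for PrivType == {i32, i64} and the usual data layout:
       //   %f0 = ...                         ; constructPointer at offset 0
       //   %v0 = load i32, i32* %f0, align A ; A is the assumed alignment
       //   %f1 = ...                         ; constructPointer at offset 8
       //   %v1 = load i64, i64* %f1, align A
       // %v0 and %v1 are then appended to ReplacementValues.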
6758 
6759   /// See AbstractAttribute::manifest(...)
6760   ChangeStatus manifest(Attributor &A) override {
6761     if (!PrivatizableType)
6762       return ChangeStatus::UNCHANGED;
6763     assert(PrivatizableType.value() && "Expected privatizable type!");
6764 
6765     // Collect all tail calls in the function as we cannot allow new allocas to
6766     // escape into tail recursion.
6767     // TODO: Be smarter about new allocas escaping into tail calls.
6768     SmallVector<CallInst *, 16> TailCalls;
6769     bool UsedAssumedInformation = false;
6770     if (!A.checkForAllInstructions(
6771             [&](Instruction &I) {
6772               CallInst &CI = cast<CallInst>(I);
6773               if (CI.isTailCall())
6774                 TailCalls.push_back(&CI);
6775               return true;
6776             },
6777             *this, {Instruction::Call}, UsedAssumedInformation))
6778       return ChangeStatus::UNCHANGED;
6779 
6780     Argument *Arg = getAssociatedArgument();
6781     // Query AAAlign attribute for alignment of associated argument to
6782     // determine the best alignment of loads.
6783     const auto &AlignAA =
6784         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6785 
6786     // Callback to repair the associated function. A new alloca is placed at the
6787     // beginning and initialized with the values passed through arguments. The
6788     // new alloca replaces the use of the old pointer argument.
6789     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6790         [=](const Attributor::ArgumentReplacementInfo &ARI,
6791             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6792           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6793           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6794           const DataLayout &DL = IP->getModule()->getDataLayout();
6795           unsigned AS = DL.getAllocaAddrSpace();
6796           Instruction *AI = new AllocaInst(PrivatizableType.value(), AS,
6797                                            Arg->getName() + ".priv", IP);
6798           createInitialization(PrivatizableType.value(), *AI, ReplacementFn,
6799                                ArgIt->getArgNo(), *IP);
6800 
6801           if (AI->getType() != Arg->getType())
6802             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6803                 AI, Arg->getType(), "", IP);
6804           Arg->replaceAllUsesWith(AI);
6805 
6806           for (CallInst *CI : TailCalls)
6807             CI->setTailCall(false);
6808         };
6809 
6810     // Callback to repair a call site of the associated function. The elements
6811     // of the privatizable type are loaded prior to the call and passed to the
6812     // new function version.
6813     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6814         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6815                       AbstractCallSite ACS,
6816                       SmallVectorImpl<Value *> &NewArgOperands) {
6817           // When no alignment is specified for the load instruction,
6818           // natural alignment is assumed.
6819           createReplacementValues(
6820               AlignAA.getAssumedAlign(), *PrivatizableType, ACS,
6821               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6822               NewArgOperands);
6823         };
6824 
6825     // Collect the types that will replace the privatizable type in the function
6826     // signature.
6827     SmallVector<Type *, 16> ReplacementTypes;
6828     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6829 
6830     // Register a rewrite of the argument.
6831     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6832                                            std::move(FnRepairCB),
6833                                            std::move(ACSRepairCB)))
6834       return ChangeStatus::CHANGED;
6835     return ChangeStatus::UNCHANGED;
6836   }
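       // Net effect of this manifest, as a hedged sketch with hypothetical
       // names: a signature such as
       //   define void @f(%struct.S* %p)   ; %p privatizable as {i32, i64}
       // becomes
       //   define void @f(i32 %p.0, i64 %p.1)
       // where the entry block rebuilds an alloca "%p.priv" from %p.0/%p.1 and
       // every call site loads the two elements and passes them instead of %p.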
6837 
6838   /// See AbstractAttribute::trackStatistics()
6839   void trackStatistics() const override {
6840     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6841   }
6842 };
6843 
6844 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6845   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6846       : AAPrivatizablePtrImpl(IRP, A) {}
6847 
6848   /// See AbstractAttribute::initialize(...).
6849   void initialize(Attributor &A) override {
6850     // TODO: We can privatize more than arguments.
6851     indicatePessimisticFixpoint();
6852   }
6853 
6854   ChangeStatus updateImpl(Attributor &A) override {
6855     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6856                      "updateImpl will not be called");
6857   }
6858 
6859   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6860   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6861     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6862     if (!Obj) {
6863       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6864       return nullptr;
6865     }
6866 
6867     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6868       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6869         if (CI->isOne())
6870           return AI->getAllocatedType();
6871     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6872       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6873           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6874       if (PrivArgAA.isAssumedPrivatizablePtr())
6875         return PrivArgAA.getPrivatizableType();
6876     }
6877 
6878     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6879                          "alloca nor privatizable argument: "
6880                       << *Obj << "!\n");
6881     return nullptr;
6882   }
6883 
6884   /// See AbstractAttribute::trackStatistics()
6885   void trackStatistics() const override {
6886     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6887   }
6888 };
6889 
6890 struct AAPrivatizablePtrCallSiteArgument final
6891     : public AAPrivatizablePtrFloating {
6892   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6893       : AAPrivatizablePtrFloating(IRP, A) {}
6894 
6895   /// See AbstractAttribute::initialize(...).
6896   void initialize(Attributor &A) override {
6897     if (getIRPosition().hasAttr(Attribute::ByVal))
6898       indicateOptimisticFixpoint();
6899   }
6900 
6901   /// See AbstractAttribute::updateImpl(...).
6902   ChangeStatus updateImpl(Attributor &A) override {
6903     PrivatizableType = identifyPrivatizableType(A);
6904     if (!PrivatizableType)
6905       return ChangeStatus::UNCHANGED;
6906     if (!PrivatizableType.value())
6907       return indicatePessimisticFixpoint();
6908 
6909     const IRPosition &IRP = getIRPosition();
6910     auto &NoCaptureAA =
6911         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6912     if (!NoCaptureAA.isAssumedNoCapture()) {
6913       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6914       return indicatePessimisticFixpoint();
6915     }
6916 
6917     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6918     if (!NoAliasAA.isAssumedNoAlias()) {
6919       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6920       return indicatePessimisticFixpoint();
6921     }
6922 
6923     bool IsKnown;
6924     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
6925       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6926       return indicatePessimisticFixpoint();
6927     }
6928 
6929     return ChangeStatus::UNCHANGED;
6930   }
6931 
6932   /// See AbstractAttribute::trackStatistics()
6933   void trackStatistics() const override {
6934     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6935   }
6936 };
6937 
6938 struct AAPrivatizablePtrCallSiteReturned final
6939     : public AAPrivatizablePtrFloating {
6940   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6941       : AAPrivatizablePtrFloating(IRP, A) {}
6942 
6943   /// See AbstractAttribute::initialize(...).
6944   void initialize(Attributor &A) override {
6945     // TODO: We can privatize more than arguments.
6946     indicatePessimisticFixpoint();
6947   }
6948 
6949   /// See AbstractAttribute::trackStatistics()
6950   void trackStatistics() const override {
6951     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6952   }
6953 };
6954 
6955 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6956   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6957       : AAPrivatizablePtrFloating(IRP, A) {}
6958 
6959   /// See AbstractAttribute::initialize(...).
6960   void initialize(Attributor &A) override {
6961     // TODO: We can privatize more than arguments.
6962     indicatePessimisticFixpoint();
6963   }
6964 
6965   /// See AbstractAttribute::trackStatistics()
6966   void trackStatistics() const override {
6967     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6968   }
6969 };
6970 } // namespace
6971 
6972 /// -------------------- Memory Behavior Attributes ----------------------------
6973 /// Includes read-none, read-only, and write-only.
6974 /// ----------------------------------------------------------------------------
6975 namespace {
6976 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6977   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6978       : AAMemoryBehavior(IRP, A) {}
6979 
6980   /// See AbstractAttribute::initialize(...).
6981   void initialize(Attributor &A) override {
6982     intersectAssumedBits(BEST_STATE);
6983     getKnownStateFromValue(getIRPosition(), getState());
6984     AAMemoryBehavior::initialize(A);
6985   }
6986 
6987   /// Return the memory behavior information encoded in the IR for \p IRP.
6988   static void getKnownStateFromValue(const IRPosition &IRP,
6989                                      BitIntegerState &State,
6990                                      bool IgnoreSubsumingPositions = false) {
6991     SmallVector<Attribute, 2> Attrs;
6992     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6993     for (const Attribute &Attr : Attrs) {
6994       switch (Attr.getKindAsEnum()) {
6995       case Attribute::ReadNone:
6996         State.addKnownBits(NO_ACCESSES);
6997         break;
6998       case Attribute::ReadOnly:
6999         State.addKnownBits(NO_WRITES);
7000         break;
7001       case Attribute::WriteOnly:
7002         State.addKnownBits(NO_READS);
7003         break;
7004       default:
7005         llvm_unreachable("Unexpected attribute!");
7006       }
7007     }
7008 
7009     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7010       if (!I->mayReadFromMemory())
7011         State.addKnownBits(NO_READS);
7012       if (!I->mayWriteToMemory())
7013         State.addKnownBits(NO_WRITES);
7014     }
7015   }
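       // For example: `readonly` contributes known NO_WRITES and `writeonly`
       // known NO_READS; together they cover NO_ACCESSES (i.e., `readnone`),
       // matching NO_ACCESSES == NO_READS | NO_WRITES in the state encoding.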
7016 
7017   /// See AbstractAttribute::getDeducedAttributes(...).
7018   void getDeducedAttributes(LLVMContext &Ctx,
7019                             SmallVectorImpl<Attribute> &Attrs) const override {
7020     assert(Attrs.size() == 0);
7021     if (isAssumedReadNone())
7022       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7023     else if (isAssumedReadOnly())
7024       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7025     else if (isAssumedWriteOnly())
7026       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7027     assert(Attrs.size() <= 1);
7028   }
7029 
7030   /// See AbstractAttribute::manifest(...).
7031   ChangeStatus manifest(Attributor &A) override {
7032     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7033       return ChangeStatus::UNCHANGED;
7034 
7035     const IRPosition &IRP = getIRPosition();
7036 
7037     // Check if we would improve the existing attributes first.
7038     SmallVector<Attribute, 4> DeducedAttrs;
7039     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7040     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7041           return IRP.hasAttr(Attr.getKindAsEnum(),
7042                              /* IgnoreSubsumingPositions */ true);
7043         }))
7044       return ChangeStatus::UNCHANGED;
7045 
7046     // Clear existing attributes.
7047     IRP.removeAttrs(AttrKinds);
7048 
7049     // Use the generic manifest method.
7050     return IRAttribute::manifest(A);
7051   }
7052 
7053   /// See AbstractState::getAsStr().
7054   const std::string getAsStr() const override {
7055     if (isAssumedReadNone())
7056       return "readnone";
7057     if (isAssumedReadOnly())
7058       return "readonly";
7059     if (isAssumedWriteOnly())
7060       return "writeonly";
7061     return "may-read/write";
7062   }
7063 
7064   /// The set of IR attributes AAMemoryBehavior deals with.
7065   static const Attribute::AttrKind AttrKinds[3];
7066 };
7067 
7068 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7069     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7070 
7071 /// Memory behavior attribute for a floating value.
7072 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7073   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7074       : AAMemoryBehaviorImpl(IRP, A) {}
7075 
7076   /// See AbstractAttribute::updateImpl(...).
7077   ChangeStatus updateImpl(Attributor &A) override;
7078 
7079   /// See AbstractAttribute::trackStatistics()
7080   void trackStatistics() const override {
7081     if (isAssumedReadNone())
7082       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7083     else if (isAssumedReadOnly())
7084       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7085     else if (isAssumedWriteOnly())
7086       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7087   }
7088 
7089 private:
7090   /// Return true if users of \p UserI might access the underlying
7091   /// variable/location described by \p U and should therefore be analyzed.
7092   bool followUsersOfUseIn(Attributor &A, const Use &U,
7093                           const Instruction *UserI);
7094 
7095   /// Update the state according to the effect of use \p U in \p UserI.
7096   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7097 };
7098 
7099 /// Memory behavior attribute for function argument.
7100 /// Memory behavior attribute for a function argument.
7101   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7102       : AAMemoryBehaviorFloating(IRP, A) {}
7103 
7104   /// See AbstractAttribute::initialize(...).
7105   void initialize(Attributor &A) override {
7106     intersectAssumedBits(BEST_STATE);
7107     const IRPosition &IRP = getIRPosition();
7108     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7109     // can query it when we use has/getAttr. That would allow us to reuse the
7110     // initialize of the base class here.
7111     bool HasByVal =
7112         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7113     getKnownStateFromValue(IRP, getState(),
7114                            /* IgnoreSubsumingPositions */ HasByVal);
7115 
7116     // Initialize the use vector with all direct uses of the associated value.
7117     Argument *Arg = getAssociatedArgument();
7118     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7119       indicatePessimisticFixpoint();
7120   }
7121 
7122   ChangeStatus manifest(Attributor &A) override {
7123     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7124     if (!getAssociatedValue().getType()->isPointerTy())
7125       return ChangeStatus::UNCHANGED;
7126 
7127     // TODO: From readattrs.ll: "inalloca parameters are always
7128     //                           considered written"
7129     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7130       removeKnownBits(NO_WRITES);
7131       removeAssumedBits(NO_WRITES);
7132     }
7133     return AAMemoryBehaviorFloating::manifest(A);
7134   }
7135 
7136   /// See AbstractAttribute::trackStatistics()
7137   void trackStatistics() const override {
7138     if (isAssumedReadNone())
7139       STATS_DECLTRACK_ARG_ATTR(readnone)
7140     else if (isAssumedReadOnly())
7141       STATS_DECLTRACK_ARG_ATTR(readonly)
7142     else if (isAssumedWriteOnly())
7143       STATS_DECLTRACK_ARG_ATTR(writeonly)
7144   }
7145 };
7146 
7147 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7148   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7149       : AAMemoryBehaviorArgument(IRP, A) {}
7150 
7151   /// See AbstractAttribute::initialize(...).
7152   void initialize(Attributor &A) override {
7153     // If we don't have an associated argument this is either a variadic call
7154     // or an indirect call site; either way, nothing to do here.
7155     Argument *Arg = getAssociatedArgument();
7156     if (!Arg) {
7157       indicatePessimisticFixpoint();
7158       return;
7159     }
7160     if (Arg->hasByValAttr()) {
7161       addKnownBits(NO_WRITES);
7162       removeKnownBits(NO_READS);
7163       removeAssumedBits(NO_READS);
7164     }
7165     AAMemoryBehaviorArgument::initialize(A);
7166     if (getAssociatedFunction()->isDeclaration())
7167       indicatePessimisticFixpoint();
7168   }
7169 
7170   /// See AbstractAttribute::updateImpl(...).
7171   ChangeStatus updateImpl(Attributor &A) override {
7172     // TODO: Once we have call site specific value information we can provide
7173     //       call site specific liveness information and then it makes
7174     //       sense to specialize attributes for call site arguments instead of
7175     //       redirecting requests to the callee argument.
7176     Argument *Arg = getAssociatedArgument();
7177     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7178     auto &ArgAA =
7179         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7180     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7181   }
7182 
7183   /// See AbstractAttribute::trackStatistics()
7184   void trackStatistics() const override {
7185     if (isAssumedReadNone())
7186       STATS_DECLTRACK_CSARG_ATTR(readnone)
7187     else if (isAssumedReadOnly())
7188       STATS_DECLTRACK_CSARG_ATTR(readonly)
7189     else if (isAssumedWriteOnly())
7190       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7191   }
7192 };
7193 
7194 /// Memory behavior attribute for a call site return position.
7195 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7196   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7197       : AAMemoryBehaviorFloating(IRP, A) {}
7198 
7199   /// See AbstractAttribute::initialize(...).
7200   void initialize(Attributor &A) override {
7201     AAMemoryBehaviorImpl::initialize(A);
7202     Function *F = getAssociatedFunction();
7203     if (!F || F->isDeclaration())
7204       indicatePessimisticFixpoint();
7205   }
7206 
7207   /// See AbstractAttribute::manifest(...).
7208   ChangeStatus manifest(Attributor &A) override {
7209     // We do not annotate returned values.
7210     return ChangeStatus::UNCHANGED;
7211   }
7212 
7213   /// See AbstractAttribute::trackStatistics()
7214   void trackStatistics() const override {}
7215 };
7216 
7217 /// An AA to represent the memory behavior function attributes.
7218 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7219   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7220       : AAMemoryBehaviorImpl(IRP, A) {}
7221 
7222   /// See AbstractAttribute::updateImpl(Attributor &A).
7223   ChangeStatus updateImpl(Attributor &A) override;
7224 
7225   /// See AbstractAttribute::manifest(...).
7226   ChangeStatus manifest(Attributor &A) override {
7227     Function &F = cast<Function>(getAnchorValue());
7228     if (isAssumedReadNone()) {
7229       F.removeFnAttr(Attribute::ArgMemOnly);
7230       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7231       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7232     }
7233     return AAMemoryBehaviorImpl::manifest(A);
7234   }
7235 
7236   /// See AbstractAttribute::trackStatistics()
7237   void trackStatistics() const override {
7238     if (isAssumedReadNone())
7239       STATS_DECLTRACK_FN_ATTR(readnone)
7240     else if (isAssumedReadOnly())
7241       STATS_DECLTRACK_FN_ATTR(readonly)
7242     else if (isAssumedWriteOnly())
7243       STATS_DECLTRACK_FN_ATTR(writeonly)
7244   }
7245 };
7246 
7247 /// AAMemoryBehavior attribute for call sites.
7248 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7249   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7250       : AAMemoryBehaviorImpl(IRP, A) {}
7251 
7252   /// See AbstractAttribute::initialize(...).
7253   void initialize(Attributor &A) override {
7254     AAMemoryBehaviorImpl::initialize(A);
7255     Function *F = getAssociatedFunction();
7256     if (!F || F->isDeclaration())
7257       indicatePessimisticFixpoint();
7258   }
7259 
7260   /// See AbstractAttribute::updateImpl(...).
7261   ChangeStatus updateImpl(Attributor &A) override {
7262     // TODO: Once we have call site specific value information we can provide
7263     //       call site specific liveness information and then it makes
7264     //       sense to specialize attributes for call site arguments instead of
7265     //       redirecting requests to the callee argument.
7266     Function *F = getAssociatedFunction();
7267     const IRPosition &FnPos = IRPosition::function(*F);
7268     auto &FnAA =
7269         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7270     return clampStateAndIndicateChange(getState(), FnAA.getState());
7271   }
7272 
7273   /// See AbstractAttribute::trackStatistics()
7274   void trackStatistics() const override {
7275     if (isAssumedReadNone())
7276       STATS_DECLTRACK_CS_ATTR(readnone)
7277     else if (isAssumedReadOnly())
7278       STATS_DECLTRACK_CS_ATTR(readonly)
7279     else if (isAssumedWriteOnly())
7280       STATS_DECLTRACK_CS_ATTR(writeonly)
7281   }
7282 };
7283 
7284 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7285 
7286   // The current assumed state used to determine a change.
7287   auto AssumedState = getAssumed();
7288 
7289   auto CheckRWInst = [&](Instruction &I) {
7290     // If the instruction has its own memory behavior state, use it to restrict
7291     // the local state. No further analysis is required as the other memory
7292     // state is as optimistic as it gets.
7293     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7294       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7295           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7296       intersectAssumedBits(MemBehaviorAA.getAssumed());
7297       return !isAtFixpoint();
7298     }
7299 
7300     // Remove access kind modifiers if necessary.
7301     if (I.mayReadFromMemory())
7302       removeAssumedBits(NO_READS);
7303     if (I.mayWriteToMemory())
7304       removeAssumedBits(NO_WRITES);
7305     return !isAtFixpoint();
7306   };
7307 
7308   bool UsedAssumedInformation = false;
7309   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7310                                           UsedAssumedInformation))
7311     return indicatePessimisticFixpoint();
7312 
7313   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7314                                         : ChangeStatus::UNCHANGED;
7315 }
7316 
7317 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7318 
7319   const IRPosition &IRP = getIRPosition();
7320   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7321   AAMemoryBehavior::StateType &S = getState();
7322 
7323   // First, check the function scope. We take the known information and we avoid
7324   // work if the assumed information implies the current assumed information for
7325   // this attribute. This is valid for all but byval arguments.
7326   Argument *Arg = IRP.getAssociatedArgument();
7327   AAMemoryBehavior::base_t FnMemAssumedState =
7328       AAMemoryBehavior::StateType::getWorstState();
7329   if (!Arg || !Arg->hasByValAttr()) {
7330     const auto &FnMemAA =
7331         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7332     FnMemAssumedState = FnMemAA.getAssumed();
7333     S.addKnownBits(FnMemAA.getKnown());
7334     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7335       return ChangeStatus::UNCHANGED;
7336   }
7337 
7338   // The current assumed state used to determine a change.
7339   auto AssumedState = S.getAssumed();
7340 
7341   // Make sure the value is not captured (except through "return"); if it is,
7342   // any information derived would be irrelevant anyway, as we cannot check
7343   // the potential aliases introduced by the capture. However, there is no
7344   // need to fall back to anything less optimistic than the function state.
7345   const auto &ArgNoCaptureAA =
7346       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7347   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7348     S.intersectAssumedBits(FnMemAssumedState);
7349     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7350                                           : ChangeStatus::UNCHANGED;
7351   }
7352 
7353   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7354   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7355     Instruction *UserI = cast<Instruction>(U.getUser());
7356     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7357                       << " \n");
7358 
7359     // Droppable users, e.g., llvm::assume, do not actually perform any action.
7360     if (UserI->isDroppable())
7361       return true;
7362 
7363     // Check if the users of UserI should also be visited.
7364     Follow = followUsersOfUseIn(A, U, UserI);
7365 
7366     // If UserI might touch memory we analyze the use in detail.
7367     if (UserI->mayReadOrWriteMemory())
7368       analyzeUseIn(A, U, UserI);
7369 
7370     return !isAtFixpoint();
7371   };
7372 
7373   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7374     return indicatePessimisticFixpoint();
7375 
7376   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7377                                         : ChangeStatus::UNCHANGED;
7378 }
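     // A minimal sketch of the use walk above (hypothetical IR): given
     //   %q = getelementptr i32, i32* %p, i64 1 ; use of %p, users followed
     //   %v = load i32, i32* %q                 ; clears NO_READS, not followed
     // the state for %p loses NO_READS, but the users of %v are never visited.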
7379 
7380 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7381                                                   const Instruction *UserI) {
7382   // The loaded value is unrelated to the pointer argument; no need to
7383   // follow the users of the load.
7384   if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI))
7385     return false;
7386 
7387   // By default we follow all uses assuming UserI might leak information on U,
7388   // we have special handling for call sites operands though.
7389   const auto *CB = dyn_cast<CallBase>(UserI);
7390   if (!CB || !CB->isArgOperand(&U))
7391     return true;
7392 
7393   // If the use is a call argument known not to be captured, the users of
7394   // the call do not need to be visited because they have to be unrelated to
7395   // the input. Note that this check is not trivial even though we disallow
7396   // general capturing of the underlying argument. The reason is that the
7397   // call might capture the argument "through return", which we allow and
7398   // for which we need to check call users.
7399   if (U.get()->getType()->isPointerTy()) {
7400     unsigned ArgNo = CB->getArgOperandNo(&U);
7401     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7402         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7403     return !ArgNoCaptureAA.isAssumedNoCapture();
7404   }
7405 
7406   return true;
7407 }
7408 
7409 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7410                                             const Instruction *UserI) {
7411   assert(UserI->mayReadOrWriteMemory());
7412 
7413   switch (UserI->getOpcode()) {
7414   default:
7415     // TODO: Handle all atomics and other side-effect operations we know of.
7416     break;
7417   case Instruction::Load:
7418     // Loads cause the NO_READS property to disappear.
7419     removeAssumedBits(NO_READS);
7420     return;
7421 
7422   case Instruction::Store:
7423     // Stores cause the NO_WRITES property to disappear if the use is the
7424     // pointer operand. Note that while capturing was taken care of elsewhere,
7425     // we still need to handle stores of the value itself (not looked through).
7426     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7427       removeAssumedBits(NO_WRITES);
7428     else
7429       indicatePessimisticFixpoint();
7430     return;
7431 
7432   case Instruction::Call:
7433   case Instruction::CallBr:
7434   case Instruction::Invoke: {
7435     // For call sites we look at the argument memory behavior attribute (this
7436     // could be recursive!) in order to restrict our own state.
7437     const auto *CB = cast<CallBase>(UserI);
7438 
7439     // Give up on operand bundles.
7440     if (CB->isBundleOperand(&U)) {
7441       indicatePessimisticFixpoint();
7442       return;
7443     }
7444 
7445     // Calling a function does read the function pointer, and may even write
7446     // it if the function is self-modifying.
7447     if (CB->isCallee(&U)) {
7448       removeAssumedBits(NO_READS);
7449       break;
7450     }
7451 
7452     // Adjust the possible access behavior based on the information on the
7453     // argument.
7454     IRPosition Pos;
7455     if (U.get()->getType()->isPointerTy())
7456       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7457     else
7458       Pos = IRPosition::callsite_function(*CB);
7459     const auto &MemBehaviorAA =
7460         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7461     // "assumed" keeps at most the bits assumed by the MemBehaviorAA and
7462     // never fewer than the "known" bits.
7463     intersectAssumedBits(MemBehaviorAA.getAssumed());
7464     return;
7465   }
7466   };
7467 
7468   // Generally, look at the "may-properties" and adjust the assumed state if we
7469   // did not trigger special handling before.
7470   if (UserI->mayReadFromMemory())
7471     removeAssumedBits(NO_READS);
7472   if (UserI->mayWriteToMemory())
7473     removeAssumedBits(NO_WRITES);
7474 }
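     // For example: `store i32 0, i32* %v` only clears NO_WRITES for %v,
     // whereas `store i32* %v, i32** %q` stores %v itself and triggers the
     // pessimistic fixpoint above, as the value escapes through memory.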
7475 } // namespace
7476 
7477 /// -------------------- Memory Locations Attributes ---------------------------
7478 /// Includes read-none, argmemonly, inaccessiblememonly,
7479 /// inaccessiblememorargmemonly
7480 /// ----------------------------------------------------------------------------
7481 
7482 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7483     AAMemoryLocation::MemoryLocationsKind MLK) {
7484   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7485     return "all memory";
7486   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7487     return "no memory";
7488   std::string S = "memory:";
7489   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7490     S += "stack,";
7491   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7492     S += "constant,";
7493   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7494     S += "internal global,";
7495   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7496     S += "external global,";
7497   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7498     S += "argument,";
7499   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7500     S += "inaccessible,";
7501   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7502     S += "malloced,";
7503   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7504     S += "unknown,";
7505   S.pop_back();
7506   return S;
7507 }
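     // For example: a state with every NO_* bit set except NO_ARGUMENT_MEM
     // prints as "memory:argument", a state with no bits set prints as
     // "all memory", and NO_LOCATIONS itself prints as "no memory".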
7508 
7509 namespace {
7510 struct AAMemoryLocationImpl : public AAMemoryLocation {
7511 
7512   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7513       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7514     AccessKind2Accesses.fill(nullptr);
7515   }
7516 
7517   ~AAMemoryLocationImpl() {
7518     // The AccessSets are allocated via a BumpPtrAllocator, we call
7519     // the destructor manually.
7520     for (AccessSet *AS : AccessKind2Accesses)
7521       if (AS)
7522         AS->~AccessSet();
7523   }
7524 
7525   /// See AbstractAttribute::initialize(...).
7526   void initialize(Attributor &A) override {
7527     intersectAssumedBits(BEST_STATE);
7528     getKnownStateFromValue(A, getIRPosition(), getState());
7529     AAMemoryLocation::initialize(A);
7530   }
7531 
7532   /// Return the memory behavior information encoded in the IR for \p IRP.
7533   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7534                                      BitIntegerState &State,
7535                                      bool IgnoreSubsumingPositions = false) {
7536     // For internal functions we ignore `argmemonly` and
7537     // `inaccessiblememorargmemonly` as we might break them via interprocedural
7538     // constant propagation. It is unclear if this is the best way but it is
7539     // unlikely this will cause real performance problems. If we are deriving
7540     // attributes for the anchor function we even remove the attribute in
7541     // addition to ignoring it.
7542     bool UseArgMemOnly = true;
7543     Function *AnchorFn = IRP.getAnchorScope();
7544     if (AnchorFn && A.isRunOn(*AnchorFn))
7545       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7546 
7547     SmallVector<Attribute, 2> Attrs;
7548     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7549     for (const Attribute &Attr : Attrs) {
7550       switch (Attr.getKindAsEnum()) {
7551       case Attribute::ReadNone:
7552         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7553         break;
7554       case Attribute::InaccessibleMemOnly:
7555         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7556         break;
7557       case Attribute::ArgMemOnly:
7558         if (UseArgMemOnly)
7559           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7560         else
7561           IRP.removeAttrs({Attribute::ArgMemOnly});
7562         break;
7563       case Attribute::InaccessibleMemOrArgMemOnly:
7564         if (UseArgMemOnly)
7565           State.addKnownBits(inverseLocation(
7566               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7567         else
7568           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7569         break;
7570       default:
7571         llvm_unreachable("Unexpected attribute!");
7572       }
7573     }
7574   }
7575 
7576   /// See AbstractAttribute::getDeducedAttributes(...).
7577   void getDeducedAttributes(LLVMContext &Ctx,
7578                             SmallVectorImpl<Attribute> &Attrs) const override {
7579     assert(Attrs.size() == 0);
7580     if (isAssumedReadNone()) {
7581       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7582     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7583       if (isAssumedInaccessibleMemOnly())
7584         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7585       else if (isAssumedArgMemOnly())
7586         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7587       else if (isAssumedInaccessibleOrArgMemOnly())
7588         Attrs.push_back(
7589             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7590     }
7591     assert(Attrs.size() <= 1);
7592   }
7593 
7594   /// See AbstractAttribute::manifest(...).
7595   ChangeStatus manifest(Attributor &A) override {
7596     const IRPosition &IRP = getIRPosition();
7597 
7598     // Check if we would improve the existing attributes first.
7599     SmallVector<Attribute, 4> DeducedAttrs;
7600     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7601     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7602           return IRP.hasAttr(Attr.getKindAsEnum(),
7603                              /* IgnoreSubsumingPositions */ true);
7604         }))
7605       return ChangeStatus::UNCHANGED;
7606 
7607     // Clear existing attributes.
7608     IRP.removeAttrs(AttrKinds);
7609     if (isAssumedReadNone())
7610       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7611 
7612     // Use the generic manifest method.
7613     return IRAttribute::manifest(A);
7614   }
7615 
7616   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7617   bool checkForAllAccessesToMemoryKind(
7618       function_ref<bool(const Instruction *, const Value *, AccessKind,
7619                         MemoryLocationsKind)>
7620           Pred,
7621       MemoryLocationsKind RequestedMLK) const override {
7622     if (!isValidState())
7623       return false;
7624 
7625     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7626     if (AssumedMLK == NO_LOCATIONS)
7627       return true;
7628 
7629     unsigned Idx = 0;
7630     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7631          CurMLK *= 2, ++Idx) {
7632       if (CurMLK & RequestedMLK)
7633         continue;
7634 
7635       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7636         for (const AccessInfo &AI : *Accesses)
7637           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7638             return false;
7639     }
7640 
7641     return true;
7642   }
7643 
7644   ChangeStatus indicatePessimisticFixpoint() override {
7645     // If we give up and indicate a pessimistic fixpoint this instruction will
7646     // become an access for all potential access kinds:
7647     // TODO: Add pointers for argmemonly and globals to improve the results of
7648     //       checkForAllAccessesToMemoryKind.
7649     bool Changed = false;
7650     MemoryLocationsKind KnownMLK = getKnown();
7651     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7652     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7653       if (!(CurMLK & KnownMLK))
7654         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7655                                   getAccessKindFromInst(I));
7656     return AAMemoryLocation::indicatePessimisticFixpoint();
7657   }
7658 
7659 protected:
7660   /// Helper struct to tie together an instruction that has a read or write
7661   /// effect with the pointer it accesses (if any).
7662   struct AccessInfo {
7663 
7664     /// The instruction that caused the access.
7665     const Instruction *I;
7666 
7667     /// The base pointer that is accessed, or null if unknown.
7668     const Value *Ptr;
7669 
7670     /// The kind of access (read/write/read+write).
7671     AccessKind Kind;
7672 
7673     bool operator==(const AccessInfo &RHS) const {
7674       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7675     }
7676     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7677       if (LHS.I != RHS.I)
7678         return LHS.I < RHS.I;
7679       if (LHS.Ptr != RHS.Ptr)
7680         return LHS.Ptr < RHS.Ptr;
7681       if (LHS.Kind != RHS.Kind)
7682         return LHS.Kind < RHS.Kind;
7683       return false;
7684     }
7685   };
7686 
7687   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM (encoded
7688   /// by the bit NO_LOCAL_MEM), to the accesses encountered for that kind.
7689   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7690   std::array<AccessSet *, llvm::CTLog2<VALID_STATE>()> AccessKind2Accesses;
7691 
7692   /// Categorize the pointer arguments of \p CB that might access memory in
7693   /// \p AccessedLocs and update the state and access map accordingly.
7694   void
7695   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7696                                      AAMemoryLocation::StateType &AccessedLocs,
7697                                      bool &Changed);
7698 
7699   /// Return the kind(s) of location that may be accessed by \p I.
7700   AAMemoryLocation::MemoryLocationsKind
7701   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7702 
7703   /// Return the access kind as determined by \p I.
7704   AccessKind getAccessKindFromInst(const Instruction *I) {
7705     AccessKind AK = READ_WRITE;
7706     if (I) {
7707       AK = I->mayReadFromMemory() ? READ : NONE;
7708       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7709     }
7710     return AK;
7711   }
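       // For example: a load yields READ, a store yields WRITE, an atomicrmw
       // yields READ_WRITE, and a null instruction conservatively stays
       // READ_WRITE.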
7712 
7713   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7714   /// an access of kind \p AK to a \p MLK memory location with the access
7715   /// pointer \p Ptr.
7716   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7717                                  MemoryLocationsKind MLK, const Instruction *I,
7718                                  const Value *Ptr, bool &Changed,
7719                                  AccessKind AK = READ_WRITE) {
7720 
7721     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7722     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7723     if (!Accesses)
7724       Accesses = new (Allocator) AccessSet();
7725     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7726     State.removeAssumedBits(MLK);
7727   }
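       // For example: recording a store through an internal global with MLK ==
       // NO_GLOBAL_INTERNAL_MEM inserts AccessInfo{I, GV, WRITE} into the set
       // at index Log2_32(NO_GLOBAL_INTERNAL_MEM) and clears that bit from the
       // assumed state, i.e., internal global memory is now possibly written.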
7728 
7729   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7730   /// arguments, and update the state and access map accordingly.
7731   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7732                           AAMemoryLocation::StateType &State, bool &Changed);
7733 
7734   /// Used to allocate access sets.
7735   BumpPtrAllocator &Allocator;
7736 
7737   /// The set of IR attributes AAMemoryLocation deals with.
7738   static const Attribute::AttrKind AttrKinds[4];
7739 };
7740 
7741 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7742     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7743     Attribute::InaccessibleMemOrArgMemOnly};
7744 
7745 void AAMemoryLocationImpl::categorizePtrValue(
7746     Attributor &A, const Instruction &I, const Value &Ptr,
7747     AAMemoryLocation::StateType &State, bool &Changed) {
7748   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7749                     << Ptr << " ["
7750                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7751 
7752   SmallSetVector<Value *, 8> Objects;
7753   bool UsedAssumedInformation = false;
7754   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7755                                        UsedAssumedInformation,
7756                                        AA::Intraprocedural)) {
7757     LLVM_DEBUG(
7758         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7759     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7760                               getAccessKindFromInst(&I));
7761     return;
7762   }
7763 
7764   for (Value *Obj : Objects) {
7765     // TODO: recognize the TBAA used for constant accesses.
7766     MemoryLocationsKind MLK = NO_LOCATIONS;
7767     if (isa<UndefValue>(Obj))
7768       continue;
7769     if (isa<Argument>(Obj)) {
7770       // TODO: For now we do not treat byval arguments as local copies performed
7771       // on the call edge, though we should. To make that happen we need to
7772       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7773       // would also allow us to mark functions only accessing byval arguments as
7774       // readnone again; arguably their accesses have no effect outside of the
7775       // function, like accesses to allocas.
7776       MLK = NO_ARGUMENT_MEM;
7777     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7778       // Reading constant memory is not treated as a read "effect" by the
7779       // function attr pass so we won't either. Constants defined by TBAA are
7780       // similar. (We know we do not write it because it is constant.)
7781       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7782         if (GVar->isConstant())
7783           continue;
7784 
7785       if (GV->hasLocalLinkage())
7786         MLK = NO_GLOBAL_INTERNAL_MEM;
7787       else
7788         MLK = NO_GLOBAL_EXTERNAL_MEM;
7789     } else if (isa<ConstantPointerNull>(Obj) &&
7790                !NullPointerIsDefined(getAssociatedFunction(),
7791                                      Ptr.getType()->getPointerAddressSpace())) {
7792       continue;
7793     } else if (isa<AllocaInst>(Obj)) {
7794       MLK = NO_LOCAL_MEM;
7795     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7796       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7797           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7798       if (NoAliasAA.isAssumedNoAlias())
7799         MLK = NO_MALLOCED_MEM;
7800       else
7801         MLK = NO_UNKOWN_MEM;
7802     } else {
7803       MLK = NO_UNKOWN_MEM;
7804     }
7805 
7806     assert(MLK != NO_LOCATIONS && "No location specified!");
7807     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7808                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7809                       << "\n");
7810     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7811                               getAccessKindFromInst(&I));
7812   }
7813 
7814   LLVM_DEBUG(
7815       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7816              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7817 }
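     // In summary, the cases above map allocas to NO_LOCAL_MEM, arguments to
     // NO_ARGUMENT_MEM, internal/external globals to the respective global
     // kinds, and noalias call results to NO_MALLOCED_MEM; everything else
     // becomes NO_UNKOWN_MEM, while undef values, constant globals, and null
     // pointers in address spaces where null is undefined are skipped.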
7818 
7819 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7820     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7821     bool &Changed) {
7822   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7823 
7824     // Skip non-pointer arguments.
7825     const Value *ArgOp = CB.getArgOperand(ArgNo);
7826     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7827       continue;
7828 
7829     // Skip readnone arguments.
7830     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7831     const auto &ArgOpMemLocationAA =
7832         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7833 
7834     if (ArgOpMemLocationAA.isAssumedReadNone())
7835       continue;
7836 
7837     // Categorize potentially accessed pointer arguments as if there was an
7838     // access instruction with them as pointer.
7839     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7840   }
7841 }
7842 
7843 AAMemoryLocation::MemoryLocationsKind
7844 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7845                                                   bool &Changed) {
7846   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7847                     << I << "\n");
7848 
7849   AAMemoryLocation::StateType AccessedLocs;
7850   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7851 
7852   if (auto *CB = dyn_cast<CallBase>(&I)) {
7853 
7854     // First check if we assume any memory access is visible.
7855     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7856         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7857     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7858                       << " [" << CBMemLocationAA << "]\n");
7859 
7860     if (CBMemLocationAA.isAssumedReadNone())
7861       return NO_LOCATIONS;
7862 
7863     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7864       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7865                                 Changed, getAccessKindFromInst(&I));
7866       return AccessedLocs.getAssumed();
7867     }
7868 
7869     uint32_t CBAssumedNotAccessedLocs =
7870         CBMemLocationAA.getAssumedNotAccessedLocation();
7871 
7872     // Set the argmemonly and global bits as we handle them separately below.
7873     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7874         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7875 
7876     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7877       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7878         continue;
7879       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7880                                 getAccessKindFromInst(&I));
7881     }
7882 
7883     // Now handle global memory if it might be accessed. This is slightly tricky
7884     // as NO_GLOBAL_MEM has multiple bits set.
7885     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7886     if (HasGlobalAccesses) {
7887       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7888                             AccessKind Kind, MemoryLocationsKind MLK) {
7889         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7890                                   getAccessKindFromInst(&I));
7891         return true;
7892       };
7893       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7894               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7895         return AccessedLocs.getWorstState();
7896     }
7897 
7898     LLVM_DEBUG(
7899         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7900                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7901 
7902     // Now handle argument memory if it might be accessed.
7903     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7904     if (HasArgAccesses)
7905       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7906 
7907     LLVM_DEBUG(
7908         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7909                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7910 
7911     return AccessedLocs.getAssumed();
7912   }
7913 
7914   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7915     LLVM_DEBUG(
7916         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7917                << I << " [" << *Ptr << "]\n");
7918     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7919     return AccessedLocs.getAssumed();
7920   }
7921 
7922   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7923                     << I << "\n");
7924   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7925                             getAccessKindFromInst(&I));
7926   return AccessedLocs.getAssumed();
7927 }
7928 
7929 /// An AA to represent the memory location function attributes.
7930 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7931   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7932       : AAMemoryLocationImpl(IRP, A) {}
7933 
7934   /// See AbstractAttribute::updateImpl(Attributor &A).
7935   ChangeStatus updateImpl(Attributor &A) override {
7936 
7937     const auto &MemBehaviorAA =
7938         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7939     if (MemBehaviorAA.isAssumedReadNone()) {
7940       if (MemBehaviorAA.isKnownReadNone())
7941         return indicateOptimisticFixpoint();
7942       assert(isAssumedReadNone() &&
7943              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7944       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7945       return ChangeStatus::UNCHANGED;
7946     }
7947 
7948     // The current assumed state used to determine a change.
7949     auto AssumedState = getAssumed();
7950     bool Changed = false;
7951 
7952     auto CheckRWInst = [&](Instruction &I) {
7953       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7954       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7955                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7956       removeAssumedBits(inverseLocation(MLK, false, false));
7957       // Stop once only the valid bit is set in the *not assumed location*, i.e.,
7958       // once we don't actually exclude any memory locations in the state.
7959       return getAssumedNotAccessedLocation() != VALID_STATE;
7960     };
7961 
7962     bool UsedAssumedInformation = false;
7963     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7964                                             UsedAssumedInformation))
7965       return indicatePessimisticFixpoint();
7966 
7967     Changed |= AssumedState != getAssumed();
7968     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7969   }
7970 
7971   /// See AbstractAttribute::trackStatistics()
7972   void trackStatistics() const override {
7973     if (isAssumedReadNone())
7974       STATS_DECLTRACK_FN_ATTR(readnone)
7975     else if (isAssumedArgMemOnly())
7976       STATS_DECLTRACK_FN_ATTR(argmemonly)
7977     else if (isAssumedInaccessibleMemOnly())
7978       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7979     else if (isAssumedInaccessibleOrArgMemOnly())
7980       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7981   }
7982 };
7983 
7984 /// AAMemoryLocation attribute for call sites.
7985 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7986   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7987       : AAMemoryLocationImpl(IRP, A) {}
7988 
7989   /// See AbstractAttribute::initialize(...).
7990   void initialize(Attributor &A) override {
7991     AAMemoryLocationImpl::initialize(A);
7992     Function *F = getAssociatedFunction();
7993     if (!F || F->isDeclaration())
7994       indicatePessimisticFixpoint();
7995   }
7996 
7997   /// See AbstractAttribute::updateImpl(...).
7998   ChangeStatus updateImpl(Attributor &A) override {
7999     // TODO: Once we have call site specific value information we can provide
8000     //       call site specific liveness information and then it makes
8001     //       sense to specialize attributes for call site arguments instead of
8002     //       redirecting requests to the callee argument.
8003     Function *F = getAssociatedFunction();
8004     const IRPosition &FnPos = IRPosition::function(*F);
8005     auto &FnAA =
8006         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8007     bool Changed = false;
8008     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8009                           AccessKind Kind, MemoryLocationsKind MLK) {
8010       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8011                                 getAccessKindFromInst(I));
8012       return true;
8013     };
8014     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8015       return indicatePessimisticFixpoint();
8016     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8017   }
8018 
8019   /// See AbstractAttribute::trackStatistics()
8020   void trackStatistics() const override {
8021     if (isAssumedReadNone())
8022       STATS_DECLTRACK_CS_ATTR(readnone)
8023   }
8024 };
8025 } // namespace
8026 
8027 /// ------------------ Value Constant Range Attribute -------------------------
8028 
8029 namespace {
8030 struct AAValueConstantRangeImpl : AAValueConstantRange {
8031   using StateType = IntegerRangeState;
8032   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8033       : AAValueConstantRange(IRP, A) {}
8034 
8035   /// See AbstractAttribute::initialize(..).
8036   void initialize(Attributor &A) override {
8037     if (A.hasSimplificationCallback(getIRPosition())) {
8038       indicatePessimisticFixpoint();
8039       return;
8040     }
8041 
8042     // Intersect a range given by SCEV.
8043     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8044 
8045     // Intersect a range given by LVI.
8046     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8047   }
8048 
8049   /// See AbstractAttribute::getAsStr().
8050   const std::string getAsStr() const override {
8051     std::string Str;
8052     llvm::raw_string_ostream OS(Str);
8053     OS << "range(" << getBitWidth() << ")<";
8054     getKnown().print(OS);
8055     OS << " / ";
8056     getAssumed().print(OS);
8057     OS << ">";
8058     return OS.str();
8059   }
8060 
8061   /// Helper function to get a SCEV expr for the associated value at program
8062   /// point \p I.
8063   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8064     if (!getAnchorScope())
8065       return nullptr;
8066 
8067     ScalarEvolution *SE =
8068         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8069             *getAnchorScope());
8070 
8071     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8072         *getAnchorScope());
8073 
8074     if (!SE || !LI)
8075       return nullptr;
8076 
8077     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8078     if (!I)
8079       return S;
8080 
8081     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8082   }
8083 
8084   /// Helper function to get a range from SCEV for the associated value at
8085   /// program point \p I.
8086   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8087                                          const Instruction *I = nullptr) const {
8088     if (!getAnchorScope())
8089       return getWorstState(getBitWidth());
8090 
8091     ScalarEvolution *SE =
8092         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8093             *getAnchorScope());
8094 
8095     const SCEV *S = getSCEV(A, I);
8096     if (!SE || !S)
8097       return getWorstState(getBitWidth());
8098 
8099     return SE->getUnsignedRange(S);
8100   }
8101 
8102   /// Helper function to get a range from LVI for the associated value at
8103   /// program point \p I.
8104   ConstantRange
8105   getConstantRangeFromLVI(Attributor &A,
8106                           const Instruction *CtxI = nullptr) const {
8107     if (!getAnchorScope())
8108       return getWorstState(getBitWidth());
8109 
8110     LazyValueInfo *LVI =
8111         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8112             *getAnchorScope());
8113 
8114     if (!LVI || !CtxI)
8115       return getWorstState(getBitWidth());
8116     return LVI->getConstantRange(&getAssociatedValue(),
8117                                  const_cast<Instruction *>(CtxI));
8118   }
8119 
8120   /// Return true if \p CtxI is valid for querying outside analyses.
8121   /// This basically makes sure we do not ask intra-procedural analyses
8122   /// about a context in the wrong function or a context that violates
8123   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8124   /// if the original context of this AA is OK or should be considered invalid.
8125   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8126                                                const Instruction *CtxI,
8127                                                bool AllowAACtxI) const {
8128     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8129       return false;
8130 
8131     // Our context might be in a different function; neither intra-procedural
8132     // analysis (ScalarEvolution or LazyValueInfo) can handle that.
8133     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8134       return false;
8135 
8136     // If the context is not dominated by the value there are paths to the
8137     // context that do not define the value. This cannot be handled by
8138     // LazyValueInfo so we need to bail.
8139     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8140       InformationCache &InfoCache = A.getInfoCache();
8141       const DominatorTree *DT =
8142           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8143               *I->getFunction());
8144       return DT && DT->dominates(I, CtxI);
8145     }
8146 
8147     return true;
8148   }
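  // Illustrative example: if the associated value is defined only in one
  // branch of a conditional, a CtxI after the join point is not dominated by
  // the definition; the check above then returns false and callers fall back
  // to the known/assumed state without querying LVI or SCEV.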
8149 
8150   /// See AAValueConstantRange::getKnownConstantRange(..).
8151   ConstantRange
8152   getKnownConstantRange(Attributor &A,
8153                         const Instruction *CtxI = nullptr) const override {
8154     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8155                                                  /* AllowAACtxI */ false))
8156       return getKnown();
8157 
8158     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8159     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8160     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8161   }
8162 
8163   /// See AAValueConstantRange::getAssumedConstantRange(..).
8164   ConstantRange
8165   getAssumedConstantRange(Attributor &A,
8166                           const Instruction *CtxI = nullptr) const override {
8167     // TODO: Make SCEV use Attributor assumption.
8168     //       We may be able to bound a variable range via assumptions in
8169     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8170     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8171     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8172                                                  /* AllowAACtxI */ false))
8173       return getAssumed();
8174 
8175     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8176     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8177     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8178   }
8179 
8180   /// Helper function to create MDNode for range metadata.
8181   static MDNode *
8182   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8183                             const ConstantRange &AssumedConstantRange) {
8184     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8185                                   Ty, AssumedConstantRange.getLower())),
8186                               ConstantAsMetadata::get(ConstantInt::get(
8187                                   Ty, AssumedConstantRange.getUpper()))};
8188     return MDNode::get(Ctx, LowAndHigh);
8189   }
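  // For illustration (hypothetical values): an i32 value with the assumed
  // range [0, 42) yields the node !{i32 0, i32 42}, i.e., the payload of a
  // !range annotation.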
8190 
8191   /// Return true if \p Assumed is a strictly better range than \p KnownRanges.
8192   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8193 
8194     if (Assumed.isFullSet())
8195       return false;
8196 
8197     if (!KnownRanges)
8198       return true;
8199 
8200     // If multiple ranges are annotated in the IR, we give up on annotating
8201     // the assumed range for now.
8202 
8203     // TODO: If there exists a known range which contains the assumed range,
8204     // we can say the assumed range is better.
8205     if (KnownRanges->getNumOperands() > 2)
8206       return false;
8207 
8208     ConstantInt *Lower =
8209         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8210     ConstantInt *Upper =
8211         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8212 
8213     ConstantRange Known(Lower->getValue(), Upper->getValue());
8214     return Known.contains(Assumed) && Known != Assumed;
8215   }
8216 
8217   /// Helper function to set range metadata.
8218   static bool
8219   setRangeMetadataIfisBetterRange(Instruction *I,
8220                                   const ConstantRange &AssumedConstantRange) {
8221     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8222     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8223       if (!AssumedConstantRange.isEmptySet()) {
8224         I->setMetadata(LLVMContext::MD_range,
8225                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8226                                                  AssumedConstantRange));
8227         return true;
8228       }
8229     }
8230     return false;
8231   }
8232 
8233   /// See AbstractAttribute::manifest()
8234   ChangeStatus manifest(Attributor &A) override {
8235     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8236     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8237     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8238 
8239     auto &V = getAssociatedValue();
8240     if (!AssumedConstantRange.isEmptySet() &&
8241         !AssumedConstantRange.isSingleElement()) {
8242       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8243         assert(I == getCtxI() && "Should not annotate an instruction which is "
8244                                  "not the context instruction");
8245         if (isa<CallInst>(I) || isa<LoadInst>(I))
8246           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8247             Changed = ChangeStatus::CHANGED;
8248       }
8249     }
8250 
8251     return Changed;
8252   }
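  // For example (illustrative IR): if the assumed range of
  //   %x = load i32, ptr %p
  // is [0, 8), manifesting rewrites it to
  //   %x = load i32, ptr %p, !range !0
  // with !0 = !{i32 0, i32 8}.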
8253 };
8254 
8255 struct AAValueConstantRangeArgument final
8256     : AAArgumentFromCallSiteArguments<
8257           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8258           true /* BridgeCallBaseContext */> {
8259   using Base = AAArgumentFromCallSiteArguments<
8260       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8261       true /* BridgeCallBaseContext */>;
8262   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8263       : Base(IRP, A) {}
8264 
8265   /// See AbstractAttribute::initialize(..).
8266   void initialize(Attributor &A) override {
8267     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8268       indicatePessimisticFixpoint();
8269     } else {
8270       Base::initialize(A);
8271     }
8272   }
8273 
8274   /// See AbstractAttribute::trackStatistics()
8275   void trackStatistics() const override {
8276     STATS_DECLTRACK_ARG_ATTR(value_range)
8277   }
8278 };
8279 
8280 struct AAValueConstantRangeReturned
8281     : AAReturnedFromReturnedValues<AAValueConstantRange,
8282                                    AAValueConstantRangeImpl,
8283                                    AAValueConstantRangeImpl::StateType,
8284                                    /* PropagateCallBaseContext */ true> {
8285   using Base =
8286       AAReturnedFromReturnedValues<AAValueConstantRange,
8287                                    AAValueConstantRangeImpl,
8288                                    AAValueConstantRangeImpl::StateType,
8289                                    /* PropagateCallBaseContext */ true>;
8290   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8291       : Base(IRP, A) {}
8292 
8293   /// See AbstractAttribute::initialize(...).
8294   void initialize(Attributor &A) override {}
8295 
8296   /// See AbstractAttribute::trackStatistics()
8297   void trackStatistics() const override {
8298     STATS_DECLTRACK_FNRET_ATTR(value_range)
8299   }
8300 };
8301 
8302 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8303   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8304       : AAValueConstantRangeImpl(IRP, A) {}
8305 
8306   /// See AbstractAttribute::initialize(...).
8307   void initialize(Attributor &A) override {
8308     AAValueConstantRangeImpl::initialize(A);
8309     if (isAtFixpoint())
8310       return;
8311 
8312     Value &V = getAssociatedValue();
8313 
8314     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8315       unionAssumed(ConstantRange(C->getValue()));
8316       indicateOptimisticFixpoint();
8317       return;
8318     }
8319 
8320     if (isa<UndefValue>(&V)) {
8321       // Collapse the undef state to 0.
8322       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8323       indicateOptimisticFixpoint();
8324       return;
8325     }
8326 
8327     if (isa<CallBase>(&V))
8328       return;
8329 
8330     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8331       return;
8332 
8333     // If it is a load instruction with range metadata, use it.
8334     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8335       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8336         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8337         return;
8338       }
8339 
8340     // We can work with PHI and select instructions as we traverse their
8341     // operands during the update.
8342     if (isa<SelectInst>(V) || isa<PHINode>(V))
8343       return;
8344 
8345     // Otherwise we give up.
8346     indicatePessimisticFixpoint();
8347 
8348     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8349                       << getAssociatedValue() << "\n");
8350   }
8351 
8352   bool calculateBinaryOperator(
8353       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8354       const Instruction *CtxI,
8355       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8356     Value *LHS = BinOp->getOperand(0);
8357     Value *RHS = BinOp->getOperand(1);
8358 
8359     // Simplify the operands first.
8360     bool UsedAssumedInformation = false;
8361     const auto &SimplifiedLHS = A.getAssumedSimplified(
8362         IRPosition::value(*LHS, getCallBaseContext()), *this,
8363         UsedAssumedInformation, AA::Interprocedural);
8364     if (!SimplifiedLHS.has_value())
8365       return true;
8366     if (!SimplifiedLHS.value())
8367       return false;
8368     LHS = *SimplifiedLHS;
8369 
8370     const auto &SimplifiedRHS = A.getAssumedSimplified(
8371         IRPosition::value(*RHS, getCallBaseContext()), *this,
8372         UsedAssumedInformation, AA::Interprocedural);
8373     if (!SimplifiedRHS.has_value())
8374       return true;
8375     if (!SimplifiedRHS.value())
8376       return false;
8377     RHS = *SimplifiedRHS;
8378 
8379     // TODO: Allow non integers as well.
8380     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8381       return false;
8382 
8383     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8384         *this, IRPosition::value(*LHS, getCallBaseContext()),
8385         DepClassTy::REQUIRED);
8386     QuerriedAAs.push_back(&LHSAA);
8387     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8388 
8389     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8390         *this, IRPosition::value(*RHS, getCallBaseContext()),
8391         DepClassTy::REQUIRED);
8392     QuerriedAAs.push_back(&RHSAA);
8393     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8394 
8395     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8396 
8397     T.unionAssumed(AssumedRange);
8398 
8399     // TODO: Track a known state too.
8400 
8401     return T.isValidState();
8402   }
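  // Illustrative example: with an assumed LHS range of [0, 4) and an assumed
  // RHS range of [1, 2), an 'add' unions the range [1, 5) into T.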
8403 
8404   bool calculateCastInst(
8405       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8406       const Instruction *CtxI,
8407       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8408     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8409     // TODO: Allow non integers as well.
8410     Value *OpV = CastI->getOperand(0);
8411 
8412     // Simplify the operand first.
8413     bool UsedAssumedInformation = false;
8414     const auto &SimplifiedOpV = A.getAssumedSimplified(
8415         IRPosition::value(*OpV, getCallBaseContext()), *this,
8416         UsedAssumedInformation, AA::Interprocedural);
8417     if (!SimplifiedOpV.has_value())
8418       return true;
8419     if (!SimplifiedOpV.value())
8420       return false;
8421     OpV = *SimplifiedOpV;
8422 
8423     if (!OpV->getType()->isIntegerTy())
8424       return false;
8425 
8426     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8427         *this, IRPosition::value(*OpV, getCallBaseContext()),
8428         DepClassTy::REQUIRED);
8429     QuerriedAAs.push_back(&OpAA);
8430     T.unionAssumed(
8431         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8432     return T.isValidState();
8433   }
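  // Illustrative example: a 'zext i8 %v to i32' whose operand has the assumed
  // range [200, 210) yields the 32-bit assumed range [200, 210).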
8434 
8435   bool
8436   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8437                    const Instruction *CtxI,
8438                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8439     Value *LHS = CmpI->getOperand(0);
8440     Value *RHS = CmpI->getOperand(1);
8441 
8442     // Simplify the operands first.
8443     bool UsedAssumedInformation = false;
8444     const auto &SimplifiedLHS = A.getAssumedSimplified(
8445         IRPosition::value(*LHS, getCallBaseContext()), *this,
8446         UsedAssumedInformation, AA::Interprocedural);
8447     if (!SimplifiedLHS.has_value())
8448       return true;
8449     if (!SimplifiedLHS.value())
8450       return false;
8451     LHS = *SimplifiedLHS;
8452 
8453     const auto &SimplifiedRHS = A.getAssumedSimplified(
8454         IRPosition::value(*RHS, getCallBaseContext()), *this,
8455         UsedAssumedInformation, AA::Interprocedural);
8456     if (!SimplifiedRHS.has_value())
8457       return true;
8458     if (!SimplifiedRHS.value())
8459       return false;
8460     RHS = *SimplifiedRHS;
8461 
8462     // TODO: Allow non integers as well.
8463     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8464       return false;
8465 
8466     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8467         *this, IRPosition::value(*LHS, getCallBaseContext()),
8468         DepClassTy::REQUIRED);
8469     QuerriedAAs.push_back(&LHSAA);
8470     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8471         *this, IRPosition::value(*RHS, getCallBaseContext()),
8472         DepClassTy::REQUIRED);
8473     QuerriedAAs.push_back(&RHSAA);
8474     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8475     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8476 
8477     // If one of them is empty set, we can't decide.
8478     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8479       return true;
8480 
8481     bool MustTrue = false, MustFalse = false;
8482 
8483     auto AllowedRegion =
8484         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8485 
8486     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8487       MustFalse = true;
8488 
8489     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8490       MustTrue = true;
8491 
8492     assert((!MustTrue || !MustFalse) &&
8493            "Either MustTrue or MustFalse should be false!");
8494 
8495     if (MustTrue)
8496       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8497     else if (MustFalse)
8498       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8499     else
8500       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8501 
8502     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8503                       << " " << RHSAA << "\n");
8504 
8505     // TODO: Track a known state too.
8506     return T.isValidState();
8507   }
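  // Illustrative example: for 'icmp ult %a, %b' with an assumed LHS range of
  // [0, 4) and an assumed RHS range of [10, 20), the comparison must be true,
  // so the single-element i1 range {1} is unioned into T.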
8508 
8509   /// See AbstractAttribute::updateImpl(...).
8510   ChangeStatus updateImpl(Attributor &A) override {
8511 
8512     IntegerRangeState T(getBitWidth());
8513     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
8514       Instruction *I = dyn_cast<Instruction>(&V);
8515       if (!I || isa<CallBase>(I)) {
8516 
8517         // Simplify the operand first.
8518         bool UsedAssumedInformation = false;
8519         const auto &SimplifiedOpV = A.getAssumedSimplified(
8520             IRPosition::value(V, getCallBaseContext()), *this,
8521             UsedAssumedInformation, AA::Interprocedural);
8522         if (!SimplifiedOpV.has_value())
8523           return true;
8524         if (!SimplifiedOpV.value())
8525           return false;
8526         Value *VPtr = *SimplifiedOpV;
8527 
8528         // If the value is not an instruction, we query the AA via the Attributor.
8529         const auto &AA = A.getAAFor<AAValueConstantRange>(
8530             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8531             DepClassTy::REQUIRED);
8532 
8533         // The clamp operator is not used so that the program point CtxI can be utilized.
8534         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8535 
8536         return T.isValidState();
8537       }
8538 
8539       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8540       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8541         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8542           return false;
8543       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8544         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8545           return false;
8546       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8547         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8548           return false;
8549       } else {
8550         // Give up with other instructions.
8551         // TODO: Add other instructions
8552 
8553         T.indicatePessimisticFixpoint();
8554         return false;
8555       }
8556 
8557       // Catch circular reasoning in a pessimistic way for now.
8558       // TODO: Check how the range evolves and if we stripped anything, see also
8559       //       AADereferenceable or AAAlign for similar situations.
8560       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8561         if (QueriedAA != this)
8562           continue;
8563         // If we are in a steady state we do not need to worry.
8564         if (T.getAssumed() == getState().getAssumed())
8565           continue;
8566         T.indicatePessimisticFixpoint();
8567       }
8568 
8569       return T.isValidState();
8570     };
8571 
8572     if (!VisitValueCB(getAssociatedValue(), getCtxI()))
8573       return indicatePessimisticFixpoint();
8574 
8575     // Ensure that long def-use chains can't cause circular reasoning either by
8576     // introducing a cutoff below.
8577     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8578       return ChangeStatus::UNCHANGED;
8579     if (++NumChanges > MaxNumChanges) {
8580       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8581                         << " changes but only " << MaxNumChanges
8582                         << " are allowed to avoid cyclic reasoning.\n");
8583       return indicatePessimisticFixpoint();
8584     }
8585     return ChangeStatus::CHANGED;
8586   }
8587 
8588   /// See AbstractAttribute::trackStatistics()
8589   void trackStatistics() const override {
8590     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8591   }
8592 
8593   /// Tracker to bail after too many widening steps of the constant range.
8594   int NumChanges = 0;
8595 
8596   /// Upper bound for the number of allowed changes (=widening steps) for the
8597   /// constant range before we give up.
8598   static constexpr int MaxNumChanges = 5;
8599 };
8600 
8601 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8602   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8603       : AAValueConstantRangeImpl(IRP, A) {}
8604 
8605   /// See AbstractAttribute::updateImpl(...).
8606   ChangeStatus updateImpl(Attributor &A) override {
8607     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8608                      "not be called");
8609   }
8610 
8611   /// See AbstractAttribute::trackStatistics()
8612   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8613 };
8614 
8615 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8616   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8617       : AAValueConstantRangeFunction(IRP, A) {}
8618 
8619   /// See AbstractAttribute::trackStatistics()
8620   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8621 };
8622 
8623 struct AAValueConstantRangeCallSiteReturned
8624     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8625                                      AAValueConstantRangeImpl,
8626                                      AAValueConstantRangeImpl::StateType,
8627                                      /* IntroduceCallBaseContext */ true> {
8628   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8629       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8630                                        AAValueConstantRangeImpl,
8631                                        AAValueConstantRangeImpl::StateType,
8632                                        /* IntroduceCallBaseContext */ true>(IRP,
8633                                                                             A) {
8634   }
8635 
8636   /// See AbstractAttribute::initialize(...).
8637   void initialize(Attributor &A) override {
8638     // If it is a call instruction with range metadata, use the metadata.
8639     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8640       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8641         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8642 
8643     AAValueConstantRangeImpl::initialize(A);
8644   }
8645 
8646   /// See AbstractAttribute::trackStatistics()
8647   void trackStatistics() const override {
8648     STATS_DECLTRACK_CSRET_ATTR(value_range)
8649   }
8650 };
8651 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8652   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8653       : AAValueConstantRangeFloating(IRP, A) {}
8654 
8655   /// See AbstractAttribute::manifest()
8656   ChangeStatus manifest(Attributor &A) override {
8657     return ChangeStatus::UNCHANGED;
8658   }
8659 
8660   /// See AbstractAttribute::trackStatistics()
8661   void trackStatistics() const override {
8662     STATS_DECLTRACK_CSARG_ATTR(value_range)
8663   }
8664 };
8665 } // namespace
8666 
8667 /// ------------------ Potential Values Attribute -------------------------
8668 
8669 namespace {
8670 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
8671   using StateType = PotentialConstantIntValuesState;
8672 
8673   AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
8674       : AAPotentialConstantValues(IRP, A) {}
8675 
8676   /// See AbstractAttribute::initialize(..).
8677   void initialize(Attributor &A) override {
8678     if (A.hasSimplificationCallback(getIRPosition()))
8679       indicatePessimisticFixpoint();
8680     else
8681       AAPotentialConstantValues::initialize(A);
8682   }
8683 
8684   bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S,
8685                                  bool &ContainsUndef) {
8686     SmallVector<AA::ValueAndContext> Values;
8687     bool UsedAssumedInformation = false;
8688     if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural,
8689                                       UsedAssumedInformation)) {
8690       if (!IRP.getAssociatedType()->isIntegerTy())
8691         return false;
8692       auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
8693           *this, IRP, DepClassTy::REQUIRED);
8694       if (!PotentialValuesAA.getState().isValidState())
8695         return false;
8696       ContainsUndef = PotentialValuesAA.getState().undefIsContained();
8697       S = PotentialValuesAA.getState().getAssumedSet();
8698       return true;
8699     }
8700 
8701     for (auto &It : Values) {
8702       if (isa<UndefValue>(It.getValue()))
8703         continue;
8704       auto *CI = dyn_cast<ConstantInt>(It.getValue());
8705       if (!CI)
8706         return false;
8707       S.insert(CI->getValue());
8708     }
8709     ContainsUndef = S.empty();
8710 
8711     return true;
8712   }
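  // Illustrative example: if the position simplifies to the constants {1, 2},
  // S becomes {1, 2} and ContainsUndef stays false; if every simplified value
  // is undef, S stays empty and ContainsUndef is set.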
8713 
8714   /// See AbstractAttribute::getAsStr().
8715   const std::string getAsStr() const override {
8716     std::string Str;
8717     llvm::raw_string_ostream OS(Str);
8718     OS << getState();
8719     return OS.str();
8720   }
8721 
8722   /// See AbstractAttribute::updateImpl(...).
8723   ChangeStatus updateImpl(Attributor &A) override {
8724     return indicatePessimisticFixpoint();
8725   }
8726 };
8727 
8728 struct AAPotentialConstantValuesArgument final
8729     : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8730                                       AAPotentialConstantValuesImpl,
8731                                       PotentialConstantIntValuesState> {
8732   using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8733                                                AAPotentialConstantValuesImpl,
8734                                                PotentialConstantIntValuesState>;
8735   AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
8736       : Base(IRP, A) {}
8737 
8738   /// See AbstractAttribute::initialize(..).
8739   void initialize(Attributor &A) override {
8740     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8741       indicatePessimisticFixpoint();
8742     } else {
8743       Base::initialize(A);
8744     }
8745   }
8746 
8747   /// See AbstractAttribute::trackStatistics()
8748   void trackStatistics() const override {
8749     STATS_DECLTRACK_ARG_ATTR(potential_values)
8750   }
8751 };
8752 
8753 struct AAPotentialConstantValuesReturned
8754     : AAReturnedFromReturnedValues<AAPotentialConstantValues,
8755                                    AAPotentialConstantValuesImpl> {
8756   using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
8757                                             AAPotentialConstantValuesImpl>;
8758   AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
8759       : Base(IRP, A) {}
8760 
8761   /// See AbstractAttribute::trackStatistics()
8762   void trackStatistics() const override {
8763     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8764   }
8765 };
8766 
8767 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
8768   AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
8769       : AAPotentialConstantValuesImpl(IRP, A) {}
8770 
8771   /// See AbstractAttribute::initialize(..).
8772   void initialize(Attributor &A) override {
8773     AAPotentialConstantValuesImpl::initialize(A);
8774     if (isAtFixpoint())
8775       return;
8776 
8777     Value &V = getAssociatedValue();
8778 
8779     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8780       unionAssumed(C->getValue());
8781       indicateOptimisticFixpoint();
8782       return;
8783     }
8784 
8785     if (isa<UndefValue>(&V)) {
8786       unionAssumedWithUndef();
8787       indicateOptimisticFixpoint();
8788       return;
8789     }
8790 
8791     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8792       return;
8793 
8794     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8795       return;
8796 
8797     indicatePessimisticFixpoint();
8798 
8799     LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
8800                       << getAssociatedValue() << "\n");
8801   }
8802 
8803   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8804                                 const APInt &RHS) {
8805     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8806   }
8807 
8808   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8809                                  uint32_t ResultBitWidth) {
8810     Instruction::CastOps CastOp = CI->getOpcode();
8811     switch (CastOp) {
8812     default:
8813       llvm_unreachable("unsupported or not integer cast");
8814     case Instruction::Trunc:
8815       return Src.trunc(ResultBitWidth);
8816     case Instruction::SExt:
8817       return Src.sext(ResultBitWidth);
8818     case Instruction::ZExt:
8819       return Src.zext(ResultBitWidth);
8820     case Instruction::BitCast:
8821       return Src;
8822     }
8823   }
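  // Illustrative example: truncating the 32-bit value 300 to i8 returns
  // APInt(8, 44) since 300 mod 256 == 44.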
8824 
8825   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8826                                        const APInt &LHS, const APInt &RHS,
8827                                        bool &SkipOperation, bool &Unsupported) {
8828     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8829     // Unsupported is set to true when the binary operator is not supported.
8830     // SkipOperation is set to true when UB occurs with the given operand pair
8831     // (LHS, RHS).
8832     // TODO: we should look at nsw and nuw keywords to handle operations
8833     //       that create poison or undef value.
8834     switch (BinOpcode) {
8835     default:
8836       Unsupported = true;
8837       return LHS;
8838     case Instruction::Add:
8839       return LHS + RHS;
8840     case Instruction::Sub:
8841       return LHS - RHS;
8842     case Instruction::Mul:
8843       return LHS * RHS;
8844     case Instruction::UDiv:
8845       if (RHS.isZero()) {
8846         SkipOperation = true;
8847         return LHS;
8848       }
8849       return LHS.udiv(RHS);
8850     case Instruction::SDiv:
8851       if (RHS.isZero()) {
8852         SkipOperation = true;
8853         return LHS;
8854       }
8855       return LHS.sdiv(RHS);
8856     case Instruction::URem:
8857       if (RHS.isZero()) {
8858         SkipOperation = true;
8859         return LHS;
8860       }
8861       return LHS.urem(RHS);
8862     case Instruction::SRem:
8863       if (RHS.isZero()) {
8864         SkipOperation = true;
8865         return LHS;
8866       }
8867       return LHS.srem(RHS);
8868     case Instruction::Shl:
8869       return LHS.shl(RHS);
8870     case Instruction::LShr:
8871       return LHS.lshr(RHS);
8872     case Instruction::AShr:
8873       return LHS.ashr(RHS);
8874     case Instruction::And:
8875       return LHS & RHS;
8876     case Instruction::Or:
8877       return LHS | RHS;
8878     case Instruction::Xor:
8879       return LHS ^ RHS;
8880     }
8881   }
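  // Illustrative example: a 'udiv' with RHS == 0 would be UB, so
  // SkipOperation is set and the pair is ignored; any opcode not handled
  // above sets Unsupported instead.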
8882 
8883   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8884                                            const APInt &LHS, const APInt &RHS) {
8885     bool SkipOperation = false;
8886     bool Unsupported = false;
8887     APInt Result =
8888         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8889     if (Unsupported)
8890       return false;
8891     // If SkipOperation is true, we can ignore this operand pair (LHS, RHS).
8892     if (!SkipOperation)
8893       unionAssumed(Result);
8894     return isValidState();
8895   }
8896 
8897   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8898     auto AssumedBefore = getAssumed();
8899     Value *LHS = ICI->getOperand(0);
8900     Value *RHS = ICI->getOperand(1);
8901 
8902     bool LHSContainsUndef = false, RHSContainsUndef = false;
8903     SetTy LHSAAPVS, RHSAAPVS;
8904     if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
8905                                    LHSContainsUndef) ||
8906         !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
8907                                    RHSContainsUndef))
8908       return indicatePessimisticFixpoint();
8909 
8910     // TODO: make use of undef flag to limit potential values aggressively.
8911     bool MaybeTrue = false, MaybeFalse = false;
8912     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8913     if (LHSContainsUndef && RHSContainsUndef) {
8914       // The result of any comparison between undefs can be soundly replaced
8915       // with undef.
8916       unionAssumedWithUndef();
8917     } else if (LHSContainsUndef) {
8918       for (const APInt &R : RHSAAPVS) {
8919         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8920         MaybeTrue |= CmpResult;
8921         MaybeFalse |= !CmpResult;
8922         if (MaybeTrue && MaybeFalse)
8923           return indicatePessimisticFixpoint();
8924       }
8925     } else if (RHSContainsUndef) {
8926       for (const APInt &L : LHSAAPVS) {
8927         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8928         MaybeTrue |= CmpResult;
8929         MaybeFalse |= !CmpResult;
8930         if (MaybeTrue && MaybeFalse)
8931           return indicatePessimisticFixpoint();
8932       }
8933     } else {
8934       for (const APInt &L : LHSAAPVS) {
8935         for (const APInt &R : RHSAAPVS) {
8936           bool CmpResult = calculateICmpInst(ICI, L, R);
8937           MaybeTrue |= CmpResult;
8938           MaybeFalse |= !CmpResult;
8939           if (MaybeTrue && MaybeFalse)
8940             return indicatePessimisticFixpoint();
8941         }
8942       }
8943     }
8944     if (MaybeTrue)
8945       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8946     if (MaybeFalse)
8947       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8948     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8949                                          : ChangeStatus::CHANGED;
8950   }
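  // Illustrative example: with potential values {3} for the LHS and {5} for
  // the RHS of an 'icmp ult', only "true" is possible, so the assumed set
  // becomes {1}.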
8951 
8952   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8953     auto AssumedBefore = getAssumed();
8954     Value *LHS = SI->getTrueValue();
8955     Value *RHS = SI->getFalseValue();
8956 
8957     bool UsedAssumedInformation = false;
8958     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8959                                                   UsedAssumedInformation);
8960 
8961     // Check if we only need one operand.
8962     bool OnlyLeft = false, OnlyRight = false;
8963     if (C && *C && (*C)->isOneValue())
8964       OnlyLeft = true;
8965     else if (C && *C && (*C)->isZeroValue())
8966       OnlyRight = true;
8967 
8968     bool LHSContainsUndef = false, RHSContainsUndef = false;
8969     SetTy LHSAAPVS, RHSAAPVS;
8970     if (!OnlyRight && !fillSetWithConstantValues(A, IRPosition::value(*LHS),
8971                                                  LHSAAPVS, LHSContainsUndef))
8972       return indicatePessimisticFixpoint();
8973 
8974     if (!OnlyLeft && !fillSetWithConstantValues(A, IRPosition::value(*RHS),
8975                                                 RHSAAPVS, RHSContainsUndef))
8976       return indicatePessimisticFixpoint();
8977 
8978     if (OnlyLeft || OnlyRight) {
8979       // select (true/false), lhs, rhs
8980       auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS;
8981       auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef;
8982 
8983       if (Undef)
8984         unionAssumedWithUndef();
8985       else {
8986         for (auto &It : *OpAA)
8987           unionAssumed(It);
8988       }
8989 
8990     } else if (LHSContainsUndef && RHSContainsUndef) {
8991       // select i1 *, undef, undef => undef
8992       unionAssumedWithUndef();
8993     } else {
8994       for (auto &It : LHSAAPVS)
8995         unionAssumed(It);
8996       for (auto &It : RHSAAPVS)
8997         unionAssumed(It);
8998     }
8999     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9000                                          : ChangeStatus::CHANGED;
9001   }
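  // Illustrative example: for 'select i1 %c, i32 %x, i32 %y' with an unknown
  // condition, potential values {1, 2} for %x and {3} for %y union to
  // {1, 2, 3}; if %c is known to be 1, only {1, 2} is taken.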
9002 
9003   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9004     auto AssumedBefore = getAssumed();
9005     if (!CI->isIntegerCast())
9006       return indicatePessimisticFixpoint();
9007     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9008     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9009     Value *Src = CI->getOperand(0);
9010 
9011     bool SrcContainsUndef = false;
9012     SetTy SrcPVS;
9013     if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS,
9014                                    SrcContainsUndef))
9015       return indicatePessimisticFixpoint();
9016 
9017     if (SrcContainsUndef)
9018       unionAssumedWithUndef();
9019     else {
9020       for (const APInt &S : SrcPVS) {
9021         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9022         unionAssumed(T);
9023       }
9024     }
9025     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9026                                          : ChangeStatus::CHANGED;
9027   }
9028 
9029   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9030     auto AssumedBefore = getAssumed();
9031     Value *LHS = BinOp->getOperand(0);
9032     Value *RHS = BinOp->getOperand(1);
9033 
9034     bool LHSContainsUndef = false, RHSContainsUndef = false;
9035     SetTy LHSAAPVS, RHSAAPVS;
9036     if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9037                                    LHSContainsUndef) ||
9038         !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9039                                    RHSContainsUndef))
9040       return indicatePessimisticFixpoint();
9041 
9042     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9043 
9044     // TODO: make use of undef flag to limit potential values aggressively.
9045     if (LHSContainsUndef && RHSContainsUndef) {
9046       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9047         return indicatePessimisticFixpoint();
9048     } else if (LHSContainsUndef) {
9049       for (const APInt &R : RHSAAPVS) {
9050         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9051           return indicatePessimisticFixpoint();
9052       }
9053     } else if (RHSContainsUndef) {
9054       for (const APInt &L : LHSAAPVS) {
9055         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9056           return indicatePessimisticFixpoint();
9057       }
9058     } else {
9059       for (const APInt &L : LHSAAPVS) {
9060         for (const APInt &R : RHSAAPVS) {
9061           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9062             return indicatePessimisticFixpoint();
9063         }
9064       }
9065     }
9066     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9067                                          : ChangeStatus::CHANGED;
9068   }
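  // Illustrative example: for 'mul i32 %a, %b' with potential values {2, 3}
  // for %a and {4} for %b, the resulting potential set is {8, 12}.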
9069 
9070   /// See AbstractAttribute::updateImpl(...).
9071   ChangeStatus updateImpl(Attributor &A) override {
9072     Value &V = getAssociatedValue();
9073     Instruction *I = dyn_cast<Instruction>(&V);
9074 
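    // Note: initialize(..) reached a fixpoint for any value that is not one
    // of the instruction kinds handled below, so I is expected to be non-null
    // when we get here.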
9075     if (auto *ICI = dyn_cast<ICmpInst>(I))
9076       return updateWithICmpInst(A, ICI);
9077 
9078     if (auto *SI = dyn_cast<SelectInst>(I))
9079       return updateWithSelectInst(A, SI);
9080 
9081     if (auto *CI = dyn_cast<CastInst>(I))
9082       return updateWithCastInst(A, CI);
9083 
9084     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9085       return updateWithBinaryOperator(A, BinOp);
9086 
9087     return indicatePessimisticFixpoint();
9088   }
9089 
9090   /// See AbstractAttribute::trackStatistics()
9091   void trackStatistics() const override {
9092     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9093   }
9094 };
9095 
9096 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
9097   AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
9098       : AAPotentialConstantValuesImpl(IRP, A) {}
9099 
9100   /// See AbstractAttribute::updateImpl(...).
9101   ChangeStatus updateImpl(Attributor &A) override {
9102     llvm_unreachable(
9103         "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
9104         "not be called");
9105   }
9106 
9107   /// See AbstractAttribute::trackStatistics()
9108   void trackStatistics() const override {
9109     STATS_DECLTRACK_FN_ATTR(potential_values)
9110   }
9111 };
9112 
9113 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
9114   AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
9115       : AAPotentialConstantValuesFunction(IRP, A) {}
9116 
9117   /// See AbstractAttribute::trackStatistics()
9118   void trackStatistics() const override {
9119     STATS_DECLTRACK_CS_ATTR(potential_values)
9120   }
9121 };
9122 
9123 struct AAPotentialConstantValuesCallSiteReturned
9124     : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9125                                      AAPotentialConstantValuesImpl> {
9126   AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
9127                                             Attributor &A)
9128       : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9129                                        AAPotentialConstantValuesImpl>(IRP, A) {}
9130 
9131   /// See AbstractAttribute::trackStatistics()
9132   void trackStatistics() const override {
9133     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9134   }
9135 };
9136 
9137 struct AAPotentialConstantValuesCallSiteArgument
9138     : AAPotentialConstantValuesFloating {
9139   AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
9140                                             Attributor &A)
9141       : AAPotentialConstantValuesFloating(IRP, A) {}
9142 
9143   /// See AbstractAttribute::initialize(..).
9144   void initialize(Attributor &A) override {
9145     AAPotentialConstantValuesImpl::initialize(A);
9146     if (isAtFixpoint())
9147       return;
9148 
9149     Value &V = getAssociatedValue();
9150 
9151     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9152       unionAssumed(C->getValue());
9153       indicateOptimisticFixpoint();
9154       return;
9155     }
9156 
9157     if (isa<UndefValue>(&V)) {
9158       unionAssumedWithUndef();
9159       indicateOptimisticFixpoint();
9160       return;
9161     }
9162   }
9163 
9164   /// See AbstractAttribute::updateImpl(...).
9165   ChangeStatus updateImpl(Attributor &A) override {
9166     Value &V = getAssociatedValue();
9167     auto AssumedBefore = getAssumed();
9168     auto &AA = A.getAAFor<AAPotentialConstantValues>(
9169         *this, IRPosition::value(V), DepClassTy::REQUIRED);
9170     const auto &S = AA.getAssumed();
9171     unionAssumed(S);
9172     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9173                                          : ChangeStatus::CHANGED;
9174   }
9175 
9176   /// See AbstractAttribute::trackStatistics()
9177   void trackStatistics() const override {
9178     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9179   }
9180 };
9181 
9182 /// ------------------------ NoUndef Attribute ---------------------------------
9183 struct AANoUndefImpl : AANoUndef {
9184   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9185 
9186   /// See AbstractAttribute::initialize(...).
9187   void initialize(Attributor &A) override {
9188     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9189       indicateOptimisticFixpoint();
9190       return;
9191     }
9192     Value &V = getAssociatedValue();
9193     if (isa<UndefValue>(V))
9194       indicatePessimisticFixpoint();
9195     else if (isa<FreezeInst>(V))
9196       indicateOptimisticFixpoint();
9197     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9198              isGuaranteedNotToBeUndefOrPoison(&V))
9199       indicateOptimisticFixpoint();
9200     else
9201       AANoUndef::initialize(A);
9202   }
9203 
9204   /// See followUsesInMBEC
9205   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9206                        AANoUndef::StateType &State) {
9207     const Value *UseV = U->get();
9208     const DominatorTree *DT = nullptr;
9209     AssumptionCache *AC = nullptr;
9210     InformationCache &InfoCache = A.getInfoCache();
9211     if (Function *F = getAnchorScope()) {
9212       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9213       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9214     }
9215     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9216     bool TrackUse = false;
9217     // Track use for instructions which must produce undef or poison bits when
9218     // at least one operand contains such bits.
9219     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9220       TrackUse = true;
9221     return TrackUse;
9222   }
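  // Illustrative example: for '%g = getelementptr i8, ptr %p, i64 4', undef
  // or poison in %p propagates to %g, so the use is tracked through the GEP
  // and knowledge at %g's uses also constrains %p.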
9223 
9224   /// See AbstractAttribute::getAsStr().
9225   const std::string getAsStr() const override {
9226     return getAssumed() ? "noundef" : "may-undef-or-poison";
9227   }
9228 
9229   ChangeStatus manifest(Attributor &A) override {
9230     // We don't manifest the noundef attribute for dead positions because the
9231     // associated values with dead positions would be replaced with undef
9232     // values.
9233     bool UsedAssumedInformation = false;
9234     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9235                         UsedAssumedInformation))
9236       return ChangeStatus::UNCHANGED;
9237     // A position whose simplified value does not have any value is
9238     // considered to be dead. We don't manifest noundef in such positions for
9239     // the same reason as above.
9240     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation,
9241                                 AA::Interprocedural)
9242              .has_value())
9243       return ChangeStatus::UNCHANGED;
9244     return AANoUndef::manifest(A);
9245   }
9246 };
9247 
9248 struct AANoUndefFloating : public AANoUndefImpl {
9249   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9250       : AANoUndefImpl(IRP, A) {}
9251 
9252   /// See AbstractAttribute::initialize(...).
9253   void initialize(Attributor &A) override {
9254     AANoUndefImpl::initialize(A);
9255     if (!getState().isAtFixpoint())
9256       if (Instruction *CtxI = getCtxI())
9257         followUsesInMBEC(*this, A, getState(), *CtxI);
9258   }
9259 
9260   /// See AbstractAttribute::updateImpl(...).
9261   ChangeStatus updateImpl(Attributor &A) override {
9262 
9263     SmallVector<AA::ValueAndContext> Values;
9264     bool UsedAssumedInformation = false;
9265     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
9266                                       AA::AnyScope, UsedAssumedInformation)) {
9267       Values.push_back({getAssociatedValue(), getCtxI()});
9268     }
9269 
9270     StateType T;
9271     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
9272       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9273                                              DepClassTy::REQUIRED);
9274       if (this == &AA) {
9275         T.indicatePessimisticFixpoint();
9276       } else {
9277         const AANoUndef::StateType &S =
9278             static_cast<const AANoUndef::StateType &>(AA.getState());
9279         T ^= S;
9280       }
9281       return T.isValidState();
9282     };
9283 
9284     for (const auto &VAC : Values)
9285       if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
9286         return indicatePessimisticFixpoint();
9287 
9288     return clampStateAndIndicateChange(getState(), T);
9289   }
9290 
9291   /// See AbstractAttribute::trackStatistics()
9292   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9293 };
9294 
9295 struct AANoUndefReturned final
9296     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9297   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9298       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9299 
9300   /// See AbstractAttribute::trackStatistics()
9301   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9302 };
9303 
9304 struct AANoUndefArgument final
9305     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9306   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9307       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9308 
9309   /// See AbstractAttribute::trackStatistics()
9310   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9311 };
9312 
9313 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9314   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9315       : AANoUndefFloating(IRP, A) {}
9316 
9317   /// See AbstractAttribute::trackStatistics()
9318   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9319 };
9320 
9321 struct AANoUndefCallSiteReturned final
9322     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9323   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9324       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9325 
9326   /// See AbstractAttribute::trackStatistics()
9327   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9328 };
9329 
9330 struct AACallEdgesImpl : public AACallEdges {
9331   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9332 
9333   const SetVector<Function *> &getOptimisticEdges() const override {
9334     return CalledFunctions;
9335   }
9336 
9337   bool hasUnknownCallee() const override { return HasUnknownCallee; }
9338 
9339   bool hasNonAsmUnknownCallee() const override {
9340     return HasUnknownCalleeNonAsm;
9341   }
9342 
9343   const std::string getAsStr() const override {
9344     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9345            std::to_string(CalledFunctions.size()) + "]";
9346   }
9347 
9348   void trackStatistics() const override {}
9349 
9350 protected:
9351   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9352     if (CalledFunctions.insert(Fn)) {
9353       Change = ChangeStatus::CHANGED;
9354       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9355                         << "\n");
9356     }
9357   }
9358 
9359   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9360     if (!HasUnknownCallee)
9361       Change = ChangeStatus::CHANGED;
9362     if (NonAsm && !HasUnknownCalleeNonAsm)
9363       Change = ChangeStatus::CHANGED;
9364     HasUnknownCalleeNonAsm |= NonAsm;
9365     HasUnknownCallee = true;
9366   }
9367 
9368 private:
9369   /// Optimistic set of functions that might be called by this position.
9370   SetVector<Function *> CalledFunctions;
9371 
9372   /// Is there any call with an unknown callee.
9373   bool HasUnknownCallee = false;
9374 
9375   /// Is there any call with an unknown callee, excluding any inline asm?
9376   bool HasUnknownCalleeNonAsm = false;
9377 };
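
// Illustrative usage sketch (not part of the pass): a client interested in
// the assumed callees of a function F could query this AA roughly as follows,
// where QueryingAA stands for whichever abstract attribute issues the query:
//
//   const auto &AAEdges = A.getAAFor<AACallEdges>(
//       QueryingAA, IRPosition::function(F), DepClassTy::OPTIONAL);
//   if (!AAEdges.hasUnknownCallee())
//     for (Function *Callee : AAEdges.getOptimisticEdges())
//       ; // Only these functions can be called, optimistically.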
9378 
9379 struct AACallEdgesCallSite : public AACallEdgesImpl {
9380   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9381       : AACallEdgesImpl(IRP, A) {}
9382   /// See AbstractAttribute::updateImpl(...).
9383   ChangeStatus updateImpl(Attributor &A) override {
9384     ChangeStatus Change = ChangeStatus::UNCHANGED;
9385 
9386     auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool {
9387       if (Function *Fn = dyn_cast<Function>(&V)) {
9388         addCalledFunction(Fn, Change);
9389       } else {
9390         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9391         setHasUnknownCallee(true, Change);
9392       }
9393 
9394       // Explore all values.
9395       return true;
9396     };
9397 
9398     SmallVector<AA::ValueAndContext> Values;
9399     // Process any value that we might call.
9400     auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) {
9401       bool UsedAssumedInformation = false;
9402       Values.clear();
9403       if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values,
9404                                         AA::AnyScope, UsedAssumedInformation)) {
9405         Values.push_back({*V, CtxI});
9406       }
9407       for (auto &VAC : Values)
9408         VisitValue(*VAC.getValue(), VAC.getCtxI());
9409     };
9410 
9411     CallBase *CB = cast<CallBase>(getCtxI());
9412 
9413     if (CB->isInlineAsm()) {
9414       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9415           !hasAssumption(*CB, "ompx_no_call_asm"))
9416         setHasUnknownCallee(false, Change);
9417       return Change;
9418     }
9419 
9420     // Process callee metadata if available.
9421     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9422       for (auto &Op : MD->operands()) {
9423         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9424         if (Callee)
9425           addCalledFunction(Callee, Change);
9426       }
9427       return Change;
9428     }
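
    // For illustration (hedged; exact syntax depends on the IR version):
    // !callees metadata marks an indirect call whose target must be one of
    // the listed functions, roughly
    //   call void %fp(), !callees !0
    //   !0 = !{void ()* @f, void ()* @g}
    // in which case @f and @g become the optimistic edges here.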
9429 
9430     // The simplest case: the called operand itself.
9431     ProcessCalledOperand(CB->getCalledOperand(), CB);
9432 
9433     // Process callback functions.
9434     SmallVector<const Use *, 4u> CallbackUses;
9435     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9436     for (const Use *U : CallbackUses)
9437       ProcessCalledOperand(U->get(), CB);
9438 
9439     return Change;
9440   }
9441 };
9442 
9443 struct AACallEdgesFunction : public AACallEdgesImpl {
9444   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9445       : AACallEdgesImpl(IRP, A) {}
9446 
9447   /// See AbstractAttribute::updateImpl(...).
9448   ChangeStatus updateImpl(Attributor &A) override {
9449     ChangeStatus Change = ChangeStatus::UNCHANGED;
9450 
9451     auto ProcessCallInst = [&](Instruction &Inst) {
9452       CallBase &CB = cast<CallBase>(Inst);
9453 
9454       auto &CBEdges = A.getAAFor<AACallEdges>(
9455           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9456       if (CBEdges.hasNonAsmUnknownCallee())
9457         setHasUnknownCallee(true, Change);
9458       if (CBEdges.hasUnknownCallee())
9459         setHasUnknownCallee(false, Change);
9460 
9461       for (Function *F : CBEdges.getOptimisticEdges())
9462         addCalledFunction(F, Change);
9463 
9464       return true;
9465     };
9466 
9467     // Visit all callable instructions.
9468     bool UsedAssumedInformation = false;
9469     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9470                                            UsedAssumedInformation,
9471                                            /* CheckBBLivenessOnly */ true)) {
9472       // If we haven't looked at all call-like instructions, assume that there
9473       // are unknown callees.
9474       setHasUnknownCallee(true, Change);
9475     }
9476 
9477     return Change;
9478   }
9479 };
9480 
9481 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9482 private:
9483   struct QuerySet {
9484     void markReachable(const Function &Fn) {
9485       Reachable.insert(&Fn);
9486       Unreachable.erase(&Fn);
9487     }
9488 
9489     /// If there is no information about the function, None is returned.
9490     Optional<bool> isCachedReachable(const Function &Fn) {
9491       // Assume that we can reach the function.
9492       // TODO: Be more specific with the unknown callee.
9493       if (CanReachUnknownCallee)
9494         return true;
9495 
9496       if (Reachable.count(&Fn))
9497         return true;
9498 
9499       if (Unreachable.count(&Fn))
9500         return false;
9501 
9502       return llvm::None;
9503     }
9504 
9505     /// Set of functions that we know for sure are reachable.
9506     DenseSet<const Function *> Reachable;
9507 
9508     /// Set of functions that are unreachable, but might become reachable.
9509     DenseSet<const Function *> Unreachable;
9510 
9511     /// If we can reach a function with a call to an unknown function, we assume
9512     /// that we can reach any function.
9513     bool CanReachUnknownCallee = false;
9514   };
9515 
9516   struct QueryResolver : public QuerySet {
9517     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9518                         ArrayRef<const AACallEdges *> AAEdgesList) {
9519       ChangeStatus Change = ChangeStatus::UNCHANGED;
9520 
9521       for (auto *AAEdges : AAEdgesList) {
9522         if (AAEdges->hasUnknownCallee()) {
9523           if (!CanReachUnknownCallee) {
9524             LLVM_DEBUG(dbgs()
9525                        << "[QueryResolver] Edges include unknown callee!\n");
9526             Change = ChangeStatus::CHANGED;
9527           }
9528           CanReachUnknownCallee = true;
9529           return Change;
9530         }
9531       }
9532 
9533       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9534         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9535           Change = ChangeStatus::CHANGED;
9536           markReachable(*Fn);
9537         }
9538       }
9539       return Change;
9540     }
9541 
9542     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9543                      ArrayRef<const AACallEdges *> AAEdgesList,
9544                      const Function &Fn) {
9545       Optional<bool> Cached = isCachedReachable(Fn);
9546       if (Cached)
9547         return Cached.value();
9548 
9549       // The query was not cached, thus it is new. We need to request an update
9550       // explicitly to make sure the information is properly run to a
9551       // fixpoint.
9552       A.registerForUpdate(AA);
9553 
9554       // We need to assume that this function can't reach Fn to prevent
9555       // an infinite loop if this function is recursive.
9556       Unreachable.insert(&Fn);
9557 
9558       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9559       if (Result)
9560         markReachable(Fn);
9561       return Result;
9562     }
9563 
9564     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9565                           ArrayRef<const AACallEdges *> AAEdgesList,
9566                           const Function &Fn) const {
9567 
9568       // Handle the most trivial case first.
9569       for (auto *AAEdges : AAEdgesList) {
9570         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9571 
9572         if (Edges.count(const_cast<Function *>(&Fn)))
9573           return true;
9574       }
9575 
9576       SmallVector<const AAFunctionReachability *, 8> Deps;
9577       for (auto &AAEdges : AAEdgesList) {
9578         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9579 
9580         for (Function *Edge : Edges) {
9581           // Functions that do not call back into the module can be ignored.
9582           if (Edge->hasFnAttribute(Attribute::NoCallback))
9583             continue;
9584 
9585           // We don't need a dependency if the result is reachable.
9586           const AAFunctionReachability &EdgeReachability =
9587               A.getAAFor<AAFunctionReachability>(
9588                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9589           Deps.push_back(&EdgeReachability);
9590 
9591           if (EdgeReachability.canReach(A, Fn))
9592             return true;
9593         }
9594       }
9595 
9596       // The result is false for now, set dependencies and leave.
9597       for (auto *Dep : Deps)
9598         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9599 
9600       return false;
9601     }
9602   };
9603 
9604   /// Get call edges that can be reached by this instruction.
9605   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9606                              const Instruction &Inst,
9607                              SmallVector<const AACallEdges *> &Result) const {
9608     // Determine the call-like instructions that we can reach from \p Inst.
9609     auto CheckCallBase = [&](Instruction &CBInst) {
9610       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9611         return true;
9612 
9613       auto &CB = cast<CallBase>(CBInst);
9614       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9615           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9616 
9617       Result.push_back(&AAEdges);
9618       return true;
9619     };
9620 
9621     bool UsedAssumedInformation = false;
9622     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9623                                              UsedAssumedInformation,
9624                                              /* CheckBBLivenessOnly */ true);
9625   }
9626 
9627 public:
9628   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9629       : AAFunctionReachability(IRP, A) {}
9630 
9631   bool canReach(Attributor &A, const Function &Fn) const override {
9632     if (!isValidState())
9633       return true;
9634 
9635     const AACallEdges &AAEdges =
9636         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9637 
9638     // Attributor returns attributes as const, so this function has to be
9639     // const for users of this attribute to use it without having to do
9640     // a const_cast.
9641     // This is a hack for us to be able to cache queries.
9642     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9643     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9644                                                           {&AAEdges}, Fn);
9645 
9646     return Result;
9647   }
9648 
9649   /// Can \p CB reach \p Fn?
9650   bool canReach(Attributor &A, CallBase &CB,
9651                 const Function &Fn) const override {
9652     if (!isValidState())
9653       return true;
9654 
9655     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9656         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9657 
9658     // Attributor returns attributes as const, so this function has to be
9659     // const for users of this attribute to use it without having to do
9660     // a const_cast.
9661     // This is a hack for us to be able to cache queries.
9662     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9663     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9664 
9665     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9666 
9667     return Result;
9668   }
9669 
9670   bool instructionCanReach(Attributor &A, const Instruction &Inst,
9671                            const Function &Fn) const override {
9672     if (!isValidState())
9673       return true;
9674 
9675     const auto &Reachability = A.getAAFor<AAReachability>(
9676         *this, IRPosition::function(*getAssociatedFunction()),
9677         DepClassTy::REQUIRED);
9678 
9679     SmallVector<const AACallEdges *> CallEdges;
9680     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
9681     // Attributor returns attributes as const, so this function has to be
9682     // const for users of this attribute to use it without having to do
9683     // a const_cast.
9684     // This is a hack for us to be able to cache queries.
9685     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9686     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
9687     if (!AllKnown) {
9688       LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges known, "
9689                            "may reach unknown callee!\n");
9690       InstQSet.CanReachUnknownCallee = true;
9691     }
9692 
9693     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
9694   }
9695 
9696   /// See AbstractAttribute::updateImpl(...).
9697   ChangeStatus updateImpl(Attributor &A) override {
9698     const AACallEdges &AAEdges =
9699         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9700     ChangeStatus Change = ChangeStatus::UNCHANGED;
9701 
9702     Change |= WholeFunction.update(A, *this, {&AAEdges});
9703 
9704     for (auto &CBPair : CBQueries) {
9705       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9706           *this, IRPosition::callsite_function(*CBPair.first),
9707           DepClassTy::REQUIRED);
9708 
9709       Change |= CBPair.second.update(A, *this, {&AAEdges});
9710     }
9711 
9712     // Update the Instruction queries.
9713     if (!InstQueries.empty()) {
9714       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
9715           *this, IRPosition::function(*getAssociatedFunction()),
9716           DepClassTy::REQUIRED);
9717 
9718       // Check for local call bases first.
9719       for (auto &InstPair : InstQueries) {
9720         SmallVector<const AACallEdges *> CallEdges;
9721         bool AllKnown =
9722             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
9723         // Update will return Changed if this affects any queries.
9724         if (!AllKnown) {
9725           LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges "
9726                                "known, may reach unknown callee!\n");
9727           InstPair.second.CanReachUnknownCallee = true;
9728         }
9729         Change |= InstPair.second.update(A, *this, CallEdges);
9730       }
9731     }
9732 
9733     return Change;
9734   }
9735 
9736   const std::string getAsStr() const override {
9737     size_t QueryCount =
9738         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9739 
9740     return "FunctionReachability [" +
9741            (canReachUnknownCallee()
9742                 ? "unknown"
9743                 : (std::to_string(WholeFunction.Reachable.size()) + "," +
9744                    std::to_string(QueryCount))) +
9745            "]";
9746   }
9747 
9748   void trackStatistics() const override {}
9749 
9750 private:
9751   bool canReachUnknownCallee() const override {
9752     return WholeFunction.CanReachUnknownCallee;
9753   }
9754 
9755   /// Used to answer if the whole function can reach a specific function.
9756   QueryResolver WholeFunction;
9757 
9758   /// Used to answer if a call base inside this function can reach a specific
9759   /// function.
9760   MapVector<const CallBase *, QueryResolver> CBQueries;
9761 
9762   /// This is for instruction queries that scan "forward".
9763   MapVector<const Instruction *, QueryResolver> InstQueries;
9764 };
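
// Usage sketch (illustrative only): a hypothetical client asking whether
// function F may transitively reach Target would do roughly
//
//   const auto &FnReach = A.getAAFor<AAFunctionReachability>(
//       QueryingAA, IRPosition::function(F), DepClassTy::OPTIONAL);
//   bool MayReach = FnReach.canReach(A, Target);
//
// where an invalid state conservatively answers true.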
9765 } // namespace
9766 
9767 template <typename AAType>
9768 static Optional<Constant *>
9769 askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
9770                       const IRPosition &IRP, Type &Ty) {
9771   if (!Ty.isIntegerTy())
9772     return nullptr;
9773 
9774   // This will also pass the call base context.
9775   const auto &AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE);
9776 
9777   Optional<Constant *> COpt = AA.getAssumedConstant(A);
9778 
9779   if (!COpt.has_value()) {
9780     A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
9781     return llvm::None;
9782   }
9783   if (auto *C = COpt.value()) {
9784     A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
9785     return C;
9786   }
9787   return nullptr;
9788 }
9789 
9790 Value *AAPotentialValues::getSingleValue(
9791     Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP,
9792     SmallVectorImpl<AA::ValueAndContext> &Values) {
9793   Type &Ty = *IRP.getAssociatedType();
9794   Optional<Value *> V;
9795   for (auto &It : Values) {
9796     V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
9797     if (V.has_value() && !V.value())
9798       break;
9799   }
9800   if (!V.has_value())
9801     return UndefValue::get(&Ty);
9802   return V.value();
9803 }
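
// Illustrative semantics of the combine above (hedged): an empty Values set
// yields undef (nothing constrains the result), a set whose elements all
// agree yields that single value, and conflicting values collapse to nullptr,
// signalling that no single replacement value exists.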
9804 
9805 namespace {
9806 struct AAPotentialValuesImpl : AAPotentialValues {
9807   using StateType = PotentialLLVMValuesState;
9808 
9809   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
9810       : AAPotentialValues(IRP, A) {}
9811 
9812   /// See AbstractAttribute::initialize(..).
9813   void initialize(Attributor &A) override {
9814     if (A.hasSimplificationCallback(getIRPosition())) {
9815       indicatePessimisticFixpoint();
9816       return;
9817     }
9818     Value *Stripped = getAssociatedValue().stripPointerCasts();
9819     if (isa<Constant>(Stripped)) {
9820       addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
9821                getAnchorScope());
9822       indicateOptimisticFixpoint();
9823       return;
9824     }
9825     AAPotentialValues::initialize(A);
9826   }
9827 
9828   /// See AbstractAttribute::getAsStr().
9829   const std::string getAsStr() const override {
9830     std::string Str;
9831     llvm::raw_string_ostream OS(Str);
9832     OS << getState();
9833     return OS.str();
9834   }
9835 
9836   template <typename AAType>
9837   static Optional<Value *> askOtherAA(Attributor &A,
9838                                       const AbstractAttribute &AA,
9839                                       const IRPosition &IRP, Type &Ty) {
9840     if (isa<Constant>(IRP.getAssociatedValue()))
9841       return &IRP.getAssociatedValue();
9842     Optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
9843     if (!C)
9844       return llvm::None;
9845     if (C.value())
9846       if (auto *CC = AA::getWithType(**C, Ty))
9847         return CC;
9848     return nullptr;
9849   }
9850 
9851   void addValue(Attributor &A, StateType &State, Value &V,
9852                 const Instruction *CtxI, AA::ValueScope S,
9853                 Function *AnchorScope) const {
9854 
9855     IRPosition ValIRP = IRPosition::value(V);
9856     if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
9857       for (auto &U : CB->args()) {
9858         if (U.get() != &V)
9859           continue;
9860         ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
9861         break;
9862       }
9863     }
9864 
9865     Value *VPtr = &V;
9866     if (ValIRP.getAssociatedType()->isIntegerTy()) {
9867       Type &Ty = *getAssociatedType();
9868       Optional<Value *> SimpleV =
9869           askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
9870       if (SimpleV.has_value() && !SimpleV.value()) {
9871         auto &PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
9872             *this, ValIRP, DepClassTy::OPTIONAL);
9873         if (PotentialConstantsAA.isValidState()) {
9874           for (auto &It : PotentialConstantsAA.getAssumedSet()) {
9875             State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
9876           }
9877           assert(!PotentialConstantsAA.undefIsContained() &&
9878                  "Undef should be an explicit value!");
9879           return;
9880         }
9881       }
9882       if (!SimpleV.has_value())
9883         return;
9884 
9885       if (SimpleV.value())
9886         VPtr = SimpleV.value();
9887     }
9888 
9889     if (isa<ConstantInt>(VPtr))
9890       CtxI = nullptr;
9891     if (!AA::isValidInScope(*VPtr, AnchorScope))
9892       S = AA::ValueScope(S | AA::Interprocedural);
9893 
9894     State.unionAssumed({{*VPtr, CtxI}, S});
9895   }
9896 
9897   /// Helper struct to tie a value+context pair together with the scope for
9898   /// which this is the simplified version.
9899   struct ItemInfo {
9900     AA::ValueAndContext I;
9901     AA::ValueScope S;
9902 
9903     bool operator==(const ItemInfo &II) const {
9904       return II.I == I && II.S == S;
9905     };
9906     bool operator<(const ItemInfo &II) const {
9907       if (I == II.I)
9908         return S < II.S;
9909       return I < II.I;
9910     };
9911   };
9912 
9913   bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) {
9914     SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap;
9915     for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) {
9916       if (!(CS & S))
9917         continue;
9918 
9919       bool UsedAssumedInformation = false;
9920       SmallVector<AA::ValueAndContext> Values;
9921       if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
9922                                         UsedAssumedInformation))
9923         return false;
9924 
9925       for (auto &It : Values)
9926         ValueScopeMap[It] += CS;
9927     }
9928     for (auto &It : ValueScopeMap)
9929       addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
9930                AA::ValueScope(It.second), getAnchorScope());
9931 
9932     return true;
9933   }
9934 
9935   void giveUpOnIntraprocedural(Attributor &A) {
9936     auto NewS = StateType::getBestState(getState());
9937     for (auto &It : getAssumedSet()) {
9938       if (It.second == AA::Intraprocedural)
9939         continue;
9940       addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
9941                AA::Interprocedural, getAnchorScope());
9942     }
9943     assert(!undefIsContained() && "Undef should be an explicit value!");
9944     addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
9945              getAnchorScope());
9946     getState() = NewS;
9947   }
9948 
9949   /// See AbstractState::indicatePessimisticFixpoint(...).
9950   ChangeStatus indicatePessimisticFixpoint() override {
9951     getState() = StateType::getBestState(getState());
9952     getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope});
9953     AAPotentialValues::indicateOptimisticFixpoint();
9954     return ChangeStatus::CHANGED;
9955   }
9956 
9957   /// See AbstractAttribute::updateImpl(...).
9958   ChangeStatus updateImpl(Attributor &A) override {
9959     return indicatePessimisticFixpoint();
9960   }
9961 
9962   /// See AbstractAttribute::manifest(...).
9963   ChangeStatus manifest(Attributor &A) override {
9964     SmallVector<AA::ValueAndContext> Values;
9965     for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
9966       Values.clear();
9967       if (!getAssumedSimplifiedValues(A, Values, S))
9968         continue;
9969       Value &OldV = getAssociatedValue();
9970       if (isa<UndefValue>(OldV))
9971         continue;
9972       Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
9973       if (!NewV || NewV == &OldV)
9974         continue;
9975       if (getCtxI() &&
9976           !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
9977         continue;
9978       if (A.changeAfterManifest(getIRPosition(), *NewV))
9979         return ChangeStatus::CHANGED;
9980     }
9981     return ChangeStatus::UNCHANGED;
9982   }
9983 
9984   bool getAssumedSimplifiedValues(Attributor &A,
9985                                   SmallVectorImpl<AA::ValueAndContext> &Values,
9986                                   AA::ValueScope S) const override {
9987     if (!isValidState())
9988       return false;
9989     for (auto &It : getAssumedSet())
9990       if (It.second & S)
9991         Values.push_back(It.first);
9992     assert(!undefIsContained() && "Undef should be an explicit value!");
9993     return true;
9994   }
9995 };
9996 
9997 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
9998   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
9999       : AAPotentialValuesImpl(IRP, A) {}
10000 
10001   /// See AbstractAttribute::updateImpl(...).
10002   ChangeStatus updateImpl(Attributor &A) override {
10003     auto AssumedBefore = getAssumed();
10004 
10005     genericValueTraversal(A);
10006 
10007     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10008                                            : ChangeStatus::CHANGED;
10009   }
10010 
10011   /// Helper struct to remember which AAIsDead instances we actually used.
10012   struct LivenessInfo {
10013     const AAIsDead *LivenessAA = nullptr;
10014     bool AnyDead = false;
10015   };
10016 
10017   /// Check if \p Cmp is a comparison we can simplify.
10018   ///
10019   /// We handle multiple cases, one in which at least one operand is an
10020   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
10021   /// operand. Return true if successful; in that case \p Worklist is updated.
10022   bool handleCmp(Attributor &A, CmpInst &Cmp, ItemInfo II,
10023                  SmallVectorImpl<ItemInfo> &Worklist) {
10024     Value *LHS = Cmp.getOperand(0);
10025     Value *RHS = Cmp.getOperand(1);
10026 
10027     // Simplify the operands first.
10028     bool UsedAssumedInformation = false;
10029     const auto &SimplifiedLHS = A.getAssumedSimplified(
10030         IRPosition::value(*LHS, getCallBaseContext()), *this,
10031         UsedAssumedInformation, AA::Intraprocedural);
10032     if (!SimplifiedLHS.has_value())
10033       return true;
10034     if (!SimplifiedLHS.value())
10035       return false;
10036     LHS = *SimplifiedLHS;
10037 
10038     const auto &SimplifiedRHS = A.getAssumedSimplified(
10039         IRPosition::value(*RHS, getCallBaseContext()), *this,
10040         UsedAssumedInformation, AA::Intraprocedural);
10041     if (!SimplifiedRHS.has_value())
10042       return true;
10043     if (!SimplifiedRHS.value())
10044       return false;
10045     RHS = *SimplifiedRHS;
10046 
10047     LLVMContext &Ctx = Cmp.getContext();
10048     // Handle the trivial case first in which we don't even need to think about
10049     // null or non-null.
10050     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
10051       Constant *NewV =
10052           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
10053       addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
10054                getAnchorScope());
10055       return true;
10056     }
10057 
10058     // From now on we only handle equalities (==, !=).
10059     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
10060     if (!ICmp || !ICmp->isEquality())
10061       return false;
10062 
10063     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
10064     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
10065     if (!LHSIsNull && !RHSIsNull)
10066       return false;
10067 
10068     // We are left with the nullptr ==/!= non-nullptr case. We'll use AANonNull
10069     // on the non-nullptr operand, and if we assume it is non-null we can
10070     // conclude the result of the comparison.
10071     assert((LHSIsNull || RHSIsNull) &&
10072            "Expected nullptr versus non-nullptr comparison at this point");
10073 
10074     // The index of the operand that is not the nullptr constant.
10075     unsigned PtrIdx = LHSIsNull;
10076     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
10077         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
10078         DepClassTy::REQUIRED);
10079     if (!PtrNonNullAA.isAssumedNonNull())
10080       return false;
10081 
10082     // The new value depends on the predicate, true for != and false for ==.
10083     Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
10084                                       ICmp->getPredicate() == CmpInst::ICMP_NE);
10085     addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S, getAnchorScope());
10086     return true;
10087   }
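
  // Example (illustrative): given IR roughly like
  //   %cmp = icmp eq ptr %p, null
  // where AANonNull lets us assume %p is non-null, the comparison simplifies
  // to i1 false (and to i1 true for the ne predicate).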
10088 
10089   bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II,
10090                         SmallVectorImpl<ItemInfo> &Worklist) {
10091     const Instruction *CtxI = II.I.getCtxI();
10092     bool UsedAssumedInformation = false;
10093 
10094     Optional<Constant *> C =
10095         A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
10096     bool NoValueYet = !C.has_value();
10097     if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
10098       return true;
10099     if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
10100       if (CI->isZero())
10101         Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
10102       else
10103         Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
10104     } else {
10105       // We could not simplify the condition, assume both values.
10106       Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
10107       Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
10108     }
10109     return true;
10110   }
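
  // Example (illustrative): for `select i1 %c, i32 %x, i32 %y`, a condition
  // assumed to be a non-zero constant enqueues only %x, an assumed zero
  // enqueues only %y, and a condition we cannot simplify conservatively
  // enqueues both arms.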
10111 
10112   bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II,
10113                       SmallVectorImpl<ItemInfo> &Worklist) {
10114     SmallSetVector<Value *, 4> PotentialCopies;
10115     SmallSetVector<Instruction *, 4> PotentialValueOrigins;
10116     bool UsedAssumedInformation = false;
10117     if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
10118                                         PotentialValueOrigins, *this,
10119                                         UsedAssumedInformation,
10120                                         /* OnlyExact */ true)) {
10121       LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "
10122                            "loaded values for load instruction "
10123                         << LI << "\n");
10124       return false;
10125     }
10126 
10127     // Do not simplify loads that are only used in llvm.assume if we cannot also
10128     // remove all stores that may feed into the load. The reason is that the
10129     // assume is probably worth something as long as the stores are around.
10130     InformationCache &InfoCache = A.getInfoCache();
10131     if (InfoCache.isOnlyUsedByAssume(LI)) {
10132       if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
10133             if (!I)
10134               return true;
10135             if (auto *SI = dyn_cast<StoreInst>(I))
10136               return A.isAssumedDead(SI->getOperandUse(0), this,
10137                                      /* LivenessAA */ nullptr,
10138                                      UsedAssumedInformation,
10139                                      /* CheckBBLivenessOnly */ false);
10140             return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
10141                                    UsedAssumedInformation,
10142                                    /* CheckBBLivenessOnly */ false);
10143           })) {
10144         LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is only used by assumes "
10145                              "and we cannot delete all the stores: "
10146                           << LI << "\n");
10147         return false;
10148       }
10149     }
10150 
10151     // Values have to be dynamically unique or we lose the fact that a
10152     // single llvm::Value might represent two runtime values (e.g.,
10153     // stack locations in different recursive calls).
10154     const Instruction *CtxI = II.I.getCtxI();
10155     bool ScopeIsLocal = (II.S & AA::Intraprocedural);
10156     bool AllLocal = ScopeIsLocal;
10157     bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
10158       AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
10159       return AA::isDynamicallyUnique(A, *this, *PC);
10160     });
10161     if (!DynamicallyUnique) {
10162       LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "
10163                            "values are dynamically unique: "
10164                         << LI << "\n");
10165       return false;
10166     }
10167 
10168     for (auto *PotentialCopy : PotentialCopies) {
10169       if (AllLocal) {
10170         Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
10171       } else {
10172         Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
10173       }
10174     }
10175     if (!AllLocal && ScopeIsLocal)
10176       addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
10177     return true;
10178   }
10179 
10180   bool handlePHINode(
10181       Attributor &A, PHINode &PHI, ItemInfo II,
10182       SmallVectorImpl<ItemInfo> &Worklist,
10183       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
10184     auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
10185       LivenessInfo &LI = LivenessAAs[&F];
10186       if (!LI.LivenessAA)
10187         LI.LivenessAA = &A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
10188                                               DepClassTy::NONE);
10189       return LI;
10190     };
10191 
10192     LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
10193     for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
10194       BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
10195       if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
10196         LI.AnyDead = true;
10197         continue;
10198       }
10199       Worklist.push_back(
10200           {{*PHI.getIncomingValue(u), IncomingBB->getTerminator()}, II.S});
10201     }
10202     return true;
10203   }
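
  // Example (illustrative): for `%p = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]`
  // where the edge from %bb1 is assumed dead, only %a is enqueued. The use
  // of the liveness assumption is recorded in LivenessAAs so the traversal
  // can register a dependence afterwards.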
10204 
10205   /// Use the generic, non-optimistic InstSimplify functionality if we managed
10206   /// to simplify any operand of the instruction \p I. Return true if
10207   /// successful; in that case \p Worklist will be updated.
10208   bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II,
10209                          SmallVectorImpl<ItemInfo> &Worklist) {
10210     bool SomeSimplified = false;
10211     bool UsedAssumedInformation = false;
10212 
10213     SmallVector<Value *, 8> NewOps(I.getNumOperands());
10214     int Idx = 0;
10215     for (Value *Op : I.operands()) {
10216       const auto &SimplifiedOp = A.getAssumedSimplified(
10217           IRPosition::value(*Op, getCallBaseContext()), *this,
10218           UsedAssumedInformation, AA::Intraprocedural);
10219       // If we are not sure about any operand, we are not sure about the entire
10220       // instruction; we'll wait.
10221       if (!SimplifiedOp.has_value())
10222         return true;
10223 
10224       if (SimplifiedOp.value())
10225         NewOps[Idx] = SimplifiedOp.value();
10226       else
10227         NewOps[Idx] = Op;
10228 
10229       SomeSimplified |= (NewOps[Idx] != Op);
10230       ++Idx;
10231     }
10232 
10233     // We won't bother with the InstSimplify interface if we didn't simplify any
10234     // operand ourselves.
10235     if (!SomeSimplified)
10236       return false;
10237 
10238     InformationCache &InfoCache = A.getInfoCache();
10239     Function *F = I.getFunction();
10240     const auto *DT =
10241         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
10242     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
10243     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
10244     OptimizationRemarkEmitter *ORE = nullptr;
10245 
10246     const DataLayout &DL = I.getModule()->getDataLayout();
10247     SimplifyQuery Q(DL, TLI, DT, AC, &I);
10248     Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q, ORE);
10249     if (!NewV || NewV == &I)
10250       return false;
10251 
10252     LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to "
10253                       << *NewV << "\n");
10254     Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S});
10255     return true;
10256   }
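
  // Example (illustrative): if %x in `%r = add i32 %x, 1` is assumed to
  // simplify to the constant 4, NewOps becomes {4, 1} and
  // simplifyInstructionWithOperands can fold the instruction to 5, which is
  // then enqueued in place of %r.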
10257 
10258   bool simplifyInstruction(
10259       Attributor &A, Instruction &I, ItemInfo II,
10260       SmallVectorImpl<ItemInfo> &Worklist,
10261       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
10262     if (auto *CI = dyn_cast<CmpInst>(&I))
10263       if (handleCmp(A, *CI, II, Worklist))
10264         return true;
10265 
10266     switch (I.getOpcode()) {
10267     case Instruction::Select:
10268       return handleSelectInst(A, cast<SelectInst>(I), II, Worklist);
10269     case Instruction::PHI:
10270       return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs);
10271     case Instruction::Load:
10272       return handleLoadInst(A, cast<LoadInst>(I), II, Worklist);
10273     default:
10274       return handleGenericInst(A, I, II, Worklist);
10275     }
10276     return false;
10277   }
10278 
10279   void genericValueTraversal(Attributor &A) {
10280     SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
10281 
10282     Value *InitialV = &getAssociatedValue();
10283     SmallSet<ItemInfo, 16> Visited;
10284     SmallVector<ItemInfo, 16> Worklist;
10285     Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope});
10286 
10287     int Iteration = 0;
10288     do {
10289       ItemInfo II = Worklist.pop_back_val();
10290       Value *V = II.I.getValue();
10291       assert(V);
10292       const Instruction *CtxI = II.I.getCtxI();
10293       AA::ValueScope S = II.S;
10294 
10295       // Check if we should process the current value. To prevent endless
10296       // recursion, keep a record of the values we followed!
10297       if (!Visited.insert(II).second)
10298         continue;
10299 
10300       // Make sure we limit the compile time for complex expressions.
10301       if (Iteration++ >= MaxPotentialValuesIterations) {
10302         LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
10303                           << Iteration << "!\n");
10304         addValue(A, getState(), *V, CtxI, S, getAnchorScope());
10305         continue;
10306       }
10307 
10308       // Explicitly look through calls with a "returned" attribute if we do
10309       // not have a pointer, as stripPointerCasts only works on pointers.
10310       Value *NewV = nullptr;
10311       if (V->getType()->isPointerTy()) {
10312         NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
10313       } else {
10314         auto *CB = dyn_cast<CallBase>(V);
10315         if (CB && CB->getCalledFunction()) {
10316           for (Argument &Arg : CB->getCalledFunction()->args())
10317             if (Arg.hasReturnedAttr()) {
10318               NewV = CB->getArgOperand(Arg.getArgNo());
10319               break;
10320             }
10321         }
10322       }
10323       if (NewV && NewV != V) {
10324         Worklist.push_back({{*NewV, CtxI}, S});
10325         continue;
10326       }
10327 
10328       if (auto *I = dyn_cast<Instruction>(V)) {
10329         if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
10330           continue;
10331       }
10332 
10333       if (V != InitialV || isa<Argument>(V))
10334         if (recurseForValue(A, IRPosition::value(*V), II.S))
10335           continue;
10336 
10337       // If we haven't stripped anything we give up.
10338       if (V == InitialV && CtxI == getCtxI()) {
10339         indicatePessimisticFixpoint();
10340         return;
10341       }
10342 
10343       addValue(A, getState(), *V, CtxI, S, getAnchorScope());
10344     } while (!Worklist.empty());
10345 
10346     // If we actually used liveness information, we have to record a
10347     // dependence.
10348     for (auto &It : LivenessAAs)
10349       if (It.second.AnyDead)
10350         A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
10351   }
10352 
10353   /// See AbstractAttribute::trackStatistics()
10354   void trackStatistics() const override {
10355     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
10356   }
10357 };
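
// Worked example for the traversal above (illustrative): starting from
// `%v = select i1 %c, i32 %x, i32 7` with a condition we cannot simplify,
// handleSelectInst enqueues both arms; the constant 7 ends up in the assumed
// set, while %x is explored further (e.g., via recurseForValue if it is an
// argument), with its scope tracked per ItemInfo.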
10358 
10359 struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
10360   using Base = AAPotentialValuesImpl;
10361   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
10362       : Base(IRP, A) {}
10363 
10364   /// See AbstractAttribute::initialize(..).
10365   void initialize(Attributor &A) override {
10366     auto &Arg = cast<Argument>(getAssociatedValue());
10367     if (Arg.hasPointeeInMemoryValueAttr())
10368       indicatePessimisticFixpoint();
10369   }
10370 
10371   /// See AbstractAttribute::updateImpl(...).
10372   ChangeStatus updateImpl(Attributor &A) override {
10373     auto AssumedBefore = getAssumed();
10374 
10375     unsigned CSArgNo = getCallSiteArgNo();
10376 
10377     bool UsedAssumedInformation = false;
10378     SmallVector<AA::ValueAndContext> Values;
10379     auto CallSitePred = [&](AbstractCallSite ACS) {
10380       const auto CSArgIRP = IRPosition::callsite_argument(ACS, CSArgNo);
10381       if (CSArgIRP.getPositionKind() == IRP_INVALID)
10382         return false;
10383 
10384       if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
10385                                         AA::Interprocedural,
10386                                         UsedAssumedInformation))
10387         return false;
10388 
10389       return isValidState();
10390     };
10391 
10392     if (!A.checkForAllCallSites(CallSitePred, *this,
10393                                 /* RequireAllCallSites */ true,
10394                                 UsedAssumedInformation))
10395       return indicatePessimisticFixpoint();
10396 
10397     Function *Fn = getAssociatedFunction();
10398     bool AnyNonLocal = false;
10399     for (auto &It : Values) {
10400       if (isa<Constant>(It.getValue())) {
10401         addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
10402                  getAnchorScope());
10403         continue;
10404       }
10405       if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
10406         return indicatePessimisticFixpoint();
10407 
10408       if (auto *Arg = dyn_cast<Argument>(It.getValue()))
10409         if (Arg->getParent() == Fn) {
10410           addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
10411                    getAnchorScope());
10412           continue;
10413         }
10414       addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural,
10415                getAnchorScope());
10416       AnyNonLocal = true;
10417     }
10418     if (undefIsContained())
10419       unionAssumedWithUndef();
10420     if (AnyNonLocal)
10421       giveUpOnIntraprocedural(A);
10422 
10423     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10424                                            : ChangeStatus::CHANGED;
10425   }
10426 
10427   /// See AbstractAttribute::trackStatistics()
10428   void trackStatistics() const override {
10429     STATS_DECLTRACK_ARG_ATTR(potential_values)
10430   }
10431 };
10432 
10433 struct AAPotentialValuesReturned
10434     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
10435   using Base =
10436       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
10437   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
10438       : Base(IRP, A) {}
10439 
10440   /// See AbstractAttribute::initialize(..).
10441   void initialize(Attributor &A) override {
10442     if (A.hasSimplificationCallback(getIRPosition()))
10443       indicatePessimisticFixpoint();
10444     else
10445       AAPotentialValues::initialize(A);
10446   }
10447 
10448   ChangeStatus manifest(Attributor &A) override {
10449     // We queried AAValueSimplify for the returned values so they will be
10450     // replaced if a simplified form was found. Nothing to do here.
10451     return ChangeStatus::UNCHANGED;
10452   }
10453 
10454   ChangeStatus indicatePessimisticFixpoint() override {
10455     return AAPotentialValues::indicatePessimisticFixpoint();
10456   }
10457 
10458   /// See AbstractAttribute::trackStatistics()
10459   void trackStatistics() const override {
10460     STATS_DECLTRACK_FNRET_ATTR(potential_values)
10461   }
10462 };
10463 
10464 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
10465   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
10466       : AAPotentialValuesImpl(IRP, A) {}
10467 
10468   /// See AbstractAttribute::updateImpl(...).
10469   ChangeStatus updateImpl(Attributor &A) override {
10470     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
10471                      "not be called");
10472   }
10473 
10474   /// See AbstractAttribute::trackStatistics()
10475   void trackStatistics() const override {
10476     STATS_DECLTRACK_FN_ATTR(potential_values)
10477   }
10478 };
10479 
10480 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
10481   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
10482       : AAPotentialValuesFunction(IRP, A) {}
10483 
10484   /// See AbstractAttribute::trackStatistics()
10485   void trackStatistics() const override {
10486     STATS_DECLTRACK_CS_ATTR(potential_values)
10487   }
10488 };
10489 
10490 struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl {
10491   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
10492       : AAPotentialValuesImpl(IRP, A) {}
10493 
10494   /// See AbstractAttribute::updateImpl(...).
10495   ChangeStatus updateImpl(Attributor &A) override {
10496     auto AssumedBefore = getAssumed();
10497 
10498     Function *Callee = getAssociatedFunction();
10499     if (!Callee)
10500       return indicatePessimisticFixpoint();
10501 
10502     bool UsedAssumedInformation = false;
10503     auto *CB = cast<CallBase>(getCtxI());
10504     if (CB->isMustTailCall() &&
10505         !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr,
10506                          UsedAssumedInformation))
10507       return indicatePessimisticFixpoint();
10508 
10509     SmallVector<AA::ValueAndContext> Values;
10510     if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
10511                                       Values, AA::Intraprocedural,
10512                                       UsedAssumedInformation))
10513       return indicatePessimisticFixpoint();
10514 
10515     Function *Caller = CB->getCaller();
10516 
10517     bool AnyNonLocal = false;
10518     for (auto &It : Values) {
10519       Value *V = It.getValue();
10520       Optional<Value *> CallerV = A.translateArgumentToCallSiteContent(
10521           V, *CB, *this, UsedAssumedInformation);
10522       if (!CallerV.has_value()) {
10523         // Nothing to do as long as no value was determined.
10524         continue;
10525       }
10526       V = CallerV.value() ? CallerV.value() : V;
10527       if (AA::isDynamicallyUnique(A, *this, *V) &&
10528           AA::isValidInScope(*V, Caller)) {
10529         if (CallerV.value()) {
10530           SmallVector<AA::ValueAndContext> ArgValues;
10531           IRPosition IRP = IRPosition::value(*V);
10532           if (auto *Arg = dyn_cast<Argument>(V))
10533             if (Arg->getParent() == CB->getCalledFunction())
10534               IRP = IRPosition::callsite_argument(*CB, Arg->getArgNo());
10535           if (recurseForValue(A, IRP, AA::AnyScope))
10536             continue;
10537         }
10538         addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
10539       } else {
10540         AnyNonLocal = true;
10541         break;
10542       }
10543     }
10544     if (AnyNonLocal) {
10545       Values.clear();
10546       if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
10547                                         Values, AA::Interprocedural,
10548                                         UsedAssumedInformation))
10549         return indicatePessimisticFixpoint();
10550       AnyNonLocal = false;
10551       getState() = PotentialLLVMValuesState::getBestState();
10552       for (auto &It : Values) {
10553         Value *V = It.getValue();
10554         if (!AA::isDynamicallyUnique(A, *this, *V))
10555           return indicatePessimisticFixpoint();
10556         if (AA::isValidInScope(*V, Caller)) {
10557           addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
10558         } else {
10559           AnyNonLocal = true;
10560           addValue(A, getState(), *V, CB, AA::Interprocedural,
10561                    getAnchorScope());
10562         }
10563       }
10564       if (AnyNonLocal)
10565         giveUpOnIntraprocedural(A);
10566     }
10567     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10568                                            : ChangeStatus::CHANGED;
10569   }
10570 
10571   ChangeStatus indicatePessimisticFixpoint() override {
10572     return AAPotentialValues::indicatePessimisticFixpoint();
10573   }
10574 
10575   /// See AbstractAttribute::trackStatistics()
10576   void trackStatistics() const override {
10577     STATS_DECLTRACK_CSRET_ATTR(potential_values)
10578   }
10579 };
10580 
10581 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
10582   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
10583       : AAPotentialValuesFloating(IRP, A) {}
10584 
10585   /// See AbstractAttribute::trackStatistics()
10586   void trackStatistics() const override {
10587     STATS_DECLTRACK_CSARG_ATTR(potential_values)
10588   }
10589 };
10590 } // namespace
10591 
10592 /// ---------------------- Assumption Propagation ------------------------------
10593 namespace {
10594 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10595   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10596                        const DenseSet<StringRef> &Known)
10597       : AAAssumptionInfo(IRP, A, Known) {}
10598 
10599   bool hasAssumption(const StringRef Assumption) const override {
10600     return isValidState() && setContains(Assumption);
10601   }
10602 
10603   /// See AbstractAttribute::getAsStr()
10604   const std::string getAsStr() const override {
10605     const SetContents &Known = getKnown();
10606     const SetContents &Assumed = getAssumed();
10607 
10608     const std::string KnownStr =
10609         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10610     const std::string AssumedStr =
10611         (Assumed.isUniversal())
10612             ? "Universal"
10613             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10614 
10615     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10616   }
10617 };
10618 
10619 /// Propagates assumption information from parent functions to all of their
10620 /// successors. An assumption can be propagated if the containing function
10621 /// dominates the called function.
10622 ///
10623 /// We start with a "known" set of assumptions already valid for the associated
10624 /// function and an "assumed" set that initially contains all possible
10625 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10626 /// contents as concrete values are known. The concrete values are seeded by the
10627 /// first nodes that are either entries into the call graph, or contain no
10628 /// assumptions. Each node is updated as the intersection of the assumed state
10629 /// with all of its predecessors.
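///
/// Example (illustrative): if the associated function is called from caller A
/// holding assumptions {"x","y"} and from caller B holding {"x"}, the update
/// intersects the call-site states and the assumed set narrows to {"x"}.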
10630 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10631   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10632       : AAAssumptionInfoImpl(IRP, A,
10633                              getAssumptions(*IRP.getAssociatedFunction())) {}
10634 
10635   /// See AbstractAttribute::manifest(...).
10636   ChangeStatus manifest(Attributor &A) override {
10637     const auto &Assumptions = getKnown();
10638 
10639     // Don't manifest a universal set if it somehow made it here.
10640     if (Assumptions.isUniversal())
10641       return ChangeStatus::UNCHANGED;
10642 
10643     Function *AssociatedFunction = getAssociatedFunction();
10644 
10645     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10646 
10647     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10648   }
10649 
10650   /// See AbstractAttribute::updateImpl(...).
10651   ChangeStatus updateImpl(Attributor &A) override {
10652     bool Changed = false;
10653 
10654     auto CallSitePred = [&](AbstractCallSite ACS) {
10655       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10656           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10657           DepClassTy::REQUIRED);
10658       // Get the set of assumptions shared by all of this function's callers.
10659       Changed |= getIntersection(AssumptionAA.getAssumed());
10660       return !getAssumed().empty() || !getKnown().empty();
10661     };
10662 
10663     bool UsedAssumedInformation = false;
10664     // Get the intersection of all assumptions held by this node's predecessors.
10665     // If we don't know all the call sites then this is either an entry into the
10666     // call graph or an empty node. This node is known to only contain its own
10667     // assumptions and can be propagated to its successors.
10668     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10669                                 UsedAssumedInformation))
10670       return indicatePessimisticFixpoint();
10671 
10672     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10673   }
10674 
10675   void trackStatistics() const override {}
10676 };
10677 
10678 /// Assumption Info defined for call sites.
10679 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10680 
10681   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10682       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10683 
10684   /// See AbstractAttribute::initialize(...).
10685   void initialize(Attributor &A) override {
10686     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10687     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10688   }
10689 
10690   /// See AbstractAttribute::manifest(...).
10691   ChangeStatus manifest(Attributor &A) override {
10692     // Don't manifest a universal set if it somehow made it here.
10693     if (getKnown().isUniversal())
10694       return ChangeStatus::UNCHANGED;
10695 
10696     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10697     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10698 
10699     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10700   }
10701 
10702   /// See AbstractAttribute::updateImpl(...).
10703   ChangeStatus updateImpl(Attributor &A) override {
10704     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10705     auto &AssumptionAA =
10706         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10707     bool Changed = getIntersection(AssumptionAA.getAssumed());
10708     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10709   }
10710 
10711   /// See AbstractAttribute::trackStatistics()
10712   void trackStatistics() const override {}
10713 
10714 private:
10715   /// Helper to initialize the known set to all the assumptions this call and
10716   /// the callee contain.
10717   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10718     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10719     auto Assumptions = getAssumptions(CB);
10720     if (Function *F = IRP.getAssociatedFunction())
10721       set_union(Assumptions, getAssumptions(*F));
10724     return Assumptions;
10725   }
10726 };
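
// For reference (hedged): assumptions are plain strings kept in the
// "llvm.assume" attribute as a comma-separated list, e.g., roughly
//   declare void @f() "llvm.assume"="ompx_no_call_asm"
// which getAssumptions/addAssumptions read and update.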
10727 } // namespace
10728 
10729 AACallGraphNode *AACallEdgeIterator::operator*() const {
10730   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10731       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10732 }
10733 
10734 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10735 
10736 const char AAReturnedValues::ID = 0;
10737 const char AANoUnwind::ID = 0;
10738 const char AANoSync::ID = 0;
10739 const char AANoFree::ID = 0;
10740 const char AANonNull::ID = 0;
10741 const char AANoRecurse::ID = 0;
10742 const char AAWillReturn::ID = 0;
10743 const char AAUndefinedBehavior::ID = 0;
10744 const char AANoAlias::ID = 0;
10745 const char AAReachability::ID = 0;
10746 const char AANoReturn::ID = 0;
10747 const char AAIsDead::ID = 0;
10748 const char AADereferenceable::ID = 0;
10749 const char AAAlign::ID = 0;
10750 const char AAInstanceInfo::ID = 0;
10751 const char AANoCapture::ID = 0;
10752 const char AAValueSimplify::ID = 0;
10753 const char AAHeapToStack::ID = 0;
10754 const char AAPrivatizablePtr::ID = 0;
10755 const char AAMemoryBehavior::ID = 0;
10756 const char AAMemoryLocation::ID = 0;
10757 const char AAValueConstantRange::ID = 0;
10758 const char AAPotentialConstantValues::ID = 0;
10759 const char AAPotentialValues::ID = 0;
10760 const char AANoUndef::ID = 0;
10761 const char AACallEdges::ID = 0;
10762 const char AAFunctionReachability::ID = 0;
10763 const char AAPointerInfo::ID = 0;
10764 const char AAAssumptionInfo::ID = 0;
10765 
10766 // Macro magic to create the static generator function for attributes that
10767 // follow the naming scheme.
10768 
10769 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10770   case IRPosition::PK:                                                         \
10771     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10772 
10773 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10774   case IRPosition::PK:                                                         \
10775     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10776     ++NumAAs;                                                                  \
10777     break;
10778 
10779 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10780   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10781     CLASS *AA = nullptr;                                                       \
10782     switch (IRP.getPositionKind()) {                                           \
10783       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10784       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10785       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10786       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10787       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10788       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10789       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10790       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10791     }                                                                          \
10792     return *AA;                                                                \
10793   }
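
// For illustration, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// expands (eliding the other SWITCH_PK_INV cases) to:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }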
10794 
10795 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10796   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10797     CLASS *AA = nullptr;                                                       \
10798     switch (IRP.getPositionKind()) {                                           \
10799       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10800       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10801       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10802       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10803       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10804       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10805       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10806       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10807     }                                                                          \
10808     return *AA;                                                                \
10809   }
10810 
10811 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10812   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10813     CLASS *AA = nullptr;                                                       \
10814     switch (IRP.getPositionKind()) {                                           \
10815       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10816       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10817       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10818       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10819       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10820       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10821       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10822       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10823     }                                                                          \
10824     return *AA;                                                                \
10825   }
10826 
10827 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10828   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10829     CLASS *AA = nullptr;                                                       \
10830     switch (IRP.getPositionKind()) {                                           \
10831       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10832       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10833       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10834       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10835       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10836       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10837       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10838       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10839     }                                                                          \
10840     return *AA;                                                                \
10841   }
10842 
10843 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10844   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10845     CLASS *AA = nullptr;                                                       \
10846     switch (IRP.getPositionKind()) {                                           \
10847       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10848       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10849       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10850       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10851       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10852       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10853       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10854       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10855     }                                                                          \
10856     return *AA;                                                                \
10857   }
10858 
10859 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10860 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10861 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10862 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10863 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10864 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10865 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10866 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10867 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10868 
10869 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10870 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10871 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10872 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10873 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10874 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10875 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10876 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10877 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
10878 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
10879 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10880 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10881 
10882 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10883 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10884 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10885 
10886 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10887 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10888 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10889 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10890 
10891 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10892 
10893 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10894 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10895 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10896 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10897 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10898 #undef SWITCH_PK_CREATE
10899 #undef SWITCH_PK_INV
10900