xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp (revision 79ac3c12a714bcd3f2354c52d948aed9575c46d6)
1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "attributor"
41 
42 static cl::opt<bool> ManifestInternal(
43     "attributor-manifest-internal", cl::Hidden,
44     cl::desc("Manifest Attributor internal string attributes."),
45     cl::init(false));
46 
47 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48                                        cl::Hidden);
49 
50 template <>
51 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52 
53 static cl::opt<unsigned, true> MaxPotentialValues(
54     "attributor-max-potential-values", cl::Hidden,
55     cl::desc("Maximum number of potential values to be "
56              "tracked for each position."),
57     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58     cl::init(7));
59 
60 STATISTIC(NumAAs, "Number of abstract attributes created");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
71 // If there is a single "increment" side one can use the macro
72 // STATS_DECLTRACK with a custom message. If there are multiple increment
73 // sides, STATS_DECL and STATS_TRACK can also be used separately.
74 //
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
105 
106 // Specialization of the operator<< for abstract attributes subclasses. This
107 // disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAMemoryLocation)
132 PIPE_OPERATOR(AAValueConstantRange)
133 PIPE_OPERATOR(AAPrivatizablePtr)
134 PIPE_OPERATOR(AAUndefinedBehavior)
135 PIPE_OPERATOR(AAPotentialValues)
136 PIPE_OPERATOR(AANoUndef)
137 
138 #undef PIPE_OPERATOR
139 } // namespace llvm
140 
141 namespace {
142 
143 static Optional<ConstantInt *>
144 getAssumedConstantInt(Attributor &A, const Value &V,
145                       const AbstractAttribute &AA,
146                       bool &UsedAssumedInformation) {
147   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
148   if (C.hasValue())
149     return dyn_cast_or_null<ConstantInt>(C.getValue());
150   return llvm::None;
151 }
152 
153 /// Get pointer operand of memory accessing instruction. If \p I is
154 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
155 /// is set to false and the instruction is volatile, return nullptr.
156 static const Value *getPointerOperand(const Instruction *I,
157                                       bool AllowVolatile) {
158   if (auto *LI = dyn_cast<LoadInst>(I)) {
159     if (!AllowVolatile && LI->isVolatile())
160       return nullptr;
161     return LI->getPointerOperand();
162   }
163 
164   if (auto *SI = dyn_cast<StoreInst>(I)) {
165     if (!AllowVolatile && SI->isVolatile())
166       return nullptr;
167     return SI->getPointerOperand();
168   }
169 
170   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
171     if (!AllowVolatile && CXI->isVolatile())
172       return nullptr;
173     return CXI->getPointerOperand();
174   }
175 
176   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
177     if (!AllowVolatile && RMWI->isVolatile())
178       return nullptr;
179     return RMWI->getPointerOperand();
180   }
181 
182   return nullptr;
183 }
184 
185 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
186 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
187 /// getelement pointer instructions that traverse the natural type of \p Ptr if
188 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
189 /// through a cast to i8*.
190 ///
191 /// TODO: This could probably live somewhere more prominantly if it doesn't
192 ///       already exist.
193 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
194                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
195   assert(Offset >= 0 && "Negative offset not supported yet!");
196   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
197                     << "-bytes as " << *ResTy << "\n");
198 
199   // The initial type we are trying to traverse to get nice GEPs.
200   Type *Ty = Ptr->getType();
201 
202   SmallVector<Value *, 4> Indices;
203   std::string GEPName = Ptr->getName().str();
204   while (Offset) {
205     uint64_t Idx, Rem;
206 
207     if (auto *STy = dyn_cast<StructType>(Ty)) {
208       const StructLayout *SL = DL.getStructLayout(STy);
209       if (int64_t(SL->getSizeInBytes()) < Offset)
210         break;
211       Idx = SL->getElementContainingOffset(Offset);
212       assert(Idx < STy->getNumElements() && "Offset calculation error!");
213       Rem = Offset - SL->getElementOffset(Idx);
214       Ty = STy->getElementType(Idx);
215     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
216       Ty = PTy->getElementType();
217       if (!Ty->isSized())
218         break;
219       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
220       assert(ElementSize && "Expected type with size!");
221       Idx = Offset / ElementSize;
222       Rem = Offset % ElementSize;
223     } else {
224       // Non-aggregate type, we cast and make byte-wise progress now.
225       break;
226     }
227 
228     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
229                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
230 
231     GEPName += "." + std::to_string(Idx);
232     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
233     Offset = Rem;
234   }
235 
236   // Create a GEP if we collected indices above.
237   if (Indices.size())
238     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
239 
240   // If an offset is left we use byte-wise adjustment.
241   if (Offset) {
242     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
243     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
244                         GEPName + ".b" + Twine(Offset));
245   }
246 
247   // Ensure the result has the requested type.
248   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
249 
250   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
251   return Ptr;
252 }
253 
254 /// Recursively visit all values that might become \p IRP at some point. This
255 /// will be done by looking through cast instructions, selects, phis, and calls
256 /// with the "returned" attribute. Once we cannot look through the value any
257 /// further, the callback \p VisitValueCB is invoked and passed the current
258 /// value, the \p State, and a flag to indicate if we stripped anything.
259 /// Stripped means that we unpacked the value associated with \p IRP at least
260 /// once. Note that the value used for the callback may still be the value
261 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
262 /// we will never visit more values than specified by \p MaxValues.
263 template <typename AAType, typename StateTy>
264 static bool genericValueTraversal(
265     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
266     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
267         VisitValueCB,
268     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
269     function_ref<Value *(Value *)> StripCB = nullptr) {
270 
271   const AAIsDead *LivenessAA = nullptr;
272   if (IRP.getAnchorScope())
273     LivenessAA = &A.getAAFor<AAIsDead>(
274         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
275         /* TrackDependence */ false);
276   bool AnyDead = false;
277 
278   using Item = std::pair<Value *, const Instruction *>;
279   SmallSet<Item, 16> Visited;
280   SmallVector<Item, 16> Worklist;
281   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
282 
283   int Iteration = 0;
284   do {
285     Item I = Worklist.pop_back_val();
286     Value *V = I.first;
287     CtxI = I.second;
288     if (StripCB)
289       V = StripCB(V);
290 
291     // Check if we should process the current value. To prevent endless
292     // recursion keep a record of the values we followed!
293     if (!Visited.insert(I).second)
294       continue;
295 
296     // Make sure we limit the compile time for complex expressions.
297     if (Iteration++ >= MaxValues)
298       return false;
299 
300     // Explicitly look through calls with a "returned" attribute if we do
301     // not have a pointer as stripPointerCasts only works on them.
302     Value *NewV = nullptr;
303     if (V->getType()->isPointerTy()) {
304       NewV = V->stripPointerCasts();
305     } else {
306       auto *CB = dyn_cast<CallBase>(V);
307       if (CB && CB->getCalledFunction()) {
308         for (Argument &Arg : CB->getCalledFunction()->args())
309           if (Arg.hasReturnedAttr()) {
310             NewV = CB->getArgOperand(Arg.getArgNo());
311             break;
312           }
313       }
314     }
315     if (NewV && NewV != V) {
316       Worklist.push_back({NewV, CtxI});
317       continue;
318     }
319 
320     // Look through select instructions, visit both potential values.
321     if (auto *SI = dyn_cast<SelectInst>(V)) {
322       Worklist.push_back({SI->getTrueValue(), CtxI});
323       Worklist.push_back({SI->getFalseValue(), CtxI});
324       continue;
325     }
326 
327     // Look through phi nodes, visit all live operands.
328     if (auto *PHI = dyn_cast<PHINode>(V)) {
329       assert(LivenessAA &&
330              "Expected liveness in the presence of instructions!");
331       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
332         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
333         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
334                             LivenessAA,
335                             /* CheckBBLivenessOnly */ true)) {
336           AnyDead = true;
337           continue;
338         }
339         Worklist.push_back(
340             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
341       }
342       continue;
343     }
344 
345     if (UseValueSimplify && !isa<Constant>(V)) {
346       bool UsedAssumedInformation = false;
347       Optional<Constant *> C =
348           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
349       if (!C.hasValue())
350         continue;
351       if (Value *NewV = C.getValue()) {
352         Worklist.push_back({NewV, CtxI});
353         continue;
354       }
355     }
356 
357     // Once a leaf is reached we inform the user through the callback.
358     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
359       return false;
360   } while (!Worklist.empty());
361 
362   // If we actually used liveness information so we have to record a dependence.
363   if (AnyDead)
364     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
365 
366   // All values have been visited.
367   return true;
368 }
369 
370 const Value *stripAndAccumulateMinimalOffsets(
371     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
372     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
373     bool UseAssumed = false) {
374 
375   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
376     const IRPosition &Pos = IRPosition::value(V);
377     // Only track dependence if we are going to use the assumed info.
378     const AAValueConstantRange &ValueConstantRangeAA =
379         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
380                                          /* TrackDependence */ UseAssumed);
381     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
382                                      : ValueConstantRangeAA.getKnown();
383     // We can only use the lower part of the range because the upper part can
384     // be higher than what the value can really be.
385     ROffset = Range.getSignedMin();
386     return true;
387   };
388 
389   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
390                                                 AttributorAnalysis);
391 }
392 
393 static const Value *getMinimalBaseOfAccsesPointerOperand(
394     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
395     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
396   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
397   if (!Ptr)
398     return nullptr;
399   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
400   const Value *Base = stripAndAccumulateMinimalOffsets(
401       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
402 
403   BytesOffset = OffsetAPInt.getSExtValue();
404   return Base;
405 }
406 
407 static const Value *
408 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
409                                      const DataLayout &DL,
410                                      bool AllowNonInbounds = false) {
411   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
412   if (!Ptr)
413     return nullptr;
414 
415   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
416                                           AllowNonInbounds);
417 }
418 
419 /// Helper function to clamp a state \p S of type \p StateType with the
420 /// information in \p R and indicate/return if \p S did change (as-in update is
421 /// required to be run again).
422 template <typename StateType>
423 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
424   auto Assumed = S.getAssumed();
425   S ^= R;
426   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
427                                    : ChangeStatus::CHANGED;
428 }
429 
430 /// Clamp the information known for all returned values of a function
431 /// (identified by \p QueryingAA) into \p S.
432 template <typename AAType, typename StateType = typename AAType::StateType>
433 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
434                                      StateType &S) {
435   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
436                     << QueryingAA << " into " << S << "\n");
437 
438   assert((QueryingAA.getIRPosition().getPositionKind() ==
439               IRPosition::IRP_RETURNED ||
440           QueryingAA.getIRPosition().getPositionKind() ==
441               IRPosition::IRP_CALL_SITE_RETURNED) &&
442          "Can only clamp returned value states for a function returned or call "
443          "site returned position!");
444 
445   // Use an optional state as there might not be any return values and we want
446   // to join (IntegerState::operator&) the state of all there are.
447   Optional<StateType> T;
448 
449   // Callback for each possibly returned value.
450   auto CheckReturnValue = [&](Value &RV) -> bool {
451     const IRPosition &RVPos = IRPosition::value(RV);
452     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
453     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
454                       << " @ " << RVPos << "\n");
455     const StateType &AAS = AA.getState();
456     if (T.hasValue())
457       *T &= AAS;
458     else
459       T = AAS;
460     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
461                       << "\n");
462     return T->isValidState();
463   };
464 
465   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
466     S.indicatePessimisticFixpoint();
467   else if (T.hasValue())
468     S ^= *T;
469 }
470 
471 /// Helper class for generic deduction: return value -> returned position.
472 template <typename AAType, typename BaseType,
473           typename StateType = typename BaseType::StateType>
474 struct AAReturnedFromReturnedValues : public BaseType {
475   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
476       : BaseType(IRP, A) {}
477 
478   /// See AbstractAttribute::updateImpl(...).
479   ChangeStatus updateImpl(Attributor &A) override {
480     StateType S(StateType::getBestState(this->getState()));
481     clampReturnedValueStates<AAType, StateType>(A, *this, S);
482     // TODO: If we know we visited all returned values, thus no are assumed
483     // dead, we can take the known information from the state T.
484     return clampStateAndIndicateChange<StateType>(this->getState(), S);
485   }
486 };
487 
488 /// Clamp the information known at all call sites for a given argument
489 /// (identified by \p QueryingAA) into \p S.
490 template <typename AAType, typename StateType = typename AAType::StateType>
491 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
492                                         StateType &S) {
493   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
494                     << QueryingAA << " into " << S << "\n");
495 
496   assert(QueryingAA.getIRPosition().getPositionKind() ==
497              IRPosition::IRP_ARGUMENT &&
498          "Can only clamp call site argument states for an argument position!");
499 
500   // Use an optional state as there might not be any return values and we want
501   // to join (IntegerState::operator&) the state of all there are.
502   Optional<StateType> T;
503 
504   // The argument number which is also the call site argument number.
505   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
506 
507   auto CallSiteCheck = [&](AbstractCallSite ACS) {
508     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
509     // Check if a coresponding argument was found or if it is on not associated
510     // (which can happen for callback calls).
511     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
512       return false;
513 
514     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
515     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
516                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
517     const StateType &AAS = AA.getState();
518     if (T.hasValue())
519       *T &= AAS;
520     else
521       T = AAS;
522     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
523                       << "\n");
524     return T->isValidState();
525   };
526 
527   bool AllCallSitesKnown;
528   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
529                               AllCallSitesKnown))
530     S.indicatePessimisticFixpoint();
531   else if (T.hasValue())
532     S ^= *T;
533 }
534 
535 /// Helper class for generic deduction: call site argument -> argument position.
536 template <typename AAType, typename BaseType,
537           typename StateType = typename AAType::StateType>
538 struct AAArgumentFromCallSiteArguments : public BaseType {
539   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
540       : BaseType(IRP, A) {}
541 
542   /// See AbstractAttribute::updateImpl(...).
543   ChangeStatus updateImpl(Attributor &A) override {
544     StateType S(StateType::getBestState(this->getState()));
545     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
546     // TODO: If we know we visited all incoming values, thus no are assumed
547     // dead, we can take the known information from the state T.
548     return clampStateAndIndicateChange<StateType>(this->getState(), S);
549   }
550 };
551 
552 /// Helper class for generic replication: function returned -> cs returned.
553 template <typename AAType, typename BaseType,
554           typename StateType = typename BaseType::StateType>
555 struct AACallSiteReturnedFromReturned : public BaseType {
556   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
557       : BaseType(IRP, A) {}
558 
559   /// See AbstractAttribute::updateImpl(...).
560   ChangeStatus updateImpl(Attributor &A) override {
561     assert(this->getIRPosition().getPositionKind() ==
562                IRPosition::IRP_CALL_SITE_RETURNED &&
563            "Can only wrap function returned positions for call site returned "
564            "positions!");
565     auto &S = this->getState();
566 
567     const Function *AssociatedFunction =
568         this->getIRPosition().getAssociatedFunction();
569     if (!AssociatedFunction)
570       return S.indicatePessimisticFixpoint();
571 
572     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
573     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
574     return clampStateAndIndicateChange(S, AA.getState());
575   }
576 };
577 
578 /// Helper function to accumulate uses.
579 template <class AAType, typename StateType = typename AAType::StateType>
580 static void followUsesInContext(AAType &AA, Attributor &A,
581                                 MustBeExecutedContextExplorer &Explorer,
582                                 const Instruction *CtxI,
583                                 SetVector<const Use *> &Uses,
584                                 StateType &State) {
585   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
586   for (unsigned u = 0; u < Uses.size(); ++u) {
587     const Use *U = Uses[u];
588     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
589       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
590       if (Found && AA.followUseInMBEC(A, U, UserI, State))
591         for (const Use &Us : UserI->uses())
592           Uses.insert(&Us);
593     }
594   }
595 }
596 
597 /// Use the must-be-executed-context around \p I to add information into \p S.
598 /// The AAType class is required to have `followUseInMBEC` method with the
599 /// following signature and behaviour:
600 ///
601 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
602 /// U - Underlying use.
603 /// I - The user of the \p U.
604 /// Returns true if the value should be tracked transitively.
605 ///
606 template <class AAType, typename StateType = typename AAType::StateType>
607 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
608                              Instruction &CtxI) {
609 
610   // Container for (transitive) uses of the associated value.
611   SetVector<const Use *> Uses;
612   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
613     Uses.insert(&U);
614 
615   MustBeExecutedContextExplorer &Explorer =
616       A.getInfoCache().getMustBeExecutedContextExplorer();
617 
618   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
619 
620   if (S.isAtFixpoint())
621     return;
622 
623   SmallVector<const BranchInst *, 4> BrInsts;
624   auto Pred = [&](const Instruction *I) {
625     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
626       if (Br->isConditional())
627         BrInsts.push_back(Br);
628     return true;
629   };
630 
631   // Here, accumulate conditional branch instructions in the context. We
632   // explore the child paths and collect the known states. The disjunction of
633   // those states can be merged to its own state. Let ParentState_i be a state
634   // to indicate the known information for an i-th branch instruction in the
635   // context. ChildStates are created for its successors respectively.
636   //
637   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
638   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
639   //      ...
640   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
641   //
642   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
643   //
644   // FIXME: Currently, recursive branches are not handled. For example, we
645   // can't deduce that ptr must be dereferenced in below function.
646   //
647   // void f(int a, int c, int *ptr) {
648   //    if(a)
649   //      if (b) {
650   //        *ptr = 0;
651   //      } else {
652   //        *ptr = 1;
653   //      }
654   //    else {
655   //      if (b) {
656   //        *ptr = 0;
657   //      } else {
658   //        *ptr = 1;
659   //      }
660   //    }
661   // }
662 
663   Explorer.checkForAllContext(&CtxI, Pred);
664   for (const BranchInst *Br : BrInsts) {
665     StateType ParentState;
666 
667     // The known state of the parent state is a conjunction of children's
668     // known states so it is initialized with a best state.
669     ParentState.indicateOptimisticFixpoint();
670 
671     for (const BasicBlock *BB : Br->successors()) {
672       StateType ChildState;
673 
674       size_t BeforeSize = Uses.size();
675       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
676 
677       // Erase uses which only appear in the child.
678       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
679         It = Uses.erase(It);
680 
681       ParentState &= ChildState;
682     }
683 
684     // Use only known state.
685     S += ParentState;
686   }
687 }
688 
689 /// -----------------------NoUnwind Function Attribute--------------------------
690 
691 struct AANoUnwindImpl : AANoUnwind {
692   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
693 
694   const std::string getAsStr() const override {
695     return getAssumed() ? "nounwind" : "may-unwind";
696   }
697 
698   /// See AbstractAttribute::updateImpl(...).
699   ChangeStatus updateImpl(Attributor &A) override {
700     auto Opcodes = {
701         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
702         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
703         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
704 
705     auto CheckForNoUnwind = [&](Instruction &I) {
706       if (!I.mayThrow())
707         return true;
708 
709       if (const auto *CB = dyn_cast<CallBase>(&I)) {
710         const auto &NoUnwindAA =
711             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
712         return NoUnwindAA.isAssumedNoUnwind();
713       }
714       return false;
715     };
716 
717     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
718       return indicatePessimisticFixpoint();
719 
720     return ChangeStatus::UNCHANGED;
721   }
722 };
723 
724 struct AANoUnwindFunction final : public AANoUnwindImpl {
725   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
726       : AANoUnwindImpl(IRP, A) {}
727 
728   /// See AbstractAttribute::trackStatistics()
729   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
730 };
731 
732 /// NoUnwind attribute deduction for a call sites.
733 struct AANoUnwindCallSite final : AANoUnwindImpl {
734   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
735       : AANoUnwindImpl(IRP, A) {}
736 
737   /// See AbstractAttribute::initialize(...).
738   void initialize(Attributor &A) override {
739     AANoUnwindImpl::initialize(A);
740     Function *F = getAssociatedFunction();
741     if (!F || F->isDeclaration())
742       indicatePessimisticFixpoint();
743   }
744 
745   /// See AbstractAttribute::updateImpl(...).
746   ChangeStatus updateImpl(Attributor &A) override {
747     // TODO: Once we have call site specific value information we can provide
748     //       call site specific liveness information and then it makes
749     //       sense to specialize attributes for call sites arguments instead of
750     //       redirecting requests to the callee argument.
751     Function *F = getAssociatedFunction();
752     const IRPosition &FnPos = IRPosition::function(*F);
753     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
754     return clampStateAndIndicateChange(getState(), FnAA.getState());
755   }
756 
757   /// See AbstractAttribute::trackStatistics()
758   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
759 };
760 
761 /// --------------------- Function Return Values -------------------------------
762 
763 /// "Attribute" that collects all potential returned values and the return
764 /// instructions that they arise from.
765 ///
766 /// If there is a unique returned value R, the manifest method will:
767 ///   - mark R with the "returned" attribute, if R is an argument.
768 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
769 
770   /// Mapping of values potentially returned by the associated function to the
771   /// return instructions that might return them.
772   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
773 
774   /// Mapping to remember the number of returned values for a call site such
775   /// that we can avoid updates if nothing changed.
776   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
777 
778   /// Set of unresolved calls returned by the associated function.
779   SmallSetVector<CallBase *, 4> UnresolvedCalls;
780 
781   /// State flags
782   ///
783   ///{
784   bool IsFixed = false;
785   bool IsValidState = true;
786   ///}
787 
788 public:
789   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
790       : AAReturnedValues(IRP, A) {}
791 
792   /// See AbstractAttribute::initialize(...).
793   void initialize(Attributor &A) override {
794     // Reset the state.
795     IsFixed = false;
796     IsValidState = true;
797     ReturnedValues.clear();
798 
799     Function *F = getAssociatedFunction();
800     if (!F || F->isDeclaration()) {
801       indicatePessimisticFixpoint();
802       return;
803     }
804     assert(!F->getReturnType()->isVoidTy() &&
805            "Did not expect a void return type!");
806 
807     // The map from instruction opcodes to those instructions in the function.
808     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
809 
810     // Look through all arguments, if one is marked as returned we are done.
811     for (Argument &Arg : F->args()) {
812       if (Arg.hasReturnedAttr()) {
813         auto &ReturnInstSet = ReturnedValues[&Arg];
814         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
815           for (Instruction *RI : *Insts)
816             ReturnInstSet.insert(cast<ReturnInst>(RI));
817 
818         indicateOptimisticFixpoint();
819         return;
820       }
821     }
822 
823     if (!A.isFunctionIPOAmendable(*F))
824       indicatePessimisticFixpoint();
825   }
826 
827   /// See AbstractAttribute::manifest(...).
828   ChangeStatus manifest(Attributor &A) override;
829 
830   /// See AbstractAttribute::getState(...).
831   AbstractState &getState() override { return *this; }
832 
833   /// See AbstractAttribute::getState(...).
834   const AbstractState &getState() const override { return *this; }
835 
836   /// See AbstractAttribute::updateImpl(Attributor &A).
837   ChangeStatus updateImpl(Attributor &A) override;
838 
839   llvm::iterator_range<iterator> returned_values() override {
840     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
841   }
842 
843   llvm::iterator_range<const_iterator> returned_values() const override {
844     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
845   }
846 
847   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
848     return UnresolvedCalls;
849   }
850 
851   /// Return the number of potential return values, -1 if unknown.
852   size_t getNumReturnValues() const override {
853     return isValidState() ? ReturnedValues.size() : -1;
854   }
855 
856   /// Return an assumed unique return value if a single candidate is found. If
857   /// there cannot be one, return a nullptr. If it is not clear yet, return the
858   /// Optional::NoneType.
859   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
860 
861   /// See AbstractState::checkForAllReturnedValues(...).
862   bool checkForAllReturnedValuesAndReturnInsts(
863       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
864       const override;
865 
866   /// Pretty print the attribute similar to the IR representation.
867   const std::string getAsStr() const override;
868 
869   /// See AbstractState::isAtFixpoint().
870   bool isAtFixpoint() const override { return IsFixed; }
871 
872   /// See AbstractState::isValidState().
873   bool isValidState() const override { return IsValidState; }
874 
875   /// See AbstractState::indicateOptimisticFixpoint(...).
876   ChangeStatus indicateOptimisticFixpoint() override {
877     IsFixed = true;
878     return ChangeStatus::UNCHANGED;
879   }
880 
881   ChangeStatus indicatePessimisticFixpoint() override {
882     IsFixed = true;
883     IsValidState = false;
884     return ChangeStatus::CHANGED;
885   }
886 };
887 
888 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
889   ChangeStatus Changed = ChangeStatus::UNCHANGED;
890 
891   // Bookkeeping.
892   assert(isValidState());
893   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
894                   "Number of function with known return values");
895 
896   // Check if we have an assumed unique return value that we could manifest.
897   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
898 
899   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
900     return Changed;
901 
902   // Bookkeeping.
903   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
904                   "Number of function with unique return");
905 
906   // Callback to replace the uses of CB with the constant C.
907   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
908     if (CB.use_empty())
909       return ChangeStatus::UNCHANGED;
910     if (A.changeValueAfterManifest(CB, C))
911       return ChangeStatus::CHANGED;
912     return ChangeStatus::UNCHANGED;
913   };
914 
915   // If the assumed unique return value is an argument, annotate it.
916   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
917     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
918             getAssociatedFunction()->getReturnType())) {
919       getIRPosition() = IRPosition::argument(*UniqueRVArg);
920       Changed = IRAttribute::manifest(A);
921     }
922   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
923     // We can replace the returned value with the unique returned constant.
924     Value &AnchorValue = getAnchorValue();
925     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
926       for (const Use &U : F->uses())
927         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
928           if (CB->isCallee(&U)) {
929             Constant *RVCCast =
930                 CB->getType() == RVC->getType()
931                     ? RVC
932                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
933             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
934           }
935     } else {
936       assert(isa<CallBase>(AnchorValue) &&
937              "Expcected a function or call base anchor!");
938       Constant *RVCCast =
939           AnchorValue.getType() == RVC->getType()
940               ? RVC
941               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
942       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
943     }
944     if (Changed == ChangeStatus::CHANGED)
945       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
946                       "Number of function returns replaced by constant return");
947   }
948 
949   return Changed;
950 }
951 
952 const std::string AAReturnedValuesImpl::getAsStr() const {
953   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
954          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
955          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
956 }
957 
958 Optional<Value *>
959 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
960   // If checkForAllReturnedValues provides a unique value, ignoring potential
961   // undef values that can also be present, it is assumed to be the actual
962   // return value and forwarded to the caller of this method. If there are
963   // multiple, a nullptr is returned indicating there cannot be a unique
964   // returned value.
965   Optional<Value *> UniqueRV;
966 
967   auto Pred = [&](Value &RV) -> bool {
968     // If we found a second returned value and neither the current nor the saved
969     // one is an undef, there is no unique returned value. Undefs are special
970     // since we can pretend they have any value.
971     if (UniqueRV.hasValue() && UniqueRV != &RV &&
972         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
973       UniqueRV = nullptr;
974       return false;
975     }
976 
977     // Do not overwrite a value with an undef.
978     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
979       UniqueRV = &RV;
980 
981     return true;
982   };
983 
984   if (!A.checkForAllReturnedValues(Pred, *this))
985     UniqueRV = nullptr;
986 
987   return UniqueRV;
988 }
989 
990 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
991     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
992     const {
993   if (!isValidState())
994     return false;
995 
996   // Check all returned values but ignore call sites as long as we have not
997   // encountered an overdefined one during an update.
998   for (auto &It : ReturnedValues) {
999     Value *RV = It.first;
1000 
1001     CallBase *CB = dyn_cast<CallBase>(RV);
1002     if (CB && !UnresolvedCalls.count(CB))
1003       continue;
1004 
1005     if (!Pred(*RV, It.second))
1006       return false;
1007   }
1008 
1009   return true;
1010 }
1011 
1012 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1013   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1014   bool Changed = false;
1015 
1016   // State used in the value traversals starting in returned values.
1017   struct RVState {
1018     // The map in which we collect return values -> return instrs.
1019     decltype(ReturnedValues) &RetValsMap;
1020     // The flag to indicate a change.
1021     bool &Changed;
1022     // The return instrs we come from.
1023     SmallSetVector<ReturnInst *, 4> RetInsts;
1024   };
1025 
1026   // Callback for a leaf value returned by the associated function.
1027   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1028                          bool) -> bool {
1029     auto Size = RVS.RetValsMap[&Val].size();
1030     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1031     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1032     RVS.Changed |= Inserted;
1033     LLVM_DEBUG({
1034       if (Inserted)
1035         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1036                << " => " << RVS.RetInsts.size() << "\n";
1037     });
1038     return true;
1039   };
1040 
1041   // Helper method to invoke the generic value traversal.
1042   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1043                                 const Instruction *CtxI) {
1044     IRPosition RetValPos = IRPosition::value(RV);
1045     return genericValueTraversal<AAReturnedValues, RVState>(
1046         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1047         /* UseValueSimplify */ false);
1048   };
1049 
1050   // Callback for all "return intructions" live in the associated function.
1051   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1052     ReturnInst &Ret = cast<ReturnInst>(I);
1053     RVState RVS({ReturnedValues, Changed, {}});
1054     RVS.RetInsts.insert(&Ret);
1055     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1056   };
1057 
1058   // Start by discovering returned values from all live returned instructions in
1059   // the associated function.
1060   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1061     return indicatePessimisticFixpoint();
1062 
1063   // Once returned values "directly" present in the code are handled we try to
1064   // resolve returned calls. To avoid modifications to the ReturnedValues map
1065   // while we iterate over it we kept record of potential new entries in a copy
1066   // map, NewRVsMap.
1067   decltype(ReturnedValues) NewRVsMap;
1068 
1069   auto HandleReturnValue = [&](Value *RV,
1070                                SmallSetVector<ReturnInst *, 4> &RIs) {
1071     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1072                       << RIs.size() << " RIs\n");
1073     CallBase *CB = dyn_cast<CallBase>(RV);
1074     if (!CB || UnresolvedCalls.count(CB))
1075       return;
1076 
1077     if (!CB->getCalledFunction()) {
1078       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1079                         << "\n");
1080       UnresolvedCalls.insert(CB);
1081       return;
1082     }
1083 
1084     // TODO: use the function scope once we have call site AAReturnedValues.
1085     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1086         *this, IRPosition::function(*CB->getCalledFunction()));
1087     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1088                       << RetValAA << "\n");
1089 
1090     // Skip dead ends, thus if we do not know anything about the returned
1091     // call we mark it as unresolved and it will stay that way.
1092     if (!RetValAA.getState().isValidState()) {
1093       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1094                         << "\n");
1095       UnresolvedCalls.insert(CB);
1096       return;
1097     }
1098 
1099     // Do not try to learn partial information. If the callee has unresolved
1100     // return values we will treat the call as unresolved/opaque.
1101     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1102     if (!RetValAAUnresolvedCalls.empty()) {
1103       UnresolvedCalls.insert(CB);
1104       return;
1105     }
1106 
1107     // Now check if we can track transitively returned values. If possible, thus
1108     // if all return value can be represented in the current scope, do so.
1109     bool Unresolved = false;
1110     for (auto &RetValAAIt : RetValAA.returned_values()) {
1111       Value *RetVal = RetValAAIt.first;
1112       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1113           isa<Constant>(RetVal))
1114         continue;
1115       // Anything that did not fit in the above categories cannot be resolved,
1116       // mark the call as unresolved.
1117       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1118                            "cannot be translated: "
1119                         << *RetVal << "\n");
1120       UnresolvedCalls.insert(CB);
1121       Unresolved = true;
1122       break;
1123     }
1124 
1125     if (Unresolved)
1126       return;
1127 
1128     // Now track transitively returned values.
1129     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1130     if (NumRetAA == RetValAA.getNumReturnValues()) {
1131       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1132                            "changed since it was seen last\n");
1133       return;
1134     }
1135     NumRetAA = RetValAA.getNumReturnValues();
1136 
1137     for (auto &RetValAAIt : RetValAA.returned_values()) {
1138       Value *RetVal = RetValAAIt.first;
1139       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1140         // Arguments are mapped to call site operands and we begin the traversal
1141         // again.
1142         bool Unused = false;
1143         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1144         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1145         continue;
1146       }
1147       if (isa<CallBase>(RetVal)) {
1148         // Call sites are resolved by the callee attribute over time, no need to
1149         // do anything for us.
1150         continue;
1151       }
1152       if (isa<Constant>(RetVal)) {
1153         // Constants are valid everywhere, we can simply take them.
1154         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1155         continue;
1156       }
1157     }
1158   };
1159 
1160   for (auto &It : ReturnedValues)
1161     HandleReturnValue(It.first, It.second);
1162 
1163   // Because processing the new information can again lead to new return values
1164   // we have to be careful and iterate until this iteration is complete. The
1165   // idea is that we are in a stable state at the end of an update. All return
1166   // values have been handled and properly categorized. We might not update
1167   // again if we have not requested a non-fix attribute so we cannot "wait" for
1168   // the next update to analyze a new return value.
1169   while (!NewRVsMap.empty()) {
1170     auto It = std::move(NewRVsMap.back());
1171     NewRVsMap.pop_back();
1172 
1173     assert(!It.second.empty() && "Entry does not add anything.");
1174     auto &ReturnInsts = ReturnedValues[It.first];
1175     for (ReturnInst *RI : It.second)
1176       if (ReturnInsts.insert(RI)) {
1177         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1178                           << *It.first << " => " << *RI << "\n");
1179         HandleReturnValue(It.first, ReturnInsts);
1180         Changed = true;
1181       }
1182   }
1183 
1184   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1185   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1186 }
1187 
1188 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1189   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1190       : AAReturnedValuesImpl(IRP, A) {}
1191 
1192   /// See AbstractAttribute::trackStatistics()
1193   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1194 };
1195 
1196 /// Returned values information for a call sites.
1197 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1198   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1199       : AAReturnedValuesImpl(IRP, A) {}
1200 
1201   /// See AbstractAttribute::initialize(...).
1202   void initialize(Attributor &A) override {
1203     // TODO: Once we have call site specific value information we can provide
1204     //       call site specific liveness information and then it makes
1205     //       sense to specialize attributes for call sites instead of
1206     //       redirecting requests to the callee.
1207     llvm_unreachable("Abstract attributes for returned values are not "
1208                      "supported for call sites yet!");
1209   }
1210 
1211   /// See AbstractAttribute::updateImpl(...).
1212   ChangeStatus updateImpl(Attributor &A) override {
1213     return indicatePessimisticFixpoint();
1214   }
1215 
1216   /// See AbstractAttribute::trackStatistics()
1217   void trackStatistics() const override {}
1218 };
1219 
1220 /// ------------------------ NoSync Function Attribute -------------------------
1221 
1222 struct AANoSyncImpl : AANoSync {
1223   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1224 
1225   const std::string getAsStr() const override {
1226     return getAssumed() ? "nosync" : "may-sync";
1227   }
1228 
1229   /// See AbstractAttribute::updateImpl(...).
1230   ChangeStatus updateImpl(Attributor &A) override;
1231 
1232   /// Helper function used to determine whether an instruction is non-relaxed
1233   /// atomic. In other words, if an atomic instruction does not have unordered
1234   /// or monotonic ordering
1235   static bool isNonRelaxedAtomic(Instruction *I);
1236 
1237   /// Helper function used to determine whether an instruction is volatile.
1238   static bool isVolatile(Instruction *I);
1239 
1240   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1241   /// memset).
1242   static bool isNoSyncIntrinsic(Instruction *I);
1243 };
1244 
1245 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1246   if (!I->isAtomic())
1247     return false;
1248 
1249   AtomicOrdering Ordering;
1250   switch (I->getOpcode()) {
1251   case Instruction::AtomicRMW:
1252     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1253     break;
1254   case Instruction::Store:
1255     Ordering = cast<StoreInst>(I)->getOrdering();
1256     break;
1257   case Instruction::Load:
1258     Ordering = cast<LoadInst>(I)->getOrdering();
1259     break;
1260   case Instruction::Fence: {
1261     auto *FI = cast<FenceInst>(I);
1262     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1263       return false;
1264     Ordering = FI->getOrdering();
1265     break;
1266   }
1267   case Instruction::AtomicCmpXchg: {
1268     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1269     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1270     // Only if both are relaxed, than it can be treated as relaxed.
1271     // Otherwise it is non-relaxed.
1272     if (Success != AtomicOrdering::Unordered &&
1273         Success != AtomicOrdering::Monotonic)
1274       return true;
1275     if (Failure != AtomicOrdering::Unordered &&
1276         Failure != AtomicOrdering::Monotonic)
1277       return true;
1278     return false;
1279   }
1280   default:
1281     llvm_unreachable(
1282         "New atomic operations need to be known in the attributor.");
1283   }
1284 
1285   // Relaxed.
1286   if (Ordering == AtomicOrdering::Unordered ||
1287       Ordering == AtomicOrdering::Monotonic)
1288     return false;
1289   return true;
1290 }
1291 
1292 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1293 /// FIXME: We should ipmrove the handling of intrinsics.
1294 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1295   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1296     switch (II->getIntrinsicID()) {
1297     /// Element wise atomic memory intrinsics are can only be unordered,
1298     /// therefore nosync.
1299     case Intrinsic::memset_element_unordered_atomic:
1300     case Intrinsic::memmove_element_unordered_atomic:
1301     case Intrinsic::memcpy_element_unordered_atomic:
1302       return true;
1303     case Intrinsic::memset:
1304     case Intrinsic::memmove:
1305     case Intrinsic::memcpy:
1306       if (!cast<MemIntrinsic>(II)->isVolatile())
1307         return true;
1308       return false;
1309     default:
1310       return false;
1311     }
1312   }
1313   return false;
1314 }
1315 
1316 bool AANoSyncImpl::isVolatile(Instruction *I) {
1317   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1318 
1319   switch (I->getOpcode()) {
1320   case Instruction::AtomicRMW:
1321     return cast<AtomicRMWInst>(I)->isVolatile();
1322   case Instruction::Store:
1323     return cast<StoreInst>(I)->isVolatile();
1324   case Instruction::Load:
1325     return cast<LoadInst>(I)->isVolatile();
1326   case Instruction::AtomicCmpXchg:
1327     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1328   default:
1329     return false;
1330   }
1331 }
1332 
1333 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1334 
1335   auto CheckRWInstForNoSync = [&](Instruction &I) {
1336     /// We are looking for volatile instructions or Non-Relaxed atomics.
1337     /// FIXME: We should improve the handling of intrinsics.
1338 
1339     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1340       return true;
1341 
1342     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1343       if (CB->hasFnAttr(Attribute::NoSync))
1344         return true;
1345 
1346       const auto &NoSyncAA =
1347           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1348       if (NoSyncAA.isAssumedNoSync())
1349         return true;
1350       return false;
1351     }
1352 
1353     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1354       return true;
1355 
1356     return false;
1357   };
1358 
1359   auto CheckForNoSync = [&](Instruction &I) {
1360     // At this point we handled all read/write effects and they are all
1361     // nosync, so they can be skipped.
1362     if (I.mayReadOrWriteMemory())
1363       return true;
1364 
1365     // non-convergent and readnone imply nosync.
1366     return !cast<CallBase>(I).isConvergent();
1367   };
1368 
1369   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1370       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1371     return indicatePessimisticFixpoint();
1372 
1373   return ChangeStatus::UNCHANGED;
1374 }
1375 
1376 struct AANoSyncFunction final : public AANoSyncImpl {
1377   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1378       : AANoSyncImpl(IRP, A) {}
1379 
1380   /// See AbstractAttribute::trackStatistics()
1381   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1382 };
1383 
1384 /// NoSync attribute deduction for a call sites.
1385 struct AANoSyncCallSite final : AANoSyncImpl {
1386   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1387       : AANoSyncImpl(IRP, A) {}
1388 
1389   /// See AbstractAttribute::initialize(...).
1390   void initialize(Attributor &A) override {
1391     AANoSyncImpl::initialize(A);
1392     Function *F = getAssociatedFunction();
1393     if (!F || F->isDeclaration())
1394       indicatePessimisticFixpoint();
1395   }
1396 
1397   /// See AbstractAttribute::updateImpl(...).
1398   ChangeStatus updateImpl(Attributor &A) override {
1399     // TODO: Once we have call site specific value information we can provide
1400     //       call site specific liveness information and then it makes
1401     //       sense to specialize attributes for call sites arguments instead of
1402     //       redirecting requests to the callee argument.
1403     Function *F = getAssociatedFunction();
1404     const IRPosition &FnPos = IRPosition::function(*F);
1405     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1406     return clampStateAndIndicateChange(getState(), FnAA.getState());
1407   }
1408 
1409   /// See AbstractAttribute::trackStatistics()
1410   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1411 };
1412 
1413 /// ------------------------ No-Free Attributes ----------------------------
1414 
1415 struct AANoFreeImpl : public AANoFree {
1416   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1417 
1418   /// See AbstractAttribute::updateImpl(...).
1419   ChangeStatus updateImpl(Attributor &A) override {
1420     auto CheckForNoFree = [&](Instruction &I) {
1421       const auto &CB = cast<CallBase>(I);
1422       if (CB.hasFnAttr(Attribute::NoFree))
1423         return true;
1424 
1425       const auto &NoFreeAA =
1426           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1427       return NoFreeAA.isAssumedNoFree();
1428     };
1429 
1430     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1431       return indicatePessimisticFixpoint();
1432     return ChangeStatus::UNCHANGED;
1433   }
1434 
1435   /// See AbstractAttribute::getAsStr().
1436   const std::string getAsStr() const override {
1437     return getAssumed() ? "nofree" : "may-free";
1438   }
1439 };
1440 
1441 struct AANoFreeFunction final : public AANoFreeImpl {
1442   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1443       : AANoFreeImpl(IRP, A) {}
1444 
1445   /// See AbstractAttribute::trackStatistics()
1446   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1447 };
1448 
1449 /// NoFree attribute deduction for a call sites.
1450 struct AANoFreeCallSite final : AANoFreeImpl {
1451   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1452       : AANoFreeImpl(IRP, A) {}
1453 
1454   /// See AbstractAttribute::initialize(...).
1455   void initialize(Attributor &A) override {
1456     AANoFreeImpl::initialize(A);
1457     Function *F = getAssociatedFunction();
1458     if (!F || F->isDeclaration())
1459       indicatePessimisticFixpoint();
1460   }
1461 
1462   /// See AbstractAttribute::updateImpl(...).
1463   ChangeStatus updateImpl(Attributor &A) override {
1464     // TODO: Once we have call site specific value information we can provide
1465     //       call site specific liveness information and then it makes
1466     //       sense to specialize attributes for call sites arguments instead of
1467     //       redirecting requests to the callee argument.
1468     Function *F = getAssociatedFunction();
1469     const IRPosition &FnPos = IRPosition::function(*F);
1470     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1471     return clampStateAndIndicateChange(getState(), FnAA.getState());
1472   }
1473 
1474   /// See AbstractAttribute::trackStatistics()
1475   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1476 };
1477 
1478 /// NoFree attribute for floating values.
1479 struct AANoFreeFloating : AANoFreeImpl {
1480   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1481       : AANoFreeImpl(IRP, A) {}
1482 
1483   /// See AbstractAttribute::trackStatistics()
1484   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1485 
1486   /// See Abstract Attribute::updateImpl(...).
1487   ChangeStatus updateImpl(Attributor &A) override {
1488     const IRPosition &IRP = getIRPosition();
1489 
1490     const auto &NoFreeAA =
1491         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1492     if (NoFreeAA.isAssumedNoFree())
1493       return ChangeStatus::UNCHANGED;
1494 
1495     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1496     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1497       Instruction *UserI = cast<Instruction>(U.getUser());
1498       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1499         if (CB->isBundleOperand(&U))
1500           return false;
1501         if (!CB->isArgOperand(&U))
1502           return true;
1503         unsigned ArgNo = CB->getArgOperandNo(&U);
1504 
1505         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1506             *this, IRPosition::callsite_argument(*CB, ArgNo));
1507         return NoFreeArg.isAssumedNoFree();
1508       }
1509 
1510       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1511           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1512         Follow = true;
1513         return true;
1514       }
1515       if (isa<ReturnInst>(UserI))
1516         return true;
1517 
1518       // Unknown user.
1519       return false;
1520     };
1521     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1522       return indicatePessimisticFixpoint();
1523 
1524     return ChangeStatus::UNCHANGED;
1525   }
1526 };
1527 
1528 /// NoFree attribute for a call site argument.
1529 struct AANoFreeArgument final : AANoFreeFloating {
1530   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1531       : AANoFreeFloating(IRP, A) {}
1532 
1533   /// See AbstractAttribute::trackStatistics()
1534   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1535 };
1536 
1537 /// NoFree attribute for call site arguments.
1538 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1539   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1540       : AANoFreeFloating(IRP, A) {}
1541 
1542   /// See AbstractAttribute::updateImpl(...).
1543   ChangeStatus updateImpl(Attributor &A) override {
1544     // TODO: Once we have call site specific value information we can provide
1545     //       call site specific liveness information and then it makes
1546     //       sense to specialize attributes for call sites arguments instead of
1547     //       redirecting requests to the callee argument.
1548     Argument *Arg = getAssociatedArgument();
1549     if (!Arg)
1550       return indicatePessimisticFixpoint();
1551     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1552     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1553     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1554   }
1555 
1556   /// See AbstractAttribute::trackStatistics()
1557   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1558 };
1559 
1560 /// NoFree attribute for function return value.
1561 struct AANoFreeReturned final : AANoFreeFloating {
1562   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1563       : AANoFreeFloating(IRP, A) {
1564     llvm_unreachable("NoFree is not applicable to function returns!");
1565   }
1566 
1567   /// See AbstractAttribute::initialize(...).
1568   void initialize(Attributor &A) override {
1569     llvm_unreachable("NoFree is not applicable to function returns!");
1570   }
1571 
1572   /// See AbstractAttribute::updateImpl(...).
1573   ChangeStatus updateImpl(Attributor &A) override {
1574     llvm_unreachable("NoFree is not applicable to function returns!");
1575   }
1576 
1577   /// See AbstractAttribute::trackStatistics()
1578   void trackStatistics() const override {}
1579 };
1580 
1581 /// NoFree attribute deduction for a call site return value.
1582 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1583   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1584       : AANoFreeFloating(IRP, A) {}
1585 
1586   ChangeStatus manifest(Attributor &A) override {
1587     return ChangeStatus::UNCHANGED;
1588   }
1589   /// See AbstractAttribute::trackStatistics()
1590   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1591 };
1592 
1593 /// ------------------------ NonNull Argument Attribute ------------------------
1594 static int64_t getKnownNonNullAndDerefBytesForUse(
1595     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1596     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1597   TrackUse = false;
1598 
1599   const Value *UseV = U->get();
1600   if (!UseV->getType()->isPointerTy())
1601     return 0;
1602 
1603   Type *PtrTy = UseV->getType();
1604   const Function *F = I->getFunction();
1605   bool NullPointerIsDefined =
1606       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1607   const DataLayout &DL = A.getInfoCache().getDL();
1608   if (const auto *CB = dyn_cast<CallBase>(I)) {
1609     if (CB->isBundleOperand(U)) {
1610       if (RetainedKnowledge RK = getKnowledgeFromUse(
1611               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1612         IsNonNull |=
1613             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1614         return RK.ArgValue;
1615       }
1616       return 0;
1617     }
1618 
1619     if (CB->isCallee(U)) {
1620       IsNonNull |= !NullPointerIsDefined;
1621       return 0;
1622     }
1623 
1624     unsigned ArgNo = CB->getArgOperandNo(U);
1625     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1626     // As long as we only use known information there is no need to track
1627     // dependences here.
1628     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1629                                                   /* TrackDependence */ false);
1630     IsNonNull |= DerefAA.isKnownNonNull();
1631     return DerefAA.getKnownDereferenceableBytes();
1632   }
1633 
1634   // We need to follow common pointer manipulation uses to the accesses they
1635   // feed into. We can try to be smart to avoid looking through things we do not
1636   // like for now, e.g., non-inbounds GEPs.
1637   if (isa<CastInst>(I)) {
1638     TrackUse = true;
1639     return 0;
1640   }
1641 
1642   if (isa<GetElementPtrInst>(I)) {
1643     TrackUse = true;
1644     return 0;
1645   }
1646 
1647   int64_t Offset;
1648   const Value *Base =
1649       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1650   if (Base) {
1651     if (Base == &AssociatedValue &&
1652         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1653       int64_t DerefBytes =
1654           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1655 
1656       IsNonNull |= !NullPointerIsDefined;
1657       return std::max(int64_t(0), DerefBytes);
1658     }
1659   }
1660 
1661   /// Corner case when an offset is 0.
1662   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1663                                               /*AllowNonInbounds*/ true);
1664   if (Base) {
1665     if (Offset == 0 && Base == &AssociatedValue &&
1666         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1667       int64_t DerefBytes =
1668           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1669       IsNonNull |= !NullPointerIsDefined;
1670       return std::max(int64_t(0), DerefBytes);
1671     }
1672   }
1673 
1674   return 0;
1675 }
1676 
1677 struct AANonNullImpl : AANonNull {
1678   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1679       : AANonNull(IRP, A),
1680         NullIsDefined(NullPointerIsDefined(
1681             getAnchorScope(),
1682             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1683 
1684   /// See AbstractAttribute::initialize(...).
1685   void initialize(Attributor &A) override {
1686     Value &V = getAssociatedValue();
1687     if (!NullIsDefined &&
1688         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1689                 /* IgnoreSubsumingPositions */ false, &A)) {
1690       indicateOptimisticFixpoint();
1691       return;
1692     }
1693 
1694     if (isa<ConstantPointerNull>(V)) {
1695       indicatePessimisticFixpoint();
1696       return;
1697     }
1698 
1699     AANonNull::initialize(A);
1700 
1701     bool CanBeNull = true;
1702     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) {
1703       if (!CanBeNull) {
1704         indicateOptimisticFixpoint();
1705         return;
1706       }
1707     }
1708 
1709     if (isa<GlobalValue>(&getAssociatedValue())) {
1710       indicatePessimisticFixpoint();
1711       return;
1712     }
1713 
1714     if (Instruction *CtxI = getCtxI())
1715       followUsesInMBEC(*this, A, getState(), *CtxI);
1716   }
1717 
1718   /// See followUsesInMBEC
1719   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1720                        AANonNull::StateType &State) {
1721     bool IsNonNull = false;
1722     bool TrackUse = false;
1723     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1724                                        IsNonNull, TrackUse);
1725     State.setKnown(IsNonNull);
1726     return TrackUse;
1727   }
1728 
1729   /// See AbstractAttribute::getAsStr().
1730   const std::string getAsStr() const override {
1731     return getAssumed() ? "nonnull" : "may-null";
1732   }
1733 
1734   /// Flag to determine if the underlying value can be null and still allow
1735   /// valid accesses.
1736   const bool NullIsDefined;
1737 };
1738 
1739 /// NonNull attribute for a floating value.
1740 struct AANonNullFloating : public AANonNullImpl {
1741   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1742       : AANonNullImpl(IRP, A) {}
1743 
1744   /// See AbstractAttribute::updateImpl(...).
1745   ChangeStatus updateImpl(Attributor &A) override {
1746     const DataLayout &DL = A.getDataLayout();
1747 
1748     DominatorTree *DT = nullptr;
1749     AssumptionCache *AC = nullptr;
1750     InformationCache &InfoCache = A.getInfoCache();
1751     if (const Function *Fn = getAnchorScope()) {
1752       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1753       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1754     }
1755 
1756     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1757                             AANonNull::StateType &T, bool Stripped) -> bool {
1758       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1759       if (!Stripped && this == &AA) {
1760         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1761           T.indicatePessimisticFixpoint();
1762       } else {
1763         // Use abstract attribute information.
1764         const AANonNull::StateType &NS = AA.getState();
1765         T ^= NS;
1766       }
1767       return T.isValidState();
1768     };
1769 
1770     StateType T;
1771     if (!genericValueTraversal<AANonNull, StateType>(
1772             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1773       return indicatePessimisticFixpoint();
1774 
1775     return clampStateAndIndicateChange(getState(), T);
1776   }
1777 
1778   /// See AbstractAttribute::trackStatistics()
1779   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1780 };
1781 
1782 /// NonNull attribute for function return value.
1783 struct AANonNullReturned final
1784     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1785   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1786       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1787 
1788   /// See AbstractAttribute::getAsStr().
1789   const std::string getAsStr() const override {
1790     return getAssumed() ? "nonnull" : "may-null";
1791   }
1792 
1793   /// See AbstractAttribute::trackStatistics()
1794   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1795 };
1796 
1797 /// NonNull attribute for function argument.
1798 struct AANonNullArgument final
1799     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1800   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1801       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1802 
1803   /// See AbstractAttribute::trackStatistics()
1804   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1805 };
1806 
1807 struct AANonNullCallSiteArgument final : AANonNullFloating {
1808   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1809       : AANonNullFloating(IRP, A) {}
1810 
1811   /// See AbstractAttribute::trackStatistics()
1812   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1813 };
1814 
1815 /// NonNull attribute for a call site return position.
1816 struct AANonNullCallSiteReturned final
1817     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1818   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1819       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1820 
1821   /// See AbstractAttribute::trackStatistics()
1822   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1823 };
1824 
1825 /// ------------------------ No-Recurse Attributes ----------------------------
1826 
1827 struct AANoRecurseImpl : public AANoRecurse {
1828   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1829 
1830   /// See AbstractAttribute::getAsStr()
1831   const std::string getAsStr() const override {
1832     return getAssumed() ? "norecurse" : "may-recurse";
1833   }
1834 };
1835 
1836 struct AANoRecurseFunction final : AANoRecurseImpl {
1837   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1838       : AANoRecurseImpl(IRP, A) {}
1839 
1840   /// See AbstractAttribute::initialize(...).
1841   void initialize(Attributor &A) override {
1842     AANoRecurseImpl::initialize(A);
1843     if (const Function *F = getAnchorScope())
1844       if (A.getInfoCache().getSccSize(*F) != 1)
1845         indicatePessimisticFixpoint();
1846   }
1847 
1848   /// See AbstractAttribute::updateImpl(...).
1849   ChangeStatus updateImpl(Attributor &A) override {
1850 
1851     // If all live call sites are known to be no-recurse, we are as well.
1852     auto CallSitePred = [&](AbstractCallSite ACS) {
1853       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1854           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1855           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1856       return NoRecurseAA.isKnownNoRecurse();
1857     };
1858     bool AllCallSitesKnown;
1859     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1860       // If we know all call sites and all are known no-recurse, we are done.
1861       // If all known call sites, which might not be all that exist, are known
1862       // to be no-recurse, we are not done but we can continue to assume
1863       // no-recurse. If one of the call sites we have not visited will become
1864       // live, another update is triggered.
1865       if (AllCallSitesKnown)
1866         indicateOptimisticFixpoint();
1867       return ChangeStatus::UNCHANGED;
1868     }
1869 
1870     // If the above check does not hold anymore we look at the calls.
1871     auto CheckForNoRecurse = [&](Instruction &I) {
1872       const auto &CB = cast<CallBase>(I);
1873       if (CB.hasFnAttr(Attribute::NoRecurse))
1874         return true;
1875 
1876       const auto &NoRecurseAA =
1877           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1878       if (!NoRecurseAA.isAssumedNoRecurse())
1879         return false;
1880 
1881       // Recursion to the same function
1882       if (CB.getCalledFunction() == getAnchorScope())
1883         return false;
1884 
1885       return true;
1886     };
1887 
1888     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1889       return indicatePessimisticFixpoint();
1890     return ChangeStatus::UNCHANGED;
1891   }
1892 
1893   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1894 };
1895 
1896 /// NoRecurse attribute deduction for a call sites.
1897 struct AANoRecurseCallSite final : AANoRecurseImpl {
1898   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1899       : AANoRecurseImpl(IRP, A) {}
1900 
1901   /// See AbstractAttribute::initialize(...).
1902   void initialize(Attributor &A) override {
1903     AANoRecurseImpl::initialize(A);
1904     Function *F = getAssociatedFunction();
1905     if (!F || F->isDeclaration())
1906       indicatePessimisticFixpoint();
1907   }
1908 
1909   /// See AbstractAttribute::updateImpl(...).
1910   ChangeStatus updateImpl(Attributor &A) override {
1911     // TODO: Once we have call site specific value information we can provide
1912     //       call site specific liveness information and then it makes
1913     //       sense to specialize attributes for call sites arguments instead of
1914     //       redirecting requests to the callee argument.
1915     Function *F = getAssociatedFunction();
1916     const IRPosition &FnPos = IRPosition::function(*F);
1917     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1918     return clampStateAndIndicateChange(getState(), FnAA.getState());
1919   }
1920 
1921   /// See AbstractAttribute::trackStatistics()
1922   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1923 };
1924 
1925 /// -------------------- Undefined-Behavior Attributes ------------------------
1926 
1927 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1928   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1929       : AAUndefinedBehavior(IRP, A) {}
1930 
1931   /// See AbstractAttribute::updateImpl(...).
1932   // through a pointer (i.e. also branches etc.)
1933   ChangeStatus updateImpl(Attributor &A) override {
1934     const size_t UBPrevSize = KnownUBInsts.size();
1935     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1936 
1937     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1938       // Skip instructions that are already saved.
1939       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1940         return true;
1941 
1942       // If we reach here, we know we have an instruction
1943       // that accesses memory through a pointer operand,
1944       // for which getPointerOperand() should give it to us.
1945       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1946       assert(PtrOp &&
1947              "Expected pointer operand of memory accessing instruction");
1948 
1949       // Either we stopped and the appropriate action was taken,
1950       // or we got back a simplified value to continue.
1951       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1952       if (!SimplifiedPtrOp.hasValue())
1953         return true;
1954       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1955 
1956       // A memory access through a pointer is considered UB
1957       // only if the pointer has constant null value.
1958       // TODO: Expand it to not only check constant values.
1959       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1960         AssumedNoUBInsts.insert(&I);
1961         return true;
1962       }
1963       const Type *PtrTy = PtrOpVal->getType();
1964 
1965       // Because we only consider instructions inside functions,
1966       // assume that a parent function exists.
1967       const Function *F = I.getFunction();
1968 
1969       // A memory access using constant null pointer is only considered UB
1970       // if null pointer is _not_ defined for the target platform.
1971       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1972         AssumedNoUBInsts.insert(&I);
1973       else
1974         KnownUBInsts.insert(&I);
1975       return true;
1976     };
1977 
1978     auto InspectBrInstForUB = [&](Instruction &I) {
1979       // A conditional branch instruction is considered UB if it has `undef`
1980       // condition.
1981 
1982       // Skip instructions that are already saved.
1983       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1984         return true;
1985 
1986       // We know we have a branch instruction.
1987       auto BrInst = cast<BranchInst>(&I);
1988 
1989       // Unconditional branches are never considered UB.
1990       if (BrInst->isUnconditional())
1991         return true;
1992 
1993       // Either we stopped and the appropriate action was taken,
1994       // or we got back a simplified value to continue.
1995       Optional<Value *> SimplifiedCond =
1996           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1997       if (!SimplifiedCond.hasValue())
1998         return true;
1999       AssumedNoUBInsts.insert(&I);
2000       return true;
2001     };
2002 
2003     auto InspectCallSiteForUB = [&](Instruction &I) {
2004       // Check whether a callsite always cause UB or not
2005 
2006       // Skip instructions that are already saved.
2007       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2008         return true;
2009 
2010       // Check nonnull and noundef argument attribute violation for each
2011       // callsite.
2012       CallBase &CB = cast<CallBase>(I);
2013       Function *Callee = CB.getCalledFunction();
2014       if (!Callee)
2015         return true;
2016       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2017         // If current argument is known to be simplified to null pointer and the
2018         // corresponding argument position is known to have nonnull attribute,
2019         // the argument is poison. Furthermore, if the argument is poison and
2020         // the position is known to have noundef attriubte, this callsite is
2021         // considered UB.
2022         if (idx >= Callee->arg_size())
2023           break;
2024         Value *ArgVal = CB.getArgOperand(idx);
2025         if (!ArgVal)
2026           continue;
2027         // Here, we handle three cases.
2028         //   (1) Not having a value means it is dead. (we can replace the value
2029         //       with undef)
2030         //   (2) Simplified to undef. The argument violate noundef attriubte.
2031         //   (3) Simplified to null pointer where known to be nonnull.
2032         //       The argument is a poison value and violate noundef attribute.
2033         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2034         auto &NoUndefAA = A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP,
2035                                                 /* TrackDependence */ false);
2036         if (!NoUndefAA.isKnownNoUndef())
2037           continue;
2038         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2039             *this, IRPosition::value(*ArgVal), /* TrackDependence */ false);
2040         if (!ValueSimplifyAA.isKnown())
2041           continue;
2042         Optional<Value *> SimplifiedVal =
2043             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2044         if (!SimplifiedVal.hasValue() ||
2045             isa<UndefValue>(*SimplifiedVal.getValue())) {
2046           KnownUBInsts.insert(&I);
2047           continue;
2048         }
2049         if (!ArgVal->getType()->isPointerTy() ||
2050             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2051           continue;
2052         auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP,
2053                                                 /* TrackDependence */ false);
2054         if (NonNullAA.isKnownNonNull())
2055           KnownUBInsts.insert(&I);
2056       }
2057       return true;
2058     };
2059 
2060     auto InspectReturnInstForUB =
2061         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2062           // Check if a return instruction always cause UB or not
2063           // Note: It is guaranteed that the returned position of the anchor
2064           //       scope has noundef attribute when this is called.
2065           //       We also ensure the return position is not "assumed dead"
2066           //       because the returned value was then potentially simplified to
2067           //       `undef` in AAReturnedValues without removing the `noundef`
2068           //       attribute yet.
2069 
2070           // When the returned position has noundef attriubte, UB occur in the
2071           // following cases.
2072           //   (1) Returned value is known to be undef.
2073           //   (2) The value is known to be a null pointer and the returned
2074           //       position has nonnull attribute (because the returned value is
2075           //       poison).
2076           bool FoundUB = false;
2077           if (isa<UndefValue>(V)) {
2078             FoundUB = true;
2079           } else {
2080             if (isa<ConstantPointerNull>(V)) {
2081               auto &NonNullAA = A.getAAFor<AANonNull>(
2082                   *this, IRPosition::returned(*getAnchorScope()),
2083                   /* TrackDependence */ false);
2084               if (NonNullAA.isKnownNonNull())
2085                 FoundUB = true;
2086             }
2087           }
2088 
2089           if (FoundUB)
2090             for (ReturnInst *RI : RetInsts)
2091               KnownUBInsts.insert(RI);
2092           return true;
2093         };
2094 
2095     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2096                               {Instruction::Load, Instruction::Store,
2097                                Instruction::AtomicCmpXchg,
2098                                Instruction::AtomicRMW},
2099                               /* CheckBBLivenessOnly */ true);
2100     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2101                               /* CheckBBLivenessOnly */ true);
2102     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2103 
2104     // If the returned position of the anchor scope has noundef attriubte, check
2105     // all returned instructions.
2106     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2107       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2108       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2109         auto &RetPosNoUndefAA =
2110             A.getAAFor<AANoUndef>(*this, ReturnIRP,
2111                                   /* TrackDependence */ false);
2112         if (RetPosNoUndefAA.isKnownNoUndef())
2113           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2114                                                     *this);
2115       }
2116     }
2117 
2118     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2119         UBPrevSize != KnownUBInsts.size())
2120       return ChangeStatus::CHANGED;
2121     return ChangeStatus::UNCHANGED;
2122   }
2123 
2124   bool isKnownToCauseUB(Instruction *I) const override {
2125     return KnownUBInsts.count(I);
2126   }
2127 
2128   bool isAssumedToCauseUB(Instruction *I) const override {
2129     // In simple words, if an instruction is not in the assumed to _not_
2130     // cause UB, then it is assumed UB (that includes those
2131     // in the KnownUBInsts set). The rest is boilerplate
2132     // is to ensure that it is one of the instructions we test
2133     // for UB.
2134 
2135     switch (I->getOpcode()) {
2136     case Instruction::Load:
2137     case Instruction::Store:
2138     case Instruction::AtomicCmpXchg:
2139     case Instruction::AtomicRMW:
2140       return !AssumedNoUBInsts.count(I);
2141     case Instruction::Br: {
2142       auto BrInst = cast<BranchInst>(I);
2143       if (BrInst->isUnconditional())
2144         return false;
2145       return !AssumedNoUBInsts.count(I);
2146     } break;
2147     default:
2148       return false;
2149     }
2150     return false;
2151   }
2152 
2153   ChangeStatus manifest(Attributor &A) override {
2154     if (KnownUBInsts.empty())
2155       return ChangeStatus::UNCHANGED;
2156     for (Instruction *I : KnownUBInsts)
2157       A.changeToUnreachableAfterManifest(I);
2158     return ChangeStatus::CHANGED;
2159   }
2160 
2161   /// See AbstractAttribute::getAsStr()
2162   const std::string getAsStr() const override {
2163     return getAssumed() ? "undefined-behavior" : "no-ub";
2164   }
2165 
2166   /// Note: The correctness of this analysis depends on the fact that the
2167   /// following 2 sets will stop changing after some point.
2168   /// "Change" here means that their size changes.
2169   /// The size of each set is monotonically increasing
2170   /// (we only add items to them) and it is upper bounded by the number of
2171   /// instructions in the processed function (we can never save more
2172   /// elements in either set than this number). Hence, at some point,
2173   /// they will stop increasing.
2174   /// Consequently, at some point, both sets will have stopped
2175   /// changing, effectively making the analysis reach a fixpoint.
2176 
2177   /// Note: These 2 sets are disjoint and an instruction can be considered
2178   /// one of 3 things:
2179   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2180   ///    the KnownUBInsts set.
2181   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2182   ///    has a reason to assume it).
2183   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2184   ///    could not find a reason to assume or prove that it can cause UB,
2185   ///    hence it assumes it doesn't. We have a set for these instructions
2186   ///    so that we don't reprocess them in every update.
2187   ///    Note however that instructions in this set may cause UB.
2188 
2189 protected:
2190   /// A set of all live instructions _known_ to cause UB.
2191   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2192 
2193 private:
2194   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2195   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2196 
2197   // Should be called on updates in which if we're processing an instruction
2198   // \p I that depends on a value \p V, one of the following has to happen:
2199   // - If the value is assumed, then stop.
2200   // - If the value is known but undef, then consider it UB.
2201   // - Otherwise, do specific processing with the simplified value.
2202   // We return None in the first 2 cases to signify that an appropriate
2203   // action was taken and the caller should stop.
2204   // Otherwise, we return the simplified value that the caller should
2205   // use for specific processing.
2206   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2207                                          Instruction *I) {
2208     const auto &ValueSimplifyAA =
2209         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2210     Optional<Value *> SimplifiedV =
2211         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2212     if (!ValueSimplifyAA.isKnown()) {
2213       // Don't depend on assumed values.
2214       return llvm::None;
2215     }
2216     if (!SimplifiedV.hasValue()) {
2217       // If it is known (which we tested above) but it doesn't have a value,
2218       // then we can assume `undef` and hence the instruction is UB.
2219       KnownUBInsts.insert(I);
2220       return llvm::None;
2221     }
2222     Value *Val = SimplifiedV.getValue();
2223     if (isa<UndefValue>(Val)) {
2224       KnownUBInsts.insert(I);
2225       return llvm::None;
2226     }
2227     return Val;
2228   }
2229 };
2230 
2231 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2232   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2233       : AAUndefinedBehaviorImpl(IRP, A) {}
2234 
2235   /// See AbstractAttribute::trackStatistics()
2236   void trackStatistics() const override {
2237     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2238                "Number of instructions known to have UB");
2239     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2240         KnownUBInsts.size();
2241   }
2242 };
2243 
2244 /// ------------------------ Will-Return Attributes ----------------------------
2245 
2246 // Helper function that checks whether a function has any cycle which we don't
2247 // know if it is bounded or not.
2248 // Loops with maximum trip count are considered bounded, any other cycle not.
2249 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2250   ScalarEvolution *SE =
2251       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2252   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2253   // If either SCEV or LoopInfo is not available for the function then we assume
2254   // any cycle to be unbounded cycle.
2255   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2256   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2257   if (!SE || !LI) {
2258     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2259       if (SCCI.hasCycle())
2260         return true;
2261     return false;
2262   }
2263 
2264   // If there's irreducible control, the function may contain non-loop cycles.
2265   if (mayContainIrreducibleControl(F, LI))
2266     return true;
2267 
2268   // Any loop that does not have a max trip count is considered unbounded cycle.
2269   for (auto *L : LI->getLoopsInPreorder()) {
2270     if (!SE->getSmallConstantMaxTripCount(L))
2271       return true;
2272   }
2273   return false;
2274 }
2275 
2276 struct AAWillReturnImpl : public AAWillReturn {
2277   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2278       : AAWillReturn(IRP, A) {}
2279 
2280   /// See AbstractAttribute::initialize(...).
2281   void initialize(Attributor &A) override {
2282     AAWillReturn::initialize(A);
2283 
2284     Function *F = getAnchorScope();
2285     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2286       indicatePessimisticFixpoint();
2287   }
2288 
2289   /// See AbstractAttribute::updateImpl(...).
2290   ChangeStatus updateImpl(Attributor &A) override {
2291     auto CheckForWillReturn = [&](Instruction &I) {
2292       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2293       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2294       if (WillReturnAA.isKnownWillReturn())
2295         return true;
2296       if (!WillReturnAA.isAssumedWillReturn())
2297         return false;
2298       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2299       return NoRecurseAA.isAssumedNoRecurse();
2300     };
2301 
2302     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2303       return indicatePessimisticFixpoint();
2304 
2305     return ChangeStatus::UNCHANGED;
2306   }
2307 
2308   /// See AbstractAttribute::getAsStr()
2309   const std::string getAsStr() const override {
2310     return getAssumed() ? "willreturn" : "may-noreturn";
2311   }
2312 };
2313 
2314 struct AAWillReturnFunction final : AAWillReturnImpl {
2315   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2316       : AAWillReturnImpl(IRP, A) {}
2317 
2318   /// See AbstractAttribute::trackStatistics()
2319   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2320 };
2321 
2322 /// WillReturn attribute deduction for a call sites.
2323 struct AAWillReturnCallSite final : AAWillReturnImpl {
2324   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2325       : AAWillReturnImpl(IRP, A) {}
2326 
2327   /// See AbstractAttribute::initialize(...).
2328   void initialize(Attributor &A) override {
2329     AAWillReturn::initialize(A);
2330     Function *F = getAssociatedFunction();
2331     if (!F || !A.isFunctionIPOAmendable(*F))
2332       indicatePessimisticFixpoint();
2333   }
2334 
2335   /// See AbstractAttribute::updateImpl(...).
2336   ChangeStatus updateImpl(Attributor &A) override {
2337     // TODO: Once we have call site specific value information we can provide
2338     //       call site specific liveness information and then it makes
2339     //       sense to specialize attributes for call sites arguments instead of
2340     //       redirecting requests to the callee argument.
2341     Function *F = getAssociatedFunction();
2342     const IRPosition &FnPos = IRPosition::function(*F);
2343     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2344     return clampStateAndIndicateChange(getState(), FnAA.getState());
2345   }
2346 
2347   /// See AbstractAttribute::trackStatistics()
2348   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2349 };
2350 
2351 /// -------------------AAReachability Attribute--------------------------
2352 
2353 struct AAReachabilityImpl : AAReachability {
2354   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2355       : AAReachability(IRP, A) {}
2356 
2357   const std::string getAsStr() const override {
2358     // TODO: Return the number of reachable queries.
2359     return "reachable";
2360   }
2361 
2362   /// See AbstractAttribute::initialize(...).
2363   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2364 
2365   /// See AbstractAttribute::updateImpl(...).
2366   ChangeStatus updateImpl(Attributor &A) override {
2367     return indicatePessimisticFixpoint();
2368   }
2369 };
2370 
2371 struct AAReachabilityFunction final : public AAReachabilityImpl {
2372   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2373       : AAReachabilityImpl(IRP, A) {}
2374 
2375   /// See AbstractAttribute::trackStatistics()
2376   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2377 };
2378 
2379 /// ------------------------ NoAlias Argument Attribute ------------------------
2380 
2381 struct AANoAliasImpl : AANoAlias {
2382   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2383     assert(getAssociatedType()->isPointerTy() &&
2384            "Noalias is a pointer attribute");
2385   }
2386 
2387   const std::string getAsStr() const override {
2388     return getAssumed() ? "noalias" : "may-alias";
2389   }
2390 };
2391 
2392 /// NoAlias attribute for a floating value.
2393 struct AANoAliasFloating final : AANoAliasImpl {
2394   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2395       : AANoAliasImpl(IRP, A) {}
2396 
2397   /// See AbstractAttribute::initialize(...).
2398   void initialize(Attributor &A) override {
2399     AANoAliasImpl::initialize(A);
2400     Value *Val = &getAssociatedValue();
2401     do {
2402       CastInst *CI = dyn_cast<CastInst>(Val);
2403       if (!CI)
2404         break;
2405       Value *Base = CI->getOperand(0);
2406       if (!Base->hasOneUse())
2407         break;
2408       Val = Base;
2409     } while (true);
2410 
2411     if (!Val->getType()->isPointerTy()) {
2412       indicatePessimisticFixpoint();
2413       return;
2414     }
2415 
2416     if (isa<AllocaInst>(Val))
2417       indicateOptimisticFixpoint();
2418     else if (isa<ConstantPointerNull>(Val) &&
2419              !NullPointerIsDefined(getAnchorScope(),
2420                                    Val->getType()->getPointerAddressSpace()))
2421       indicateOptimisticFixpoint();
2422     else if (Val != &getAssociatedValue()) {
2423       const auto &ValNoAliasAA =
2424           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2425       if (ValNoAliasAA.isKnownNoAlias())
2426         indicateOptimisticFixpoint();
2427     }
2428   }
2429 
2430   /// See AbstractAttribute::updateImpl(...).
2431   ChangeStatus updateImpl(Attributor &A) override {
2432     // TODO: Implement this.
2433     return indicatePessimisticFixpoint();
2434   }
2435 
2436   /// See AbstractAttribute::trackStatistics()
2437   void trackStatistics() const override {
2438     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2439   }
2440 };
2441 
2442 /// NoAlias attribute for an argument.
2443 struct AANoAliasArgument final
2444     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2445   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2446   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2447 
2448   /// See AbstractAttribute::initialize(...).
2449   void initialize(Attributor &A) override {
2450     Base::initialize(A);
2451     // See callsite argument attribute and callee argument attribute.
2452     if (hasAttr({Attribute::ByVal}))
2453       indicateOptimisticFixpoint();
2454   }
2455 
2456   /// See AbstractAttribute::update(...).
2457   ChangeStatus updateImpl(Attributor &A) override {
2458     // We have to make sure no-alias on the argument does not break
2459     // synchronization when this is a callback argument, see also [1] below.
2460     // If synchronization cannot be affected, we delegate to the base updateImpl
2461     // function, otherwise we give up for now.
2462 
2463     // If the function is no-sync, no-alias cannot break synchronization.
2464     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2465         *this, IRPosition::function_scope(getIRPosition()));
2466     if (NoSyncAA.isAssumedNoSync())
2467       return Base::updateImpl(A);
2468 
2469     // If the argument is read-only, no-alias cannot break synchronization.
2470     const auto &MemBehaviorAA =
2471         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2472     if (MemBehaviorAA.isAssumedReadOnly())
2473       return Base::updateImpl(A);
2474 
2475     // If the argument is never passed through callbacks, no-alias cannot break
2476     // synchronization.
2477     bool AllCallSitesKnown;
2478     if (A.checkForAllCallSites(
2479             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2480             true, AllCallSitesKnown))
2481       return Base::updateImpl(A);
2482 
2483     // TODO: add no-alias but make sure it doesn't break synchronization by
2484     // introducing fake uses. See:
2485     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2486     //     International Workshop on OpenMP 2018,
2487     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2488 
2489     return indicatePessimisticFixpoint();
2490   }
2491 
2492   /// See AbstractAttribute::trackStatistics()
2493   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2494 };
2495 
2496 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2497   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2498       : AANoAliasImpl(IRP, A) {}
2499 
2500   /// See AbstractAttribute::initialize(...).
2501   void initialize(Attributor &A) override {
2502     // See callsite argument attribute and callee argument attribute.
2503     const auto &CB = cast<CallBase>(getAnchorValue());
2504     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2505       indicateOptimisticFixpoint();
2506     Value &Val = getAssociatedValue();
2507     if (isa<ConstantPointerNull>(Val) &&
2508         !NullPointerIsDefined(getAnchorScope(),
2509                               Val.getType()->getPointerAddressSpace()))
2510       indicateOptimisticFixpoint();
2511   }
2512 
2513   /// Determine if the underlying value may alias with the call site argument
2514   /// \p OtherArgNo of \p ICS (= the underlying call site).
2515   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2516                             const AAMemoryBehavior &MemBehaviorAA,
2517                             const CallBase &CB, unsigned OtherArgNo) {
2518     // We do not need to worry about aliasing with the underlying IRP.
2519     if (this->getCalleeArgNo() == (int)OtherArgNo)
2520       return false;
2521 
2522     // If it is not a pointer or pointer vector we do not alias.
2523     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2524     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2525       return false;
2526 
2527     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2528         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2529         /* TrackDependence */ false);
2530 
2531     // If the argument is readnone, there is no read-write aliasing.
2532     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2533       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2534       return false;
2535     }
2536 
2537     // If the argument is readonly and the underlying value is readonly, there
2538     // is no read-write aliasing.
2539     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2540     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2541       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2542       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2543       return false;
2544     }
2545 
2546     // We have to utilize actual alias analysis queries so we need the object.
2547     if (!AAR)
2548       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2549 
2550     // Try to rule it out at the call site.
2551     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2552     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2553                          "callsite arguments: "
2554                       << getAssociatedValue() << " " << *ArgOp << " => "
2555                       << (IsAliasing ? "" : "no-") << "alias \n");
2556 
2557     return IsAliasing;
2558   }
2559 
2560   bool
2561   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2562                                          const AAMemoryBehavior &MemBehaviorAA,
2563                                          const AANoAlias &NoAliasAA) {
2564     // We can deduce "noalias" if the following conditions hold.
2565     // (i)   Associated value is assumed to be noalias in the definition.
2566     // (ii)  Associated value is assumed to be no-capture in all the uses
2567     //       possibly executed before this callsite.
2568     // (iii) There is no other pointer argument which could alias with the
2569     //       value.
2570 
2571     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2572     if (!AssociatedValueIsNoAliasAtDef) {
2573       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2574                         << " is not no-alias at the definition\n");
2575       return false;
2576     }
2577 
2578     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2579 
2580     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2581     const Function *ScopeFn = VIRP.getAnchorScope();
2582     auto &NoCaptureAA =
2583         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2584     // Check whether the value is captured in the scope using AANoCapture.
2585     //      Look at CFG and check only uses possibly executed before this
2586     //      callsite.
2587     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2588       Instruction *UserI = cast<Instruction>(U.getUser());
2589 
2590       // If UserI is the curr instruction and there is a single potential use of
2591       // the value in UserI we allow the use.
2592       // TODO: We should inspect the operands and allow those that cannot alias
2593       //       with the value.
2594       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2595         return true;
2596 
2597       if (ScopeFn) {
2598         const auto &ReachabilityAA =
2599             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2600 
2601         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2602           return true;
2603 
2604         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2605           if (CB->isArgOperand(&U)) {
2606 
2607             unsigned ArgNo = CB->getArgOperandNo(&U);
2608 
2609             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2610                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2611 
2612             if (NoCaptureAA.isAssumedNoCapture())
2613               return true;
2614           }
2615         }
2616       }
2617 
2618       // For cases which can potentially have more users
2619       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2620           isa<SelectInst>(U)) {
2621         Follow = true;
2622         return true;
2623       }
2624 
2625       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2626       return false;
2627     };
2628 
2629     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2630       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2631         LLVM_DEBUG(
2632             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2633                    << " cannot be noalias as it is potentially captured\n");
2634         return false;
2635       }
2636     }
2637     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2638 
2639     // Check there is no other pointer argument which could alias with the
2640     // value passed at this call site.
2641     // TODO: AbstractCallSite
2642     const auto &CB = cast<CallBase>(getAnchorValue());
2643     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2644          OtherArgNo++)
2645       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2646         return false;
2647 
2648     return true;
2649   }
2650 
2651   /// See AbstractAttribute::updateImpl(...).
2652   ChangeStatus updateImpl(Attributor &A) override {
2653     // If the argument is readnone we are done as there are no accesses via the
2654     // argument.
2655     auto &MemBehaviorAA =
2656         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2657                                      /* TrackDependence */ false);
2658     if (MemBehaviorAA.isAssumedReadNone()) {
2659       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2660       return ChangeStatus::UNCHANGED;
2661     }
2662 
2663     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2664     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2665                                                   /* TrackDependence */ false);
2666 
2667     AAResults *AAR = nullptr;
2668     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2669                                                NoAliasAA)) {
2670       LLVM_DEBUG(
2671           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2672       return ChangeStatus::UNCHANGED;
2673     }
2674 
2675     return indicatePessimisticFixpoint();
2676   }
2677 
2678   /// See AbstractAttribute::trackStatistics()
2679   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2680 };
2681 
2682 /// NoAlias attribute for function return value.
2683 struct AANoAliasReturned final : AANoAliasImpl {
2684   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2685       : AANoAliasImpl(IRP, A) {}
2686 
2687   /// See AbstractAttribute::initialize(...).
2688   void initialize(Attributor &A) override {
2689     AANoAliasImpl::initialize(A);
2690     Function *F = getAssociatedFunction();
2691     if (!F || F->isDeclaration())
2692       indicatePessimisticFixpoint();
2693   }
2694 
2695   /// See AbstractAttribute::updateImpl(...).
2696   virtual ChangeStatus updateImpl(Attributor &A) override {
2697 
2698     auto CheckReturnValue = [&](Value &RV) -> bool {
2699       if (Constant *C = dyn_cast<Constant>(&RV))
2700         if (C->isNullValue() || isa<UndefValue>(C))
2701           return true;
2702 
2703       /// For now, we can only deduce noalias if we have call sites.
2704       /// FIXME: add more support.
2705       if (!isa<CallBase>(&RV))
2706         return false;
2707 
2708       const IRPosition &RVPos = IRPosition::value(RV);
2709       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2710       if (!NoAliasAA.isAssumedNoAlias())
2711         return false;
2712 
2713       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2714       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2715     };
2716 
2717     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2718       return indicatePessimisticFixpoint();
2719 
2720     return ChangeStatus::UNCHANGED;
2721   }
2722 
2723   /// See AbstractAttribute::trackStatistics()
2724   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2725 };
2726 
2727 /// NoAlias attribute deduction for a call site return value.
2728 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2729   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2730       : AANoAliasImpl(IRP, A) {}
2731 
2732   /// See AbstractAttribute::initialize(...).
2733   void initialize(Attributor &A) override {
2734     AANoAliasImpl::initialize(A);
2735     Function *F = getAssociatedFunction();
2736     if (!F || F->isDeclaration())
2737       indicatePessimisticFixpoint();
2738   }
2739 
2740   /// See AbstractAttribute::updateImpl(...).
2741   ChangeStatus updateImpl(Attributor &A) override {
2742     // TODO: Once we have call site specific value information we can provide
2743     //       call site specific liveness information and then it makes
2744     //       sense to specialize attributes for call sites arguments instead of
2745     //       redirecting requests to the callee argument.
2746     Function *F = getAssociatedFunction();
2747     const IRPosition &FnPos = IRPosition::returned(*F);
2748     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2749     return clampStateAndIndicateChange(getState(), FnAA.getState());
2750   }
2751 
2752   /// See AbstractAttribute::trackStatistics()
2753   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2754 };
2755 
2756 /// -------------------AAIsDead Function Attribute-----------------------
2757 
2758 struct AAIsDeadValueImpl : public AAIsDead {
2759   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2760 
2761   /// See AAIsDead::isAssumedDead().
2762   bool isAssumedDead() const override { return getAssumed(); }
2763 
2764   /// See AAIsDead::isKnownDead().
2765   bool isKnownDead() const override { return getKnown(); }
2766 
2767   /// See AAIsDead::isAssumedDead(BasicBlock *).
2768   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2769 
2770   /// See AAIsDead::isKnownDead(BasicBlock *).
2771   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2772 
2773   /// See AAIsDead::isAssumedDead(Instruction *I).
2774   bool isAssumedDead(const Instruction *I) const override {
2775     return I == getCtxI() && isAssumedDead();
2776   }
2777 
2778   /// See AAIsDead::isKnownDead(Instruction *I).
2779   bool isKnownDead(const Instruction *I) const override {
2780     return isAssumedDead(I) && getKnown();
2781   }
2782 
2783   /// See AbstractAttribute::getAsStr().
2784   const std::string getAsStr() const override {
2785     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2786   }
2787 
2788   /// Check if all uses are assumed dead.
2789   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2790     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2791     // Explicitly set the dependence class to required because we want a long
2792     // chain of N dependent instructions to be considered live as soon as one is
2793     // without going through N update cycles. This is not required for
2794     // correctness.
2795     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2796   }
2797 
2798   /// Determine if \p I is assumed to be side-effect free.
2799   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2800     if (!I || wouldInstructionBeTriviallyDead(I))
2801       return true;
2802 
2803     auto *CB = dyn_cast<CallBase>(I);
2804     if (!CB || isa<IntrinsicInst>(CB))
2805       return false;
2806 
2807     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2808     const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>(
2809         *this, CallIRP, /* TrackDependence */ false);
2810     if (!NoUnwindAA.isAssumedNoUnwind())
2811       return false;
2812     if (!NoUnwindAA.isKnownNoUnwind())
2813       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2814 
2815     const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>(
2816         *this, CallIRP, /* TrackDependence */ false);
2817     if (MemBehaviorAA.isAssumedReadOnly()) {
2818       if (!MemBehaviorAA.isKnownReadOnly())
2819         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2820       return true;
2821     }
2822     return false;
2823   }
2824 };
2825 
2826 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2827   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2828       : AAIsDeadValueImpl(IRP, A) {}
2829 
2830   /// See AbstractAttribute::initialize(...).
2831   void initialize(Attributor &A) override {
2832     if (isa<UndefValue>(getAssociatedValue())) {
2833       indicatePessimisticFixpoint();
2834       return;
2835     }
2836 
2837     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2838     if (!isAssumedSideEffectFree(A, I))
2839       indicatePessimisticFixpoint();
2840   }
2841 
2842   /// See AbstractAttribute::updateImpl(...).
2843   ChangeStatus updateImpl(Attributor &A) override {
2844     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2845     if (!isAssumedSideEffectFree(A, I))
2846       return indicatePessimisticFixpoint();
2847 
2848     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2849       return indicatePessimisticFixpoint();
2850     return ChangeStatus::UNCHANGED;
2851   }
2852 
2853   /// See AbstractAttribute::manifest(...).
2854   ChangeStatus manifest(Attributor &A) override {
2855     Value &V = getAssociatedValue();
2856     if (auto *I = dyn_cast<Instruction>(&V)) {
2857       // If we get here we basically know the users are all dead. We check if
2858       // isAssumedSideEffectFree returns true here again because it might not be
2859       // the case and only the users are dead but the instruction (=call) is
2860       // still needed.
2861       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2862         A.deleteAfterManifest(*I);
2863         return ChangeStatus::CHANGED;
2864       }
2865     }
2866     if (V.use_empty())
2867       return ChangeStatus::UNCHANGED;
2868 
2869     bool UsedAssumedInformation = false;
2870     Optional<Constant *> C =
2871         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2872     if (C.hasValue() && C.getValue())
2873       return ChangeStatus::UNCHANGED;
2874 
2875     // Replace the value with undef as it is dead but keep droppable uses around
2876     // as they provide information we don't want to give up on just yet.
2877     UndefValue &UV = *UndefValue::get(V.getType());
2878     bool AnyChange =
2879         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2880     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2881   }
2882 
2883   /// See AbstractAttribute::trackStatistics()
2884   void trackStatistics() const override {
2885     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2886   }
2887 };
2888 
2889 struct AAIsDeadArgument : public AAIsDeadFloating {
2890   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2891       : AAIsDeadFloating(IRP, A) {}
2892 
2893   /// See AbstractAttribute::initialize(...).
2894   void initialize(Attributor &A) override {
2895     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2896       indicatePessimisticFixpoint();
2897   }
2898 
2899   /// See AbstractAttribute::manifest(...).
2900   ChangeStatus manifest(Attributor &A) override {
2901     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2902     Argument &Arg = *getAssociatedArgument();
2903     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2904       if (A.registerFunctionSignatureRewrite(
2905               Arg, /* ReplacementTypes */ {},
2906               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2907               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2908         Arg.dropDroppableUses();
2909         return ChangeStatus::CHANGED;
2910       }
2911     return Changed;
2912   }
2913 
2914   /// See AbstractAttribute::trackStatistics()
2915   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2916 };
2917 
2918 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2919   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2920       : AAIsDeadValueImpl(IRP, A) {}
2921 
2922   /// See AbstractAttribute::initialize(...).
2923   void initialize(Attributor &A) override {
2924     if (isa<UndefValue>(getAssociatedValue()))
2925       indicatePessimisticFixpoint();
2926   }
2927 
2928   /// See AbstractAttribute::updateImpl(...).
2929   ChangeStatus updateImpl(Attributor &A) override {
2930     // TODO: Once we have call site specific value information we can provide
2931     //       call site specific liveness information and then it makes
2932     //       sense to specialize attributes for call sites arguments instead of
2933     //       redirecting requests to the callee argument.
2934     Argument *Arg = getAssociatedArgument();
2935     if (!Arg)
2936       return indicatePessimisticFixpoint();
2937     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2938     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2939     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2940   }
2941 
2942   /// See AbstractAttribute::manifest(...).
2943   ChangeStatus manifest(Attributor &A) override {
2944     CallBase &CB = cast<CallBase>(getAnchorValue());
2945     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2946     assert(!isa<UndefValue>(U.get()) &&
2947            "Expected undef values to be filtered out!");
2948     UndefValue &UV = *UndefValue::get(U->getType());
2949     if (A.changeUseAfterManifest(U, UV))
2950       return ChangeStatus::CHANGED;
2951     return ChangeStatus::UNCHANGED;
2952   }
2953 
2954   /// See AbstractAttribute::trackStatistics()
2955   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2956 };
2957 
2958 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2959   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2960       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2961 
2962   /// See AAIsDead::isAssumedDead().
2963   bool isAssumedDead() const override {
2964     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2965   }
2966 
2967   /// See AbstractAttribute::initialize(...).
2968   void initialize(Attributor &A) override {
2969     if (isa<UndefValue>(getAssociatedValue())) {
2970       indicatePessimisticFixpoint();
2971       return;
2972     }
2973 
2974     // We track this separately as a secondary state.
2975     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2976   }
2977 
2978   /// See AbstractAttribute::updateImpl(...).
2979   ChangeStatus updateImpl(Attributor &A) override {
2980     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2981     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2982       IsAssumedSideEffectFree = false;
2983       Changed = ChangeStatus::CHANGED;
2984     }
2985 
2986     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2987       return indicatePessimisticFixpoint();
2988     return Changed;
2989   }
2990 
2991   /// See AbstractAttribute::trackStatistics()
2992   void trackStatistics() const override {
2993     if (IsAssumedSideEffectFree)
2994       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2995     else
2996       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2997   }
2998 
2999   /// See AbstractAttribute::getAsStr().
3000   const std::string getAsStr() const override {
3001     return isAssumedDead()
3002                ? "assumed-dead"
3003                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3004   }
3005 
3006 private:
3007   bool IsAssumedSideEffectFree;
3008 };
3009 
3010 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3011   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3012       : AAIsDeadValueImpl(IRP, A) {}
3013 
3014   /// See AbstractAttribute::updateImpl(...).
3015   ChangeStatus updateImpl(Attributor &A) override {
3016 
3017     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3018                               {Instruction::Ret});
3019 
3020     auto PredForCallSite = [&](AbstractCallSite ACS) {
3021       if (ACS.isCallbackCall() || !ACS.getInstruction())
3022         return false;
3023       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3024     };
3025 
3026     bool AllCallSitesKnown;
3027     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3028                                 AllCallSitesKnown))
3029       return indicatePessimisticFixpoint();
3030 
3031     return ChangeStatus::UNCHANGED;
3032   }
3033 
3034   /// See AbstractAttribute::manifest(...).
3035   ChangeStatus manifest(Attributor &A) override {
3036     // TODO: Rewrite the signature to return void?
3037     bool AnyChange = false;
3038     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3039     auto RetInstPred = [&](Instruction &I) {
3040       ReturnInst &RI = cast<ReturnInst>(I);
3041       if (!isa<UndefValue>(RI.getReturnValue()))
3042         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3043       return true;
3044     };
3045     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3046     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3047   }
3048 
3049   /// See AbstractAttribute::trackStatistics()
3050   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3051 };
3052 
3053 struct AAIsDeadFunction : public AAIsDead {
3054   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3055 
3056   /// See AbstractAttribute::initialize(...).
3057   void initialize(Attributor &A) override {
3058     const Function *F = getAnchorScope();
3059     if (F && !F->isDeclaration()) {
3060       // We only want to compute liveness once. If the function is not part of
3061       // the SCC, skip it.
3062       if (A.isRunOn(*const_cast<Function *>(F))) {
3063         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3064         assumeLive(A, F->getEntryBlock());
3065       } else {
3066         indicatePessimisticFixpoint();
3067       }
3068     }
3069   }
3070 
3071   /// See AbstractAttribute::getAsStr().
3072   const std::string getAsStr() const override {
3073     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3074            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3075            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3076            std::to_string(KnownDeadEnds.size()) + "]";
3077   }
3078 
3079   /// See AbstractAttribute::manifest(...).
3080   ChangeStatus manifest(Attributor &A) override {
3081     assert(getState().isValidState() &&
3082            "Attempted to manifest an invalid state!");
3083 
3084     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3085     Function &F = *getAnchorScope();
3086 
3087     if (AssumedLiveBlocks.empty()) {
3088       A.deleteAfterManifest(F);
3089       return ChangeStatus::CHANGED;
3090     }
3091 
3092     // Flag to determine if we can change an invoke to a call assuming the
3093     // callee is nounwind. This is not possible if the personality of the
3094     // function allows to catch asynchronous exceptions.
3095     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3096 
3097     KnownDeadEnds.set_union(ToBeExploredFrom);
3098     for (const Instruction *DeadEndI : KnownDeadEnds) {
3099       auto *CB = dyn_cast<CallBase>(DeadEndI);
3100       if (!CB)
3101         continue;
3102       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3103           *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true,
3104           DepClassTy::OPTIONAL);
3105       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3106       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3107         continue;
3108 
3109       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3110         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3111       else
3112         A.changeToUnreachableAfterManifest(
3113             const_cast<Instruction *>(DeadEndI->getNextNode()));
3114       HasChanged = ChangeStatus::CHANGED;
3115     }
3116 
3117     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3118     for (BasicBlock &BB : F)
3119       if (!AssumedLiveBlocks.count(&BB)) {
3120         A.deleteAfterManifest(BB);
3121         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3122       }
3123 
3124     return HasChanged;
3125   }
3126 
3127   /// See AbstractAttribute::updateImpl(...).
3128   ChangeStatus updateImpl(Attributor &A) override;
3129 
3130   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3131     return !AssumedLiveEdges.count(std::make_pair(From, To));
3132   }
3133 
3134   /// See AbstractAttribute::trackStatistics()
3135   void trackStatistics() const override {}
3136 
3137   /// Returns true if the function is assumed dead.
3138   bool isAssumedDead() const override { return false; }
3139 
3140   /// See AAIsDead::isKnownDead().
3141   bool isKnownDead() const override { return false; }
3142 
3143   /// See AAIsDead::isAssumedDead(BasicBlock *).
3144   bool isAssumedDead(const BasicBlock *BB) const override {
3145     assert(BB->getParent() == getAnchorScope() &&
3146            "BB must be in the same anchor scope function.");
3147 
3148     if (!getAssumed())
3149       return false;
3150     return !AssumedLiveBlocks.count(BB);
3151   }
3152 
3153   /// See AAIsDead::isKnownDead(BasicBlock *).
3154   bool isKnownDead(const BasicBlock *BB) const override {
3155     return getKnown() && isAssumedDead(BB);
3156   }
3157 
3158   /// See AAIsDead::isAssumed(Instruction *I).
3159   bool isAssumedDead(const Instruction *I) const override {
3160     assert(I->getParent()->getParent() == getAnchorScope() &&
3161            "Instruction must be in the same anchor scope function.");
3162 
3163     if (!getAssumed())
3164       return false;
3165 
3166     // If it is not in AssumedLiveBlocks then it for sure dead.
3167     // Otherwise, it can still be after noreturn call in a live block.
3168     if (!AssumedLiveBlocks.count(I->getParent()))
3169       return true;
3170 
3171     // If it is not after a liveness barrier it is live.
3172     const Instruction *PrevI = I->getPrevNode();
3173     while (PrevI) {
3174       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3175         return true;
3176       PrevI = PrevI->getPrevNode();
3177     }
3178     return false;
3179   }
3180 
3181   /// See AAIsDead::isKnownDead(Instruction *I).
3182   bool isKnownDead(const Instruction *I) const override {
3183     return getKnown() && isAssumedDead(I);
3184   }
3185 
3186   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3187   /// that internal function called from \p BB should now be looked at.
3188   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3189     if (!AssumedLiveBlocks.insert(&BB).second)
3190       return false;
3191 
3192     // We assume that all of BB is (probably) live now and if there are calls to
3193     // internal functions we will assume that those are now live as well. This
3194     // is a performance optimization for blocks with calls to a lot of internal
3195     // functions. It can however cause dead functions to be treated as live.
3196     for (const Instruction &I : BB)
3197       if (const auto *CB = dyn_cast<CallBase>(&I))
3198         if (const Function *F = CB->getCalledFunction())
3199           if (F->hasLocalLinkage())
3200             A.markLiveInternalFunction(*F);
3201     return true;
3202   }
3203 
3204   /// Collection of instructions that need to be explored again, e.g., we
3205   /// did assume they do not transfer control to (one of their) successors.
3206   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3207 
3208   /// Collection of instructions that are known to not transfer control.
3209   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3210 
3211   /// Collection of all assumed live edges
3212   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3213 
3214   /// Collection of all assumed live BasicBlocks.
3215   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3216 };
3217 
3218 static bool
3219 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3220                         AbstractAttribute &AA,
3221                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3222   const IRPosition &IPos = IRPosition::callsite_function(CB);
3223 
3224   const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3225       AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3226   if (NoReturnAA.isAssumedNoReturn())
3227     return !NoReturnAA.isKnownNoReturn();
3228   if (CB.isTerminator())
3229     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3230   else
3231     AliveSuccessors.push_back(CB.getNextNode());
3232   return false;
3233 }
3234 
3235 static bool
3236 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3237                         AbstractAttribute &AA,
3238                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3239   bool UsedAssumedInformation =
3240       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3241 
3242   // First, determine if we can change an invoke to a call assuming the
3243   // callee is nounwind. This is not possible if the personality of the
3244   // function allows to catch asynchronous exceptions.
3245   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3246     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3247   } else {
3248     const IRPosition &IPos = IRPosition::callsite_function(II);
3249     const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>(
3250         AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3251     if (AANoUnw.isAssumedNoUnwind()) {
3252       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3253     } else {
3254       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3255     }
3256   }
3257   return UsedAssumedInformation;
3258 }
3259 
3260 static bool
3261 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3262                         AbstractAttribute &AA,
3263                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3264   bool UsedAssumedInformation = false;
3265   if (BI.getNumSuccessors() == 1) {
3266     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3267   } else {
3268     Optional<ConstantInt *> CI = getAssumedConstantInt(
3269         A, *BI.getCondition(), AA, UsedAssumedInformation);
3270     if (!CI.hasValue()) {
3271       // No value yet, assume both edges are dead.
3272     } else if (CI.getValue()) {
3273       const BasicBlock *SuccBB =
3274           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3275       AliveSuccessors.push_back(&SuccBB->front());
3276     } else {
3277       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3278       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3279       UsedAssumedInformation = false;
3280     }
3281   }
3282   return UsedAssumedInformation;
3283 }
3284 
3285 static bool
3286 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3287                         AbstractAttribute &AA,
3288                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3289   bool UsedAssumedInformation = false;
3290   Optional<ConstantInt *> CI =
3291       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3292   if (!CI.hasValue()) {
3293     // No value yet, assume all edges are dead.
3294   } else if (CI.getValue()) {
3295     for (auto &CaseIt : SI.cases()) {
3296       if (CaseIt.getCaseValue() == CI.getValue()) {
3297         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3298         return UsedAssumedInformation;
3299       }
3300     }
3301     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3302     return UsedAssumedInformation;
3303   } else {
3304     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3305       AliveSuccessors.push_back(&SuccBB->front());
3306   }
3307   return UsedAssumedInformation;
3308 }
3309 
3310 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3311   ChangeStatus Change = ChangeStatus::UNCHANGED;
3312 
3313   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3314                     << getAnchorScope()->size() << "] BBs and "
3315                     << ToBeExploredFrom.size() << " exploration points and "
3316                     << KnownDeadEnds.size() << " known dead ends\n");
3317 
3318   // Copy and clear the list of instructions we need to explore from. It is
3319   // refilled with instructions the next update has to look at.
3320   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3321                                                ToBeExploredFrom.end());
3322   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3323 
3324   SmallVector<const Instruction *, 8> AliveSuccessors;
3325   while (!Worklist.empty()) {
3326     const Instruction *I = Worklist.pop_back_val();
3327     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3328 
3329     // Fast forward for uninteresting instructions. We could look for UB here
3330     // though.
3331     while (!I->isTerminator() && !isa<CallBase>(I)) {
3332       Change = ChangeStatus::CHANGED;
3333       I = I->getNextNode();
3334     }
3335 
3336     AliveSuccessors.clear();
3337 
3338     bool UsedAssumedInformation = false;
3339     switch (I->getOpcode()) {
3340     // TODO: look for (assumed) UB to backwards propagate "deadness".
3341     default:
3342       assert(I->isTerminator() &&
3343              "Expected non-terminators to be handled already!");
3344       for (const BasicBlock *SuccBB : successors(I->getParent()))
3345         AliveSuccessors.push_back(&SuccBB->front());
3346       break;
3347     case Instruction::Call:
3348       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3349                                                        *this, AliveSuccessors);
3350       break;
3351     case Instruction::Invoke:
3352       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3353                                                        *this, AliveSuccessors);
3354       break;
3355     case Instruction::Br:
3356       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3357                                                        *this, AliveSuccessors);
3358       break;
3359     case Instruction::Switch:
3360       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3361                                                        *this, AliveSuccessors);
3362       break;
3363     }
3364 
3365     if (UsedAssumedInformation) {
3366       NewToBeExploredFrom.insert(I);
3367     } else {
3368       Change = ChangeStatus::CHANGED;
3369       if (AliveSuccessors.empty() ||
3370           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3371         KnownDeadEnds.insert(I);
3372     }
3373 
3374     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3375                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3376                       << UsedAssumedInformation << "\n");
3377 
3378     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3379       if (!I->isTerminator()) {
3380         assert(AliveSuccessors.size() == 1 &&
3381                "Non-terminator expected to have a single successor!");
3382         Worklist.push_back(AliveSuccessor);
3383       } else {
3384         // record the assumed live edge
3385         AssumedLiveEdges.insert(
3386             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3387         if (assumeLive(A, *AliveSuccessor->getParent()))
3388           Worklist.push_back(AliveSuccessor);
3389       }
3390     }
3391   }
3392 
3393   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3394 
3395   // If we know everything is live there is no need to query for liveness.
3396   // Instead, indicating a pessimistic fixpoint will cause the state to be
3397   // "invalid" and all queries to be answered conservatively without lookups.
3398   // To be in this state we have to (1) finished the exploration and (3) not
3399   // discovered any non-trivial dead end and (2) not ruled unreachable code
3400   // dead.
3401   if (ToBeExploredFrom.empty() &&
3402       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3403       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3404         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3405       }))
3406     return indicatePessimisticFixpoint();
3407   return Change;
3408 }
3409 
3410 /// Liveness information for a call sites.
3411 struct AAIsDeadCallSite final : AAIsDeadFunction {
3412   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3413       : AAIsDeadFunction(IRP, A) {}
3414 
3415   /// See AbstractAttribute::initialize(...).
3416   void initialize(Attributor &A) override {
3417     // TODO: Once we have call site specific value information we can provide
3418     //       call site specific liveness information and then it makes
3419     //       sense to specialize attributes for call sites instead of
3420     //       redirecting requests to the callee.
3421     llvm_unreachable("Abstract attributes for liveness are not "
3422                      "supported for call sites yet!");
3423   }
3424 
3425   /// See AbstractAttribute::updateImpl(...).
3426   ChangeStatus updateImpl(Attributor &A) override {
3427     return indicatePessimisticFixpoint();
3428   }
3429 
3430   /// See AbstractAttribute::trackStatistics()
3431   void trackStatistics() const override {}
3432 };
3433 
3434 /// -------------------- Dereferenceable Argument Attribute --------------------
3435 
3436 template <>
3437 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3438                                                      const DerefState &R) {
3439   ChangeStatus CS0 =
3440       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3441   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3442   return CS0 | CS1;
3443 }
3444 
3445 struct AADereferenceableImpl : AADereferenceable {
3446   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3447       : AADereferenceable(IRP, A) {}
3448   using StateType = DerefState;
3449 
3450   /// See AbstractAttribute::initialize(...).
3451   void initialize(Attributor &A) override {
3452     SmallVector<Attribute, 4> Attrs;
3453     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3454              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3455     for (const Attribute &Attr : Attrs)
3456       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3457 
3458     const IRPosition &IRP = this->getIRPosition();
3459     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP,
3460                                        /* TrackDependence */ false);
3461 
3462     bool CanBeNull;
3463     takeKnownDerefBytesMaximum(
3464         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3465             A.getDataLayout(), CanBeNull));
3466 
3467     bool IsFnInterface = IRP.isFnInterfaceKind();
3468     Function *FnScope = IRP.getAnchorScope();
3469     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3470       indicatePessimisticFixpoint();
3471       return;
3472     }
3473 
3474     if (Instruction *CtxI = getCtxI())
3475       followUsesInMBEC(*this, A, getState(), *CtxI);
3476   }
3477 
3478   /// See AbstractAttribute::getState()
3479   /// {
3480   StateType &getState() override { return *this; }
3481   const StateType &getState() const override { return *this; }
3482   /// }
3483 
3484   /// Helper function for collecting accessed bytes in must-be-executed-context
3485   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3486                               DerefState &State) {
3487     const Value *UseV = U->get();
3488     if (!UseV->getType()->isPointerTy())
3489       return;
3490 
3491     Type *PtrTy = UseV->getType();
3492     const DataLayout &DL = A.getDataLayout();
3493     int64_t Offset;
3494     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3495             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3496       if (Base == &getAssociatedValue() &&
3497           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3498         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3499         State.addAccessedBytes(Offset, Size);
3500       }
3501     }
3502   }
3503 
3504   /// See followUsesInMBEC
3505   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3506                        AADereferenceable::StateType &State) {
3507     bool IsNonNull = false;
3508     bool TrackUse = false;
3509     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3510         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3511     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3512                       << " for instruction " << *I << "\n");
3513 
3514     addAccessedBytesForUse(A, U, I, State);
3515     State.takeKnownDerefBytesMaximum(DerefBytes);
3516     return TrackUse;
3517   }
3518 
3519   /// See AbstractAttribute::manifest(...).
3520   ChangeStatus manifest(Attributor &A) override {
3521     ChangeStatus Change = AADereferenceable::manifest(A);
3522     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3523       removeAttrs({Attribute::DereferenceableOrNull});
3524       return ChangeStatus::CHANGED;
3525     }
3526     return Change;
3527   }
3528 
3529   void getDeducedAttributes(LLVMContext &Ctx,
3530                             SmallVectorImpl<Attribute> &Attrs) const override {
3531     // TODO: Add *_globally support
3532     if (isAssumedNonNull())
3533       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3534           Ctx, getAssumedDereferenceableBytes()));
3535     else
3536       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3537           Ctx, getAssumedDereferenceableBytes()));
3538   }
3539 
3540   /// See AbstractAttribute::getAsStr().
3541   const std::string getAsStr() const override {
3542     if (!getAssumedDereferenceableBytes())
3543       return "unknown-dereferenceable";
3544     return std::string("dereferenceable") +
3545            (isAssumedNonNull() ? "" : "_or_null") +
3546            (isAssumedGlobal() ? "_globally" : "") + "<" +
3547            std::to_string(getKnownDereferenceableBytes()) + "-" +
3548            std::to_string(getAssumedDereferenceableBytes()) + ">";
3549   }
3550 };
3551 
3552 /// Dereferenceable attribute for a floating value.
3553 struct AADereferenceableFloating : AADereferenceableImpl {
3554   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3555       : AADereferenceableImpl(IRP, A) {}
3556 
3557   /// See AbstractAttribute::updateImpl(...).
3558   ChangeStatus updateImpl(Attributor &A) override {
3559     const DataLayout &DL = A.getDataLayout();
3560 
3561     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3562                             bool Stripped) -> bool {
3563       unsigned IdxWidth =
3564           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3565       APInt Offset(IdxWidth, 0);
3566       const Value *Base =
3567           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3568 
3569       const auto &AA =
3570           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3571       int64_t DerefBytes = 0;
3572       if (!Stripped && this == &AA) {
3573         // Use IR information if we did not strip anything.
3574         // TODO: track globally.
3575         bool CanBeNull;
3576         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3577         T.GlobalState.indicatePessimisticFixpoint();
3578       } else {
3579         const DerefState &DS = AA.getState();
3580         DerefBytes = DS.DerefBytesState.getAssumed();
3581         T.GlobalState &= DS.GlobalState;
3582       }
3583 
3584       // For now we do not try to "increase" dereferenceability due to negative
3585       // indices as we first have to come up with code to deal with loops and
3586       // for overflows of the dereferenceable bytes.
3587       int64_t OffsetSExt = Offset.getSExtValue();
3588       if (OffsetSExt < 0)
3589         OffsetSExt = 0;
3590 
3591       T.takeAssumedDerefBytesMinimum(
3592           std::max(int64_t(0), DerefBytes - OffsetSExt));
3593 
3594       if (this == &AA) {
3595         if (!Stripped) {
3596           // If nothing was stripped IR information is all we got.
3597           T.takeKnownDerefBytesMaximum(
3598               std::max(int64_t(0), DerefBytes - OffsetSExt));
3599           T.indicatePessimisticFixpoint();
3600         } else if (OffsetSExt > 0) {
3601           // If something was stripped but there is circular reasoning we look
3602           // for the offset. If it is positive we basically decrease the
3603           // dereferenceable bytes in a circluar loop now, which will simply
3604           // drive them down to the known value in a very slow way which we
3605           // can accelerate.
3606           T.indicatePessimisticFixpoint();
3607         }
3608       }
3609 
3610       return T.isValidState();
3611     };
3612 
3613     DerefState T;
3614     if (!genericValueTraversal<AADereferenceable, DerefState>(
3615             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3616       return indicatePessimisticFixpoint();
3617 
3618     return clampStateAndIndicateChange(getState(), T);
3619   }
3620 
3621   /// See AbstractAttribute::trackStatistics()
3622   void trackStatistics() const override {
3623     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3624   }
3625 };
3626 
3627 /// Dereferenceable attribute for a return value.
3628 struct AADereferenceableReturned final
3629     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3630   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3631       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3632             IRP, A) {}
3633 
3634   /// See AbstractAttribute::trackStatistics()
3635   void trackStatistics() const override {
3636     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3637   }
3638 };
3639 
3640 /// Dereferenceable attribute for an argument
3641 struct AADereferenceableArgument final
3642     : AAArgumentFromCallSiteArguments<AADereferenceable,
3643                                       AADereferenceableImpl> {
3644   using Base =
3645       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3646   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3647       : Base(IRP, A) {}
3648 
3649   /// See AbstractAttribute::trackStatistics()
3650   void trackStatistics() const override {
3651     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3652   }
3653 };
3654 
3655 /// Dereferenceable attribute for a call site argument.
3656 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3657   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3658       : AADereferenceableFloating(IRP, A) {}
3659 
3660   /// See AbstractAttribute::trackStatistics()
3661   void trackStatistics() const override {
3662     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3663   }
3664 };
3665 
3666 /// Dereferenceable attribute deduction for a call site return value.
3667 struct AADereferenceableCallSiteReturned final
3668     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3669   using Base =
3670       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3671   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3672       : Base(IRP, A) {}
3673 
3674   /// See AbstractAttribute::trackStatistics()
3675   void trackStatistics() const override {
3676     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3677   }
3678 };
3679 
3680 // ------------------------ Align Argument Attribute ------------------------
3681 
3682 static unsigned getKnownAlignForUse(Attributor &A,
3683                                     AbstractAttribute &QueryingAA,
3684                                     Value &AssociatedValue, const Use *U,
3685                                     const Instruction *I, bool &TrackUse) {
3686   // We need to follow common pointer manipulation uses to the accesses they
3687   // feed into.
3688   if (isa<CastInst>(I)) {
3689     // Follow all but ptr2int casts.
3690     TrackUse = !isa<PtrToIntInst>(I);
3691     return 0;
3692   }
3693   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3694     if (GEP->hasAllConstantIndices()) {
3695       TrackUse = true;
3696       return 0;
3697     }
3698   }
3699 
3700   MaybeAlign MA;
3701   if (const auto *CB = dyn_cast<CallBase>(I)) {
3702     if (CB->isBundleOperand(U) || CB->isCallee(U))
3703       return 0;
3704 
3705     unsigned ArgNo = CB->getArgOperandNo(U);
3706     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3707     // As long as we only use known information there is no need to track
3708     // dependences here.
3709     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3710                                         /* TrackDependence */ false);
3711     MA = MaybeAlign(AlignAA.getKnownAlign());
3712   }
3713 
3714   const DataLayout &DL = A.getDataLayout();
3715   const Value *UseV = U->get();
3716   if (auto *SI = dyn_cast<StoreInst>(I)) {
3717     if (SI->getPointerOperand() == UseV)
3718       MA = SI->getAlign();
3719   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3720     if (LI->getPointerOperand() == UseV)
3721       MA = LI->getAlign();
3722   }
3723 
3724   if (!MA || *MA <= 1)
3725     return 0;
3726 
3727   unsigned Alignment = MA->value();
3728   int64_t Offset;
3729 
3730   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3731     if (Base == &AssociatedValue) {
3732       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3733       // So we can say that the maximum power of two which is a divisor of
3734       // gcd(Offset, Alignment) is an alignment.
3735 
3736       uint32_t gcd =
3737           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3738       Alignment = llvm::PowerOf2Floor(gcd);
3739     }
3740   }
3741 
3742   return Alignment;
3743 }
3744 
3745 struct AAAlignImpl : AAAlign {
3746   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3747 
3748   /// See AbstractAttribute::initialize(...).
3749   void initialize(Attributor &A) override {
3750     SmallVector<Attribute, 4> Attrs;
3751     getAttrs({Attribute::Alignment}, Attrs);
3752     for (const Attribute &Attr : Attrs)
3753       takeKnownMaximum(Attr.getValueAsInt());
3754 
3755     Value &V = getAssociatedValue();
3756     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3757     //       use of the function pointer. This was caused by D73131. We want to
3758     //       avoid this for function pointers especially because we iterate
3759     //       their uses and int2ptr is not handled. It is not a correctness
3760     //       problem though!
3761     if (!V.getType()->getPointerElementType()->isFunctionTy())
3762       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3763 
3764     if (getIRPosition().isFnInterfaceKind() &&
3765         (!getAnchorScope() ||
3766          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3767       indicatePessimisticFixpoint();
3768       return;
3769     }
3770 
3771     if (Instruction *CtxI = getCtxI())
3772       followUsesInMBEC(*this, A, getState(), *CtxI);
3773   }
3774 
3775   /// See AbstractAttribute::manifest(...).
3776   ChangeStatus manifest(Attributor &A) override {
3777     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3778 
3779     // Check for users that allow alignment annotations.
3780     Value &AssociatedValue = getAssociatedValue();
3781     for (const Use &U : AssociatedValue.uses()) {
3782       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3783         if (SI->getPointerOperand() == &AssociatedValue)
3784           if (SI->getAlignment() < getAssumedAlign()) {
3785             STATS_DECLTRACK(AAAlign, Store,
3786                             "Number of times alignment added to a store");
3787             SI->setAlignment(Align(getAssumedAlign()));
3788             LoadStoreChanged = ChangeStatus::CHANGED;
3789           }
3790       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3791         if (LI->getPointerOperand() == &AssociatedValue)
3792           if (LI->getAlignment() < getAssumedAlign()) {
3793             LI->setAlignment(Align(getAssumedAlign()));
3794             STATS_DECLTRACK(AAAlign, Load,
3795                             "Number of times alignment added to a load");
3796             LoadStoreChanged = ChangeStatus::CHANGED;
3797           }
3798       }
3799     }
3800 
3801     ChangeStatus Changed = AAAlign::manifest(A);
3802 
3803     Align InheritAlign =
3804         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3805     if (InheritAlign >= getAssumedAlign())
3806       return LoadStoreChanged;
3807     return Changed | LoadStoreChanged;
3808   }
3809 
3810   // TODO: Provide a helper to determine the implied ABI alignment and check in
3811   //       the existing manifest method and a new one for AAAlignImpl that value
3812   //       to avoid making the alignment explicit if it did not improve.
3813 
3814   /// See AbstractAttribute::getDeducedAttributes
3815   virtual void
3816   getDeducedAttributes(LLVMContext &Ctx,
3817                        SmallVectorImpl<Attribute> &Attrs) const override {
3818     if (getAssumedAlign() > 1)
3819       Attrs.emplace_back(
3820           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3821   }
3822 
3823   /// See followUsesInMBEC
3824   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3825                        AAAlign::StateType &State) {
3826     bool TrackUse = false;
3827 
3828     unsigned int KnownAlign =
3829         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3830     State.takeKnownMaximum(KnownAlign);
3831 
3832     return TrackUse;
3833   }
3834 
3835   /// See AbstractAttribute::getAsStr().
3836   const std::string getAsStr() const override {
3837     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3838                                 "-" + std::to_string(getAssumedAlign()) + ">")
3839                              : "unknown-align";
3840   }
3841 };
3842 
3843 /// Align attribute for a floating value.
3844 struct AAAlignFloating : AAAlignImpl {
3845   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3846 
3847   /// See AbstractAttribute::updateImpl(...).
3848   ChangeStatus updateImpl(Attributor &A) override {
3849     const DataLayout &DL = A.getDataLayout();
3850 
3851     auto VisitValueCB = [&](Value &V, const Instruction *,
3852                             AAAlign::StateType &T, bool Stripped) -> bool {
3853       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3854       if (!Stripped && this == &AA) {
3855         int64_t Offset;
3856         unsigned Alignment = 1;
3857         if (const Value *Base =
3858                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3859           Align PA = Base->getPointerAlignment(DL);
3860           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3861           // So we can say that the maximum power of two which is a divisor of
3862           // gcd(Offset, Alignment) is an alignment.
3863 
3864           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3865                                                uint32_t(PA.value()));
3866           Alignment = llvm::PowerOf2Floor(gcd);
3867         } else {
3868           Alignment = V.getPointerAlignment(DL).value();
3869         }
3870         // Use only IR information if we did not strip anything.
3871         T.takeKnownMaximum(Alignment);
3872         T.indicatePessimisticFixpoint();
3873       } else {
3874         // Use abstract attribute information.
3875         const AAAlign::StateType &DS = AA.getState();
3876         T ^= DS;
3877       }
3878       return T.isValidState();
3879     };
3880 
3881     StateType T;
3882     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3883                                                    VisitValueCB, getCtxI()))
3884       return indicatePessimisticFixpoint();
3885 
3886     // TODO: If we know we visited all incoming values, thus no are assumed
3887     // dead, we can take the known information from the state T.
3888     return clampStateAndIndicateChange(getState(), T);
3889   }
3890 
3891   /// See AbstractAttribute::trackStatistics()
3892   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3893 };
3894 
3895 /// Align attribute for function return value.
3896 struct AAAlignReturned final
3897     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3898   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3899   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3900 
3901   /// See AbstractAttribute::initialize(...).
3902   void initialize(Attributor &A) override {
3903     Base::initialize(A);
3904     Function *F = getAssociatedFunction();
3905     if (!F || F->isDeclaration())
3906       indicatePessimisticFixpoint();
3907   }
3908 
3909   /// See AbstractAttribute::trackStatistics()
3910   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3911 };
3912 
3913 /// Align attribute for function argument.
3914 struct AAAlignArgument final
3915     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3916   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3917   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3918 
3919   /// See AbstractAttribute::manifest(...).
3920   ChangeStatus manifest(Attributor &A) override {
3921     // If the associated argument is involved in a must-tail call we give up
3922     // because we would need to keep the argument alignments of caller and
3923     // callee in-sync. Just does not seem worth the trouble right now.
3924     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3925       return ChangeStatus::UNCHANGED;
3926     return Base::manifest(A);
3927   }
3928 
3929   /// See AbstractAttribute::trackStatistics()
3930   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3931 };
3932 
3933 struct AAAlignCallSiteArgument final : AAAlignFloating {
3934   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3935       : AAAlignFloating(IRP, A) {}
3936 
3937   /// See AbstractAttribute::manifest(...).
3938   ChangeStatus manifest(Attributor &A) override {
3939     // If the associated argument is involved in a must-tail call we give up
3940     // because we would need to keep the argument alignments of caller and
3941     // callee in-sync. Just does not seem worth the trouble right now.
3942     if (Argument *Arg = getAssociatedArgument())
3943       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3944         return ChangeStatus::UNCHANGED;
3945     ChangeStatus Changed = AAAlignImpl::manifest(A);
3946     Align InheritAlign =
3947         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3948     if (InheritAlign >= getAssumedAlign())
3949       Changed = ChangeStatus::UNCHANGED;
3950     return Changed;
3951   }
3952 
3953   /// See AbstractAttribute::updateImpl(Attributor &A).
3954   ChangeStatus updateImpl(Attributor &A) override {
3955     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3956     if (Argument *Arg = getAssociatedArgument()) {
3957       // We only take known information from the argument
3958       // so we do not need to track a dependence.
3959       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3960           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3961       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3962     }
3963     return Changed;
3964   }
3965 
3966   /// See AbstractAttribute::trackStatistics()
3967   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3968 };
3969 
3970 /// Align attribute deduction for a call site return value.
3971 struct AAAlignCallSiteReturned final
3972     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3973   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3974   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3975       : Base(IRP, A) {}
3976 
3977   /// See AbstractAttribute::initialize(...).
3978   void initialize(Attributor &A) override {
3979     Base::initialize(A);
3980     Function *F = getAssociatedFunction();
3981     if (!F || F->isDeclaration())
3982       indicatePessimisticFixpoint();
3983   }
3984 
3985   /// See AbstractAttribute::trackStatistics()
3986   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3987 };
3988 
3989 /// ------------------ Function No-Return Attribute ----------------------------
3990 struct AANoReturnImpl : public AANoReturn {
3991   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3992 
3993   /// See AbstractAttribute::initialize(...).
3994   void initialize(Attributor &A) override {
3995     AANoReturn::initialize(A);
3996     Function *F = getAssociatedFunction();
3997     if (!F || F->isDeclaration())
3998       indicatePessimisticFixpoint();
3999   }
4000 
4001   /// See AbstractAttribute::getAsStr().
4002   const std::string getAsStr() const override {
4003     return getAssumed() ? "noreturn" : "may-return";
4004   }
4005 
4006   /// See AbstractAttribute::updateImpl(Attributor &A).
4007   virtual ChangeStatus updateImpl(Attributor &A) override {
4008     auto CheckForNoReturn = [](Instruction &) { return false; };
4009     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4010                                    {(unsigned)Instruction::Ret}))
4011       return indicatePessimisticFixpoint();
4012     return ChangeStatus::UNCHANGED;
4013   }
4014 };
4015 
4016 struct AANoReturnFunction final : AANoReturnImpl {
4017   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4018       : AANoReturnImpl(IRP, A) {}
4019 
4020   /// See AbstractAttribute::trackStatistics()
4021   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4022 };
4023 
4024 /// NoReturn attribute deduction for a call sites.
4025 struct AANoReturnCallSite final : AANoReturnImpl {
4026   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4027       : AANoReturnImpl(IRP, A) {}
4028 
4029   /// See AbstractAttribute::initialize(...).
4030   void initialize(Attributor &A) override {
4031     AANoReturnImpl::initialize(A);
4032     if (Function *F = getAssociatedFunction()) {
4033       const IRPosition &FnPos = IRPosition::function(*F);
4034       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4035       if (!FnAA.isAssumedNoReturn())
4036         indicatePessimisticFixpoint();
4037     }
4038   }
4039 
4040   /// See AbstractAttribute::updateImpl(...).
4041   ChangeStatus updateImpl(Attributor &A) override {
4042     // TODO: Once we have call site specific value information we can provide
4043     //       call site specific liveness information and then it makes
4044     //       sense to specialize attributes for call sites arguments instead of
4045     //       redirecting requests to the callee argument.
4046     Function *F = getAssociatedFunction();
4047     const IRPosition &FnPos = IRPosition::function(*F);
4048     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4049     return clampStateAndIndicateChange(getState(), FnAA.getState());
4050   }
4051 
4052   /// See AbstractAttribute::trackStatistics()
4053   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4054 };
4055 
4056 /// ----------------------- Variable Capturing ---------------------------------
4057 
4058 /// A class to hold the state of for no-capture attributes.
4059 struct AANoCaptureImpl : public AANoCapture {
4060   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4061 
4062   /// See AbstractAttribute::initialize(...).
4063   void initialize(Attributor &A) override {
4064     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4065       indicateOptimisticFixpoint();
4066       return;
4067     }
4068     Function *AnchorScope = getAnchorScope();
4069     if (isFnInterfaceKind() &&
4070         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4071       indicatePessimisticFixpoint();
4072       return;
4073     }
4074 
4075     // You cannot "capture" null in the default address space.
4076     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4077         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4078       indicateOptimisticFixpoint();
4079       return;
4080     }
4081 
4082     const Function *F =
4083         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4084 
4085     // Check what state the associated function can actually capture.
4086     if (F)
4087       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4088     else
4089       indicatePessimisticFixpoint();
4090   }
4091 
4092   /// See AbstractAttribute::updateImpl(...).
4093   ChangeStatus updateImpl(Attributor &A) override;
4094 
4095   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4096   virtual void
4097   getDeducedAttributes(LLVMContext &Ctx,
4098                        SmallVectorImpl<Attribute> &Attrs) const override {
4099     if (!isAssumedNoCaptureMaybeReturned())
4100       return;
4101 
4102     if (isArgumentPosition()) {
4103       if (isAssumedNoCapture())
4104         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4105       else if (ManifestInternal)
4106         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4107     }
4108   }
4109 
4110   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4111   /// depending on the ability of the function associated with \p IRP to capture
4112   /// state in memory and through "returning/throwing", respectively.
4113   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4114                                                    const Function &F,
4115                                                    BitIntegerState &State) {
4116     // TODO: Once we have memory behavior attributes we should use them here.
4117 
4118     // If we know we cannot communicate or write to memory, we do not care about
4119     // ptr2int anymore.
4120     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4121         F.getReturnType()->isVoidTy()) {
4122       State.addKnownBits(NO_CAPTURE);
4123       return;
4124     }
4125 
4126     // A function cannot capture state in memory if it only reads memory, it can
4127     // however return/throw state and the state might be influenced by the
4128     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4129     if (F.onlyReadsMemory())
4130       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4131 
4132     // A function cannot communicate state back if it does not through
4133     // exceptions and doesn not return values.
4134     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4135       State.addKnownBits(NOT_CAPTURED_IN_RET);
4136 
4137     // Check existing "returned" attributes.
4138     int ArgNo = IRP.getCalleeArgNo();
4139     if (F.doesNotThrow() && ArgNo >= 0) {
4140       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4141         if (F.hasParamAttribute(u, Attribute::Returned)) {
4142           if (u == unsigned(ArgNo))
4143             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4144           else if (F.onlyReadsMemory())
4145             State.addKnownBits(NO_CAPTURE);
4146           else
4147             State.addKnownBits(NOT_CAPTURED_IN_RET);
4148           break;
4149         }
4150     }
4151   }
4152 
4153   /// See AbstractState::getAsStr().
4154   const std::string getAsStr() const override {
4155     if (isKnownNoCapture())
4156       return "known not-captured";
4157     if (isAssumedNoCapture())
4158       return "assumed not-captured";
4159     if (isKnownNoCaptureMaybeReturned())
4160       return "known not-captured-maybe-returned";
4161     if (isAssumedNoCaptureMaybeReturned())
4162       return "assumed not-captured-maybe-returned";
4163     return "assumed-captured";
4164   }
4165 };
4166 
4167 /// Attributor-aware capture tracker.
4168 struct AACaptureUseTracker final : public CaptureTracker {
4169 
4170   /// Create a capture tracker that can lookup in-flight abstract attributes
4171   /// through the Attributor \p A.
4172   ///
4173   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4174   /// search is stopped. If a use leads to a return instruction,
4175   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4176   /// If a use leads to a ptr2int which may capture the value,
4177   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4178   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4179   /// set. All values in \p PotentialCopies are later tracked as well. For every
4180   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4181   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4182   /// conservatively set to true.
4183   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4184                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4185                       SmallVectorImpl<const Value *> &PotentialCopies,
4186                       unsigned &RemainingUsesToExplore)
4187       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4188         PotentialCopies(PotentialCopies),
4189         RemainingUsesToExplore(RemainingUsesToExplore) {}
4190 
4191   /// Determine if \p V maybe captured. *Also updates the state!*
4192   bool valueMayBeCaptured(const Value *V) {
4193     if (V->getType()->isPointerTy()) {
4194       PointerMayBeCaptured(V, this);
4195     } else {
4196       State.indicatePessimisticFixpoint();
4197     }
4198     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4199   }
4200 
4201   /// See CaptureTracker::tooManyUses().
4202   void tooManyUses() override {
4203     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4204   }
4205 
4206   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4207     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4208       return true;
4209     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4210         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4211         DepClassTy::OPTIONAL);
4212     return DerefAA.getAssumedDereferenceableBytes();
4213   }
4214 
4215   /// See CaptureTracker::captured(...).
4216   bool captured(const Use *U) override {
4217     Instruction *UInst = cast<Instruction>(U->getUser());
4218     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4219                       << "\n");
4220 
4221     // Because we may reuse the tracker multiple times we keep track of the
4222     // number of explored uses ourselves as well.
4223     if (RemainingUsesToExplore-- == 0) {
4224       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4225       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4226                           /* Return */ true);
4227     }
4228 
4229     // Deal with ptr2int by following uses.
4230     if (isa<PtrToIntInst>(UInst)) {
4231       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4232       return valueMayBeCaptured(UInst);
4233     }
4234 
4235     // Explicitly catch return instructions.
4236     if (isa<ReturnInst>(UInst))
4237       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4238                           /* Return */ true);
4239 
4240     // For now we only use special logic for call sites. However, the tracker
4241     // itself knows about a lot of other non-capturing cases already.
4242     auto *CB = dyn_cast<CallBase>(UInst);
4243     if (!CB || !CB->isArgOperand(U))
4244       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4245                           /* Return */ true);
4246 
4247     unsigned ArgNo = CB->getArgOperandNo(U);
4248     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4249     // If we have a abstract no-capture attribute for the argument we can use
4250     // it to justify a non-capture attribute here. This allows recursion!
4251     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4252     if (ArgNoCaptureAA.isAssumedNoCapture())
4253       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4254                           /* Return */ false);
4255     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4256       addPotentialCopy(*CB);
4257       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4258                           /* Return */ false);
4259     }
4260 
4261     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4262     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4263                         /* Return */ true);
4264   }
4265 
4266   /// Register \p CS as potential copy of the value we are checking.
4267   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4268 
4269   /// See CaptureTracker::shouldExplore(...).
4270   bool shouldExplore(const Use *U) override {
4271     // Check liveness and ignore droppable users.
4272     return !U->getUser()->isDroppable() &&
4273            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4274   }
4275 
4276   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4277   /// \p CapturedInRet, then return the appropriate value for use in the
4278   /// CaptureTracker::captured() interface.
4279   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4280                     bool CapturedInRet) {
4281     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4282                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4283     if (CapturedInMem)
4284       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4285     if (CapturedInInt)
4286       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4287     if (CapturedInRet)
4288       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4289     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4290   }
4291 
4292 private:
4293   /// The attributor providing in-flight abstract attributes.
4294   Attributor &A;
4295 
4296   /// The abstract attribute currently updated.
4297   AANoCapture &NoCaptureAA;
4298 
4299   /// The abstract liveness state.
4300   const AAIsDead &IsDeadAA;
4301 
4302   /// The state currently updated.
4303   AANoCapture::StateType &State;
4304 
4305   /// Set of potential copies of the tracked value.
4306   SmallVectorImpl<const Value *> &PotentialCopies;
4307 
4308   /// Global counter to limit the number of explored uses.
4309   unsigned &RemainingUsesToExplore;
4310 };
4311 
4312 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4313   const IRPosition &IRP = getIRPosition();
4314   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4315                                         : &IRP.getAssociatedValue();
4316   if (!V)
4317     return indicatePessimisticFixpoint();
4318 
4319   const Function *F =
4320       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4321   assert(F && "Expected a function!");
4322   const IRPosition &FnPos = IRPosition::function(*F);
4323   const auto &IsDeadAA =
4324       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4325 
4326   AANoCapture::StateType T;
4327 
4328   // Readonly means we cannot capture through memory.
4329   const auto &FnMemAA =
4330       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4331   if (FnMemAA.isAssumedReadOnly()) {
4332     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4333     if (FnMemAA.isKnownReadOnly())
4334       addKnownBits(NOT_CAPTURED_IN_MEM);
4335     else
4336       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4337   }
4338 
4339   // Make sure all returned values are different than the underlying value.
4340   // TODO: we could do this in a more sophisticated way inside
4341   //       AAReturnedValues, e.g., track all values that escape through returns
4342   //       directly somehow.
4343   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4344     bool SeenConstant = false;
4345     for (auto &It : RVAA.returned_values()) {
4346       if (isa<Constant>(It.first)) {
4347         if (SeenConstant)
4348           return false;
4349         SeenConstant = true;
4350       } else if (!isa<Argument>(It.first) ||
4351                  It.first == getAssociatedArgument())
4352         return false;
4353     }
4354     return true;
4355   };
4356 
4357   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4358       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4359   if (NoUnwindAA.isAssumedNoUnwind()) {
4360     bool IsVoidTy = F->getReturnType()->isVoidTy();
4361     const AAReturnedValues *RVAA =
4362         IsVoidTy ? nullptr
4363                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4364                                                  /* TrackDependence */ true,
4365                                                  DepClassTy::OPTIONAL);
4366     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4367       T.addKnownBits(NOT_CAPTURED_IN_RET);
4368       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4369         return ChangeStatus::UNCHANGED;
4370       if (NoUnwindAA.isKnownNoUnwind() &&
4371           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4372         addKnownBits(NOT_CAPTURED_IN_RET);
4373         if (isKnown(NOT_CAPTURED_IN_MEM))
4374           return indicateOptimisticFixpoint();
4375       }
4376     }
4377   }
4378 
4379   // Use the CaptureTracker interface and logic with the specialized tracker,
4380   // defined in AACaptureUseTracker, that can look at in-flight abstract
4381   // attributes and directly updates the assumed state.
4382   SmallVector<const Value *, 4> PotentialCopies;
4383   unsigned RemainingUsesToExplore =
4384       getDefaultMaxUsesToExploreForCaptureTracking();
4385   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4386                               RemainingUsesToExplore);
4387 
4388   // Check all potential copies of the associated value until we can assume
4389   // none will be captured or we have to assume at least one might be.
4390   unsigned Idx = 0;
4391   PotentialCopies.push_back(V);
4392   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4393     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4394 
4395   AANoCapture::StateType &S = getState();
4396   auto Assumed = S.getAssumed();
4397   S.intersectAssumedBits(T.getAssumed());
4398   if (!isAssumedNoCaptureMaybeReturned())
4399     return indicatePessimisticFixpoint();
4400   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4401                                    : ChangeStatus::CHANGED;
4402 }
4403 
4404 /// NoCapture attribute for function arguments.
4405 struct AANoCaptureArgument final : AANoCaptureImpl {
4406   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4407       : AANoCaptureImpl(IRP, A) {}
4408 
4409   /// See AbstractAttribute::trackStatistics()
4410   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4411 };
4412 
4413 /// NoCapture attribute for call site arguments.
4414 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4415   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4416       : AANoCaptureImpl(IRP, A) {}
4417 
4418   /// See AbstractAttribute::initialize(...).
4419   void initialize(Attributor &A) override {
4420     if (Argument *Arg = getAssociatedArgument())
4421       if (Arg->hasByValAttr())
4422         indicateOptimisticFixpoint();
4423     AANoCaptureImpl::initialize(A);
4424   }
4425 
4426   /// See AbstractAttribute::updateImpl(...).
4427   ChangeStatus updateImpl(Attributor &A) override {
4428     // TODO: Once we have call site specific value information we can provide
4429     //       call site specific liveness information and then it makes
4430     //       sense to specialize attributes for call sites arguments instead of
4431     //       redirecting requests to the callee argument.
4432     Argument *Arg = getAssociatedArgument();
4433     if (!Arg)
4434       return indicatePessimisticFixpoint();
4435     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4436     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4437     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4438   }
4439 
4440   /// See AbstractAttribute::trackStatistics()
4441   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4442 };
4443 
4444 /// NoCapture attribute for floating values.
4445 struct AANoCaptureFloating final : AANoCaptureImpl {
4446   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4447       : AANoCaptureImpl(IRP, A) {}
4448 
4449   /// See AbstractAttribute::trackStatistics()
4450   void trackStatistics() const override {
4451     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4452   }
4453 };
4454 
4455 /// NoCapture attribute for function return value.
4456 struct AANoCaptureReturned final : AANoCaptureImpl {
4457   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4458       : AANoCaptureImpl(IRP, A) {
4459     llvm_unreachable("NoCapture is not applicable to function returns!");
4460   }
4461 
4462   /// See AbstractAttribute::initialize(...).
4463   void initialize(Attributor &A) override {
4464     llvm_unreachable("NoCapture is not applicable to function returns!");
4465   }
4466 
4467   /// See AbstractAttribute::updateImpl(...).
4468   ChangeStatus updateImpl(Attributor &A) override {
4469     llvm_unreachable("NoCapture is not applicable to function returns!");
4470   }
4471 
4472   /// See AbstractAttribute::trackStatistics()
4473   void trackStatistics() const override {}
4474 };
4475 
4476 /// NoCapture attribute deduction for a call site return value.
4477 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4478   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4479       : AANoCaptureImpl(IRP, A) {}
4480 
4481   /// See AbstractAttribute::trackStatistics()
4482   void trackStatistics() const override {
4483     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4484   }
4485 };
4486 
4487 /// ------------------ Value Simplify Attribute ----------------------------
4488 struct AAValueSimplifyImpl : AAValueSimplify {
4489   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4490       : AAValueSimplify(IRP, A) {}
4491 
4492   /// See AbstractAttribute::initialize(...).
4493   void initialize(Attributor &A) override {
4494     if (getAssociatedValue().getType()->isVoidTy())
4495       indicatePessimisticFixpoint();
4496   }
4497 
4498   /// See AbstractAttribute::getAsStr().
4499   const std::string getAsStr() const override {
4500     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4501                         : "not-simple";
4502   }
4503 
4504   /// See AbstractAttribute::trackStatistics()
4505   void trackStatistics() const override {}
4506 
4507   /// See AAValueSimplify::getAssumedSimplifiedValue()
4508   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4509     if (!getAssumed())
4510       return const_cast<Value *>(&getAssociatedValue());
4511     return SimplifiedAssociatedValue;
4512   }
4513 
4514   /// Helper function for querying AAValueSimplify and updating candicate.
4515   /// \param QueryingValue Value trying to unify with SimplifiedValue
4516   /// \param AccumulatedSimplifiedValue Current simplification result.
4517   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4518                              Value &QueryingValue,
4519                              Optional<Value *> &AccumulatedSimplifiedValue) {
4520     // FIXME: Add a typecast support.
4521 
4522     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4523         QueryingAA, IRPosition::value(QueryingValue));
4524 
4525     Optional<Value *> QueryingValueSimplified =
4526         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4527 
4528     if (!QueryingValueSimplified.hasValue())
4529       return true;
4530 
4531     if (!QueryingValueSimplified.getValue())
4532       return false;
4533 
4534     Value &QueryingValueSimplifiedUnwrapped =
4535         *QueryingValueSimplified.getValue();
4536 
4537     if (AccumulatedSimplifiedValue.hasValue() &&
4538         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4539         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4540       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4541     if (AccumulatedSimplifiedValue.hasValue() &&
4542         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4543       return true;
4544 
4545     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4546                       << " is assumed to be "
4547                       << QueryingValueSimplifiedUnwrapped << "\n");
4548 
4549     AccumulatedSimplifiedValue = QueryingValueSimplified;
4550     return true;
4551   }
4552 
4553   /// Returns a candidate is found or not
4554   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4555     if (!getAssociatedValue().getType()->isIntegerTy())
4556       return false;
4557 
4558     const auto &AA =
4559         A.getAAFor<AAType>(*this, getIRPosition(), /* TrackDependence */ false);
4560 
4561     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4562 
4563     if (!COpt.hasValue()) {
4564       SimplifiedAssociatedValue = llvm::None;
4565       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4566       return true;
4567     }
4568     if (auto *C = COpt.getValue()) {
4569       SimplifiedAssociatedValue = C;
4570       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4571       return true;
4572     }
4573     return false;
4574   }
4575 
4576   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4577     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4578       return true;
4579     if (askSimplifiedValueFor<AAPotentialValues>(A))
4580       return true;
4581     return false;
4582   }
4583 
4584   /// See AbstractAttribute::manifest(...).
4585   ChangeStatus manifest(Attributor &A) override {
4586     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4587 
4588     if (SimplifiedAssociatedValue.hasValue() &&
4589         !SimplifiedAssociatedValue.getValue())
4590       return Changed;
4591 
4592     Value &V = getAssociatedValue();
4593     auto *C = SimplifiedAssociatedValue.hasValue()
4594                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4595                   : UndefValue::get(V.getType());
4596     if (C) {
4597       // We can replace the AssociatedValue with the constant.
4598       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4599         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4600                           << " :: " << *this << "\n");
4601         if (A.changeValueAfterManifest(V, *C))
4602           Changed = ChangeStatus::CHANGED;
4603       }
4604     }
4605 
4606     return Changed | AAValueSimplify::manifest(A);
4607   }
4608 
4609   /// See AbstractState::indicatePessimisticFixpoint(...).
4610   ChangeStatus indicatePessimisticFixpoint() override {
4611     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4612     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4613     SimplifiedAssociatedValue = &getAssociatedValue();
4614     indicateOptimisticFixpoint();
4615     return ChangeStatus::CHANGED;
4616   }
4617 
4618 protected:
4619   // An assumed simplified value. Initially, it is set to Optional::None, which
4620   // means that the value is not clear under current assumption. If in the
4621   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4622   // returns orignal associated value.
4623   Optional<Value *> SimplifiedAssociatedValue;
4624 };
4625 
4626 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4627   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4628       : AAValueSimplifyImpl(IRP, A) {}
4629 
4630   void initialize(Attributor &A) override {
4631     AAValueSimplifyImpl::initialize(A);
4632     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4633       indicatePessimisticFixpoint();
4634     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4635                  Attribute::StructRet, Attribute::Nest},
4636                 /* IgnoreSubsumingPositions */ true))
4637       indicatePessimisticFixpoint();
4638 
4639     // FIXME: This is a hack to prevent us from propagating function poiner in
4640     // the new pass manager CGSCC pass as it creates call edges the
4641     // CallGraphUpdater cannot handle yet.
4642     Value &V = getAssociatedValue();
4643     if (V.getType()->isPointerTy() &&
4644         V.getType()->getPointerElementType()->isFunctionTy() &&
4645         !A.isModulePass())
4646       indicatePessimisticFixpoint();
4647   }
4648 
4649   /// See AbstractAttribute::updateImpl(...).
4650   ChangeStatus updateImpl(Attributor &A) override {
4651     // Byval is only replacable if it is readonly otherwise we would write into
4652     // the replaced value and not the copy that byval creates implicitly.
4653     Argument *Arg = getAssociatedArgument();
4654     if (Arg->hasByValAttr()) {
4655       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4656       //       there is no race by not copying a constant byval.
4657       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4658       if (!MemAA.isAssumedReadOnly())
4659         return indicatePessimisticFixpoint();
4660     }
4661 
4662     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4663 
4664     auto PredForCallSite = [&](AbstractCallSite ACS) {
4665       const IRPosition &ACSArgPos =
4666           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4667       // Check if a coresponding argument was found or if it is on not
4668       // associated (which can happen for callback calls).
4669       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4670         return false;
4671 
4672       // We can only propagate thread independent values through callbacks.
4673       // This is different to direct/indirect call sites because for them we
4674       // know the thread executing the caller and callee is the same. For
4675       // callbacks this is not guaranteed, thus a thread dependent value could
4676       // be different for the caller and callee, making it invalid to propagate.
4677       Value &ArgOp = ACSArgPos.getAssociatedValue();
4678       if (ACS.isCallbackCall())
4679         if (auto *C = dyn_cast<Constant>(&ArgOp))
4680           if (C->isThreadDependent())
4681             return false;
4682       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4683     };
4684 
4685     bool AllCallSitesKnown;
4686     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4687                                 AllCallSitesKnown))
4688       if (!askSimplifiedValueForOtherAAs(A))
4689         return indicatePessimisticFixpoint();
4690 
4691     // If a candicate was found in this update, return CHANGED.
4692     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4693                ? ChangeStatus::UNCHANGED
4694                : ChangeStatus ::CHANGED;
4695   }
4696 
4697   /// See AbstractAttribute::trackStatistics()
4698   void trackStatistics() const override {
4699     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4700   }
4701 };
4702 
4703 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4704   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4705       : AAValueSimplifyImpl(IRP, A) {}
4706 
4707   /// See AbstractAttribute::updateImpl(...).
4708   ChangeStatus updateImpl(Attributor &A) override {
4709     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4710 
4711     auto PredForReturned = [&](Value &V) {
4712       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4713     };
4714 
4715     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4716       if (!askSimplifiedValueForOtherAAs(A))
4717         return indicatePessimisticFixpoint();
4718 
4719     // If a candicate was found in this update, return CHANGED.
4720     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4721                ? ChangeStatus::UNCHANGED
4722                : ChangeStatus ::CHANGED;
4723   }
4724 
4725   ChangeStatus manifest(Attributor &A) override {
4726     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4727 
4728     if (SimplifiedAssociatedValue.hasValue() &&
4729         !SimplifiedAssociatedValue.getValue())
4730       return Changed;
4731 
4732     Value &V = getAssociatedValue();
4733     auto *C = SimplifiedAssociatedValue.hasValue()
4734                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4735                   : UndefValue::get(V.getType());
4736     if (C) {
4737       auto PredForReturned =
4738           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4739             // We can replace the AssociatedValue with the constant.
4740             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4741               return true;
4742 
4743             for (ReturnInst *RI : RetInsts) {
4744               if (RI->getFunction() != getAnchorScope())
4745                 continue;
4746               auto *RC = C;
4747               if (RC->getType() != RI->getReturnValue()->getType())
4748                 RC = ConstantExpr::getBitCast(RC,
4749                                               RI->getReturnValue()->getType());
4750               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4751                                 << " in " << *RI << " :: " << *this << "\n");
4752               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4753                 Changed = ChangeStatus::CHANGED;
4754             }
4755             return true;
4756           };
4757       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4758     }
4759 
4760     return Changed | AAValueSimplify::manifest(A);
4761   }
4762 
4763   /// See AbstractAttribute::trackStatistics()
4764   void trackStatistics() const override {
4765     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4766   }
4767 };
4768 
4769 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4770   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4771       : AAValueSimplifyImpl(IRP, A) {}
4772 
4773   /// See AbstractAttribute::initialize(...).
4774   void initialize(Attributor &A) override {
4775     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4776     //        Needs investigation.
4777     // AAValueSimplifyImpl::initialize(A);
4778     Value &V = getAnchorValue();
4779 
4780     // TODO: add other stuffs
4781     if (isa<Constant>(V))
4782       indicatePessimisticFixpoint();
4783   }
4784 
4785   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4786   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4787   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4788   /// updated and \p Changed is set appropriately.
4789   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4790                               ChangeStatus &Changed) {
4791     if (!ICmp)
4792       return false;
4793     if (!ICmp->isEquality())
4794       return false;
4795 
4796     // This is a comparison with == or !-. We check for nullptr now.
4797     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4798     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4799     if (!Op0IsNull && !Op1IsNull)
4800       return false;
4801 
4802     LLVMContext &Ctx = ICmp->getContext();
4803     // Check for `nullptr ==/!= nullptr` first:
4804     if (Op0IsNull && Op1IsNull) {
4805       Value *NewVal = ConstantInt::get(
4806           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4807       assert(!SimplifiedAssociatedValue.hasValue() &&
4808              "Did not expect non-fixed value for constant comparison");
4809       SimplifiedAssociatedValue = NewVal;
4810       indicateOptimisticFixpoint();
4811       Changed = ChangeStatus::CHANGED;
4812       return true;
4813     }
4814 
4815     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4816     // non-nullptr operand and if we assume it's non-null we can conclude the
4817     // result of the comparison.
4818     assert((Op0IsNull || Op1IsNull) &&
4819            "Expected nullptr versus non-nullptr comparison at this point");
4820 
4821     // The index is the operand that we assume is not null.
4822     unsigned PtrIdx = Op0IsNull;
4823     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4824         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)));
4825     if (!PtrNonNullAA.isAssumedNonNull())
4826       return false;
4827 
4828     // The new value depends on the predicate, true for != and false for ==.
4829     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4830                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4831 
4832     assert((!SimplifiedAssociatedValue.hasValue() ||
4833             SimplifiedAssociatedValue == NewVal) &&
4834            "Did not expect to change value for zero-comparison");
4835 
4836     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4837     SimplifiedAssociatedValue = NewVal;
4838 
4839     if (PtrNonNullAA.isKnownNonNull())
4840       indicateOptimisticFixpoint();
4841 
4842     Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus ::CHANGED;
4843     return true;
4844   }
4845 
4846   /// See AbstractAttribute::updateImpl(...).
4847   ChangeStatus updateImpl(Attributor &A) override {
4848     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4849 
4850     ChangeStatus Changed;
4851     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4852                                Changed))
4853       return Changed;
4854 
4855     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4856                             bool Stripped) -> bool {
4857       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4858       if (!Stripped && this == &AA) {
4859         // TODO: Look the instruction and check recursively.
4860 
4861         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4862                           << "\n");
4863         return false;
4864       }
4865       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4866     };
4867 
4868     bool Dummy = false;
4869     if (!genericValueTraversal<AAValueSimplify, bool>(
4870             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4871             /* UseValueSimplify */ false))
4872       if (!askSimplifiedValueForOtherAAs(A))
4873         return indicatePessimisticFixpoint();
4874 
4875     // If a candicate was found in this update, return CHANGED.
4876 
4877     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4878                ? ChangeStatus::UNCHANGED
4879                : ChangeStatus ::CHANGED;
4880   }
4881 
4882   /// See AbstractAttribute::trackStatistics()
4883   void trackStatistics() const override {
4884     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4885   }
4886 };
4887 
4888 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4889   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4890       : AAValueSimplifyImpl(IRP, A) {}
4891 
4892   /// See AbstractAttribute::initialize(...).
4893   void initialize(Attributor &A) override {
4894     SimplifiedAssociatedValue = &getAnchorValue();
4895     indicateOptimisticFixpoint();
4896   }
4897   /// See AbstractAttribute::initialize(...).
4898   ChangeStatus updateImpl(Attributor &A) override {
4899     llvm_unreachable(
4900         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4901   }
4902   /// See AbstractAttribute::trackStatistics()
4903   void trackStatistics() const override {
4904     STATS_DECLTRACK_FN_ATTR(value_simplify)
4905   }
4906 };
4907 
4908 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4909   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4910       : AAValueSimplifyFunction(IRP, A) {}
4911   /// See AbstractAttribute::trackStatistics()
4912   void trackStatistics() const override {
4913     STATS_DECLTRACK_CS_ATTR(value_simplify)
4914   }
4915 };
4916 
4917 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4918   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4919       : AAValueSimplifyReturned(IRP, A) {}
4920 
4921   /// See AbstractAttribute::manifest(...).
4922   ChangeStatus manifest(Attributor &A) override {
4923     return AAValueSimplifyImpl::manifest(A);
4924   }
4925 
4926   void trackStatistics() const override {
4927     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4928   }
4929 };
4930 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4931   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4932       : AAValueSimplifyFloating(IRP, A) {}
4933 
4934   /// See AbstractAttribute::manifest(...).
4935   ChangeStatus manifest(Attributor &A) override {
4936     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4937 
4938     if (SimplifiedAssociatedValue.hasValue() &&
4939         !SimplifiedAssociatedValue.getValue())
4940       return Changed;
4941 
4942     Value &V = getAssociatedValue();
4943     auto *C = SimplifiedAssociatedValue.hasValue()
4944                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4945                   : UndefValue::get(V.getType());
4946     if (C) {
4947       Use &U = cast<CallBase>(&getAnchorValue())
4948                    ->getArgOperandUse(getCallSiteArgNo());
4949       // We can replace the AssociatedValue with the constant.
4950       if (&V != C && V.getType() == C->getType()) {
4951         if (A.changeUseAfterManifest(U, *C))
4952           Changed = ChangeStatus::CHANGED;
4953       }
4954     }
4955 
4956     return Changed | AAValueSimplify::manifest(A);
4957   }
4958 
4959   void trackStatistics() const override {
4960     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4961   }
4962 };
4963 
4964 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4965 struct AAHeapToStackImpl : public AAHeapToStack {
4966   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4967       : AAHeapToStack(IRP, A) {}
4968 
4969   const std::string getAsStr() const override {
4970     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4971   }
4972 
4973   ChangeStatus manifest(Attributor &A) override {
4974     assert(getState().isValidState() &&
4975            "Attempted to manifest an invalid state!");
4976 
4977     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4978     Function *F = getAnchorScope();
4979     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4980 
4981     for (Instruction *MallocCall : MallocCalls) {
4982       // This malloc cannot be replaced.
4983       if (BadMallocCalls.count(MallocCall))
4984         continue;
4985 
4986       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4987         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4988         A.deleteAfterManifest(*FreeCall);
4989         HasChanged = ChangeStatus::CHANGED;
4990       }
4991 
4992       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4993                         << "\n");
4994 
4995       Align Alignment;
4996       Constant *Size;
4997       if (isCallocLikeFn(MallocCall, TLI)) {
4998         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4999         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
5000         APInt TotalSize = SizeT->getValue() * Num->getValue();
5001         Size =
5002             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
5003       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5004         Size = cast<ConstantInt>(MallocCall->getOperand(1));
5005         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5006                                    ->getValue()
5007                                    .getZExtValue())
5008                         .valueOrOne();
5009       } else {
5010         Size = cast<ConstantInt>(MallocCall->getOperand(0));
5011       }
5012 
5013       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5014       Instruction *AI =
5015           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5016                          "", MallocCall->getNextNode());
5017 
5018       if (AI->getType() != MallocCall->getType())
5019         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5020                              AI->getNextNode());
5021 
5022       A.changeValueAfterManifest(*MallocCall, *AI);
5023 
5024       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5025         auto *NBB = II->getNormalDest();
5026         BranchInst::Create(NBB, MallocCall->getParent());
5027         A.deleteAfterManifest(*MallocCall);
5028       } else {
5029         A.deleteAfterManifest(*MallocCall);
5030       }
5031 
5032       // Zero out the allocated memory if it was a calloc.
5033       if (isCallocLikeFn(MallocCall, TLI)) {
5034         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5035                                    AI->getNextNode());
5036         Value *Ops[] = {
5037             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5038             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5039 
5040         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5041         Module *M = F->getParent();
5042         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5043         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5044       }
5045       HasChanged = ChangeStatus::CHANGED;
5046     }
5047 
5048     return HasChanged;
5049   }
5050 
5051   /// Collection of all malloc calls in a function.
5052   SmallSetVector<Instruction *, 4> MallocCalls;
5053 
5054   /// Collection of malloc calls that cannot be converted.
5055   DenseSet<const Instruction *> BadMallocCalls;
5056 
5057   /// A map for each malloc call to the set of associated free calls.
5058   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5059 
5060   ChangeStatus updateImpl(Attributor &A) override;
5061 };
5062 
5063 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5064   const Function *F = getAnchorScope();
5065   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5066 
5067   MustBeExecutedContextExplorer &Explorer =
5068       A.getInfoCache().getMustBeExecutedContextExplorer();
5069 
5070   auto FreeCheck = [&](Instruction &I) {
5071     const auto &Frees = FreesForMalloc.lookup(&I);
5072     if (Frees.size() != 1)
5073       return false;
5074     Instruction *UniqueFree = *Frees.begin();
5075     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5076   };
5077 
5078   auto UsesCheck = [&](Instruction &I) {
5079     bool ValidUsesOnly = true;
5080     bool MustUse = true;
5081     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5082       Instruction *UserI = cast<Instruction>(U.getUser());
5083       if (isa<LoadInst>(UserI))
5084         return true;
5085       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5086         if (SI->getValueOperand() == U.get()) {
5087           LLVM_DEBUG(dbgs()
5088                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5089           ValidUsesOnly = false;
5090         } else {
5091           // A store into the malloc'ed memory is fine.
5092         }
5093         return true;
5094       }
5095       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5096         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5097           return true;
5098         // Record malloc.
5099         if (isFreeCall(UserI, TLI)) {
5100           if (MustUse) {
5101             FreesForMalloc[&I].insert(UserI);
5102           } else {
5103             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5104                               << *UserI << "\n");
5105             ValidUsesOnly = false;
5106           }
5107           return true;
5108         }
5109 
5110         unsigned ArgNo = CB->getArgOperandNo(&U);
5111 
5112         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5113             *this, IRPosition::callsite_argument(*CB, ArgNo));
5114 
5115         // If a callsite argument use is nofree, we are fine.
5116         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5117             *this, IRPosition::callsite_argument(*CB, ArgNo));
5118 
5119         if (!NoCaptureAA.isAssumedNoCapture() ||
5120             !ArgNoFreeAA.isAssumedNoFree()) {
5121           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5122           ValidUsesOnly = false;
5123         }
5124         return true;
5125       }
5126 
5127       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5128           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5129         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5130         Follow = true;
5131         return true;
5132       }
5133       // Unknown user for which we can not track uses further (in a way that
5134       // makes sense).
5135       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5136       ValidUsesOnly = false;
5137       return true;
5138     };
5139     A.checkForAllUses(Pred, *this, I);
5140     return ValidUsesOnly;
5141   };
5142 
5143   auto MallocCallocCheck = [&](Instruction &I) {
5144     if (BadMallocCalls.count(&I))
5145       return true;
5146 
5147     bool IsMalloc = isMallocLikeFn(&I, TLI);
5148     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5149     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5150     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5151       BadMallocCalls.insert(&I);
5152       return true;
5153     }
5154 
5155     if (IsMalloc) {
5156       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5157         if (Size->getValue().ule(MaxHeapToStackSize))
5158           if (UsesCheck(I) || FreeCheck(I)) {
5159             MallocCalls.insert(&I);
5160             return true;
5161           }
5162     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5163       // Only if the alignment and sizes are constant.
5164       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5165         if (Size->getValue().ule(MaxHeapToStackSize))
5166           if (UsesCheck(I) || FreeCheck(I)) {
5167             MallocCalls.insert(&I);
5168             return true;
5169           }
5170     } else if (IsCalloc) {
5171       bool Overflow = false;
5172       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5173         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5174           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5175                   .ule(MaxHeapToStackSize))
5176             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5177               MallocCalls.insert(&I);
5178               return true;
5179             }
5180     }
5181 
5182     BadMallocCalls.insert(&I);
5183     return true;
5184   };
5185 
5186   size_t NumBadMallocs = BadMallocCalls.size();
5187 
5188   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5189 
5190   if (NumBadMallocs != BadMallocCalls.size())
5191     return ChangeStatus::CHANGED;
5192 
5193   return ChangeStatus::UNCHANGED;
5194 }
5195 
5196 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5197   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5198       : AAHeapToStackImpl(IRP, A) {}
5199 
5200   /// See AbstractAttribute::trackStatistics().
5201   void trackStatistics() const override {
5202     STATS_DECL(
5203         MallocCalls, Function,
5204         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5205     for (auto *C : MallocCalls)
5206       if (!BadMallocCalls.count(C))
5207         ++BUILD_STAT_NAME(MallocCalls, Function);
5208   }
5209 };
5210 
5211 /// ----------------------- Privatizable Pointers ------------------------------
5212 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5213   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5214       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5215 
5216   ChangeStatus indicatePessimisticFixpoint() override {
5217     AAPrivatizablePtr::indicatePessimisticFixpoint();
5218     PrivatizableType = nullptr;
5219     return ChangeStatus::CHANGED;
5220   }
5221 
5222   /// Identify the type we can chose for a private copy of the underlying
5223   /// argument. None means it is not clear yet, nullptr means there is none.
5224   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5225 
5226   /// Return a privatizable type that encloses both T0 and T1.
5227   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5228   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5229     if (!T0.hasValue())
5230       return T1;
5231     if (!T1.hasValue())
5232       return T0;
5233     if (T0 == T1)
5234       return T0;
5235     return nullptr;
5236   }
5237 
5238   Optional<Type *> getPrivatizableType() const override {
5239     return PrivatizableType;
5240   }
5241 
5242   const std::string getAsStr() const override {
5243     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5244   }
5245 
5246 protected:
5247   Optional<Type *> PrivatizableType;
5248 };
5249 
5250 // TODO: Do this for call site arguments (probably also other values) as well.
5251 
5252 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5253   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5254       : AAPrivatizablePtrImpl(IRP, A) {}
5255 
5256   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5257   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5258     // If this is a byval argument and we know all the call sites (so we can
5259     // rewrite them), there is no need to check them explicitly.
5260     bool AllCallSitesKnown;
5261     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5262         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5263                                true, AllCallSitesKnown))
5264       return getAssociatedValue().getType()->getPointerElementType();
5265 
5266     Optional<Type *> Ty;
5267     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5268 
5269     // Make sure the associated call site argument has the same type at all call
5270     // sites and it is an allocation we know is safe to privatize, for now that
5271     // means we only allow alloca instructions.
5272     // TODO: We can additionally analyze the accesses in the callee to  create
5273     //       the type from that information instead. That is a little more
5274     //       involved and will be done in a follow up patch.
5275     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5276       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5277       // Check if a coresponding argument was found or if it is one not
5278       // associated (which can happen for callback calls).
5279       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5280         return false;
5281 
5282       // Check that all call sites agree on a type.
5283       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
5284       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5285 
5286       LLVM_DEBUG({
5287         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5288         if (CSTy.hasValue() && CSTy.getValue())
5289           CSTy.getValue()->print(dbgs());
5290         else if (CSTy.hasValue())
5291           dbgs() << "<nullptr>";
5292         else
5293           dbgs() << "<none>";
5294       });
5295 
5296       Ty = combineTypes(Ty, CSTy);
5297 
5298       LLVM_DEBUG({
5299         dbgs() << " : New Type: ";
5300         if (Ty.hasValue() && Ty.getValue())
5301           Ty.getValue()->print(dbgs());
5302         else if (Ty.hasValue())
5303           dbgs() << "<nullptr>";
5304         else
5305           dbgs() << "<none>";
5306         dbgs() << "\n";
5307       });
5308 
5309       return !Ty.hasValue() || Ty.getValue();
5310     };
5311 
5312     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5313       return nullptr;
5314     return Ty;
5315   }
5316 
5317   /// See AbstractAttribute::updateImpl(...).
5318   ChangeStatus updateImpl(Attributor &A) override {
5319     PrivatizableType = identifyPrivatizableType(A);
5320     if (!PrivatizableType.hasValue())
5321       return ChangeStatus::UNCHANGED;
5322     if (!PrivatizableType.getValue())
5323       return indicatePessimisticFixpoint();
5324 
5325     // The dependence is optional so we don't give up once we give up on the
5326     // alignment.
5327     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5328                         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5329 
5330     // Avoid arguments with padding for now.
5331     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5332         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5333                                                 A.getInfoCache().getDL())) {
5334       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5335       return indicatePessimisticFixpoint();
5336     }
5337 
5338     // Verify callee and caller agree on how the promoted argument would be
5339     // passed.
5340     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5341     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5342     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5343     Function &Fn = *getIRPosition().getAnchorScope();
5344     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5345     ArgsToPromote.insert(getAssociatedArgument());
5346     const auto *TTI =
5347         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5348     if (!TTI ||
5349         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5350             Fn, *TTI, ArgsToPromote, Dummy) ||
5351         ArgsToPromote.empty()) {
5352       LLVM_DEBUG(
5353           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5354                  << Fn.getName() << "\n");
5355       return indicatePessimisticFixpoint();
5356     }
5357 
5358     // Collect the types that will replace the privatizable type in the function
5359     // signature.
5360     SmallVector<Type *, 16> ReplacementTypes;
5361     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5362 
5363     // Register a rewrite of the argument.
5364     Argument *Arg = getAssociatedArgument();
5365     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5366       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5367       return indicatePessimisticFixpoint();
5368     }
5369 
5370     unsigned ArgNo = Arg->getArgNo();
5371 
5372     // Helper to check if for the given call site the associated argument is
5373     // passed to a callback where the privatization would be different.
5374     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5375       SmallVector<const Use *, 4> CallbackUses;
5376       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5377       for (const Use *U : CallbackUses) {
5378         AbstractCallSite CBACS(U);
5379         assert(CBACS && CBACS.isCallbackCall());
5380         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5381           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5382 
5383           LLVM_DEBUG({
5384             dbgs()
5385                 << "[AAPrivatizablePtr] Argument " << *Arg
5386                 << "check if can be privatized in the context of its parent ("
5387                 << Arg->getParent()->getName()
5388                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5389                    "callback ("
5390                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5391                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5392                 << CBACS.getCallArgOperand(CBArg) << " vs "
5393                 << CB.getArgOperand(ArgNo) << "\n"
5394                 << "[AAPrivatizablePtr] " << CBArg << " : "
5395                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5396           });
5397 
5398           if (CBArgNo != int(ArgNo))
5399             continue;
5400           const auto &CBArgPrivAA =
5401               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5402           if (CBArgPrivAA.isValidState()) {
5403             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5404             if (!CBArgPrivTy.hasValue())
5405               continue;
5406             if (CBArgPrivTy.getValue() == PrivatizableType)
5407               continue;
5408           }
5409 
5410           LLVM_DEBUG({
5411             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5412                    << " cannot be privatized in the context of its parent ("
5413                    << Arg->getParent()->getName()
5414                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5415                       "callback ("
5416                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5417                    << ").\n[AAPrivatizablePtr] for which the argument "
5418                       "privatization is not compatible.\n";
5419           });
5420           return false;
5421         }
5422       }
5423       return true;
5424     };
5425 
5426     // Helper to check if for the given call site the associated argument is
5427     // passed to a direct call where the privatization would be different.
5428     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5429       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5430       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5431       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5432              "Expected a direct call operand for callback call operand");
5433 
5434       LLVM_DEBUG({
5435         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5436                << " check if be privatized in the context of its parent ("
5437                << Arg->getParent()->getName()
5438                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5439                   "direct call of ("
5440                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5441                << ").\n";
5442       });
5443 
5444       Function *DCCallee = DC->getCalledFunction();
5445       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5446         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5447             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5448         if (DCArgPrivAA.isValidState()) {
5449           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5450           if (!DCArgPrivTy.hasValue())
5451             return true;
5452           if (DCArgPrivTy.getValue() == PrivatizableType)
5453             return true;
5454         }
5455       }
5456 
5457       LLVM_DEBUG({
5458         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5459                << " cannot be privatized in the context of its parent ("
5460                << Arg->getParent()->getName()
5461                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5462                   "direct call of ("
5463                << ACS.getInstruction()->getCalledFunction()->getName()
5464                << ").\n[AAPrivatizablePtr] for which the argument "
5465                   "privatization is not compatible.\n";
5466       });
5467       return false;
5468     };
5469 
5470     // Helper to check if the associated argument is used at the given abstract
5471     // call site in a way that is incompatible with the privatization assumed
5472     // here.
5473     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5474       if (ACS.isDirectCall())
5475         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5476       if (ACS.isCallbackCall())
5477         return IsCompatiblePrivArgOfDirectCS(ACS);
5478       return false;
5479     };
5480 
5481     bool AllCallSitesKnown;
5482     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5483                                 AllCallSitesKnown))
5484       return indicatePessimisticFixpoint();
5485 
5486     return ChangeStatus::UNCHANGED;
5487   }
5488 
5489   /// Given a type to private \p PrivType, collect the constituates (which are
5490   /// used) in \p ReplacementTypes.
5491   static void
5492   identifyReplacementTypes(Type *PrivType,
5493                            SmallVectorImpl<Type *> &ReplacementTypes) {
5494     // TODO: For now we expand the privatization type to the fullest which can
5495     //       lead to dead arguments that need to be removed later.
5496     assert(PrivType && "Expected privatizable type!");
5497 
5498     // Traverse the type, extract constituate types on the outermost level.
5499     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5500       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5501         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5502     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5503       ReplacementTypes.append(PrivArrayType->getNumElements(),
5504                               PrivArrayType->getElementType());
5505     } else {
5506       ReplacementTypes.push_back(PrivType);
5507     }
5508   }
5509 
5510   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5511   /// The values needed are taken from the arguments of \p F starting at
5512   /// position \p ArgNo.
5513   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5514                                    unsigned ArgNo, Instruction &IP) {
5515     assert(PrivType && "Expected privatizable type!");
5516 
5517     IRBuilder<NoFolder> IRB(&IP);
5518     const DataLayout &DL = F.getParent()->getDataLayout();
5519 
5520     // Traverse the type, build GEPs and stores.
5521     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5522       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5523       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5524         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5525         Value *Ptr = constructPointer(
5526             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5527         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5528       }
5529     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5530       Type *PointeeTy = PrivArrayType->getElementType();
5531       Type *PointeePtrTy = PointeeTy->getPointerTo();
5532       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5533       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5534         Value *Ptr =
5535             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5536         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5537       }
5538     } else {
5539       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5540     }
5541   }
5542 
5543   /// Extract values from \p Base according to the type \p PrivType at the
5544   /// call position \p ACS. The values are appended to \p ReplacementValues.
5545   void createReplacementValues(Align Alignment, Type *PrivType,
5546                                AbstractCallSite ACS, Value *Base,
5547                                SmallVectorImpl<Value *> &ReplacementValues) {
5548     assert(Base && "Expected base value!");
5549     assert(PrivType && "Expected privatizable type!");
5550     Instruction *IP = ACS.getInstruction();
5551 
5552     IRBuilder<NoFolder> IRB(IP);
5553     const DataLayout &DL = IP->getModule()->getDataLayout();
5554 
5555     if (Base->getType()->getPointerElementType() != PrivType)
5556       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5557                                                  "", ACS.getInstruction());
5558 
5559     // Traverse the type, build GEPs and loads.
5560     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5561       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5562       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5563         Type *PointeeTy = PrivStructType->getElementType(u);
5564         Value *Ptr =
5565             constructPointer(PointeeTy->getPointerTo(), Base,
5566                              PrivStructLayout->getElementOffset(u), IRB, DL);
5567         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5568         L->setAlignment(Alignment);
5569         ReplacementValues.push_back(L);
5570       }
5571     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5572       Type *PointeeTy = PrivArrayType->getElementType();
5573       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5574       Type *PointeePtrTy = PointeeTy->getPointerTo();
5575       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5576         Value *Ptr =
5577             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5578         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5579         L->setAlignment(Alignment);
5580         ReplacementValues.push_back(L);
5581       }
5582     } else {
5583       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5584       L->setAlignment(Alignment);
5585       ReplacementValues.push_back(L);
5586     }
5587   }
5588 
5589   /// See AbstractAttribute::manifest(...)
5590   ChangeStatus manifest(Attributor &A) override {
5591     if (!PrivatizableType.hasValue())
5592       return ChangeStatus::UNCHANGED;
5593     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5594 
5595     // Collect all tail calls in the function as we cannot allow new allocas to
5596     // escape into tail recursion.
5597     // TODO: Be smarter about new allocas escaping into tail calls.
5598     SmallVector<CallInst *, 16> TailCalls;
5599     if (!A.checkForAllInstructions(
5600             [&](Instruction &I) {
5601               CallInst &CI = cast<CallInst>(I);
5602               if (CI.isTailCall())
5603                 TailCalls.push_back(&CI);
5604               return true;
5605             },
5606             *this, {Instruction::Call}))
5607       return ChangeStatus::UNCHANGED;
5608 
5609     Argument *Arg = getAssociatedArgument();
5610     // Query AAAlign attribute for alignment of associated argument to
5611     // determine the best alignment of loads.
5612     const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg));
5613 
5614     // Callback to repair the associated function. A new alloca is placed at the
5615     // beginning and initialized with the values passed through arguments. The
5616     // new alloca replaces the use of the old pointer argument.
5617     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5618         [=](const Attributor::ArgumentReplacementInfo &ARI,
5619             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5620           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5621           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5622           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5623                                            Arg->getName() + ".priv", IP);
5624           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5625                                ArgIt->getArgNo(), *IP);
5626 
5627           if (AI->getType() != Arg->getType())
5628             AI =
5629                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5630           Arg->replaceAllUsesWith(AI);
5631 
5632           for (CallInst *CI : TailCalls)
5633             CI->setTailCall(false);
5634         };
5635 
5636     // Callback to repair a call site of the associated function. The elements
5637     // of the privatizable type are loaded prior to the call and passed to the
5638     // new function version.
5639     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5640         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5641                       AbstractCallSite ACS,
5642                       SmallVectorImpl<Value *> &NewArgOperands) {
5643           // When no alignment is specified for the load instruction,
5644           // natural alignment is assumed.
5645           createReplacementValues(
5646               assumeAligned(AlignAA.getAssumedAlign()),
5647               PrivatizableType.getValue(), ACS,
5648               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5649               NewArgOperands);
5650         };
5651 
5652     // Collect the types that will replace the privatizable type in the function
5653     // signature.
5654     SmallVector<Type *, 16> ReplacementTypes;
5655     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5656 
5657     // Register a rewrite of the argument.
5658     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5659                                            std::move(FnRepairCB),
5660                                            std::move(ACSRepairCB)))
5661       return ChangeStatus::CHANGED;
5662     return ChangeStatus::UNCHANGED;
5663   }
5664 
5665   /// See AbstractAttribute::trackStatistics()
5666   void trackStatistics() const override {
5667     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5668   }
5669 };
5670 
5671 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5672   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5673       : AAPrivatizablePtrImpl(IRP, A) {}
5674 
5675   /// See AbstractAttribute::initialize(...).
5676   virtual void initialize(Attributor &A) override {
5677     // TODO: We can privatize more than arguments.
5678     indicatePessimisticFixpoint();
5679   }
5680 
5681   ChangeStatus updateImpl(Attributor &A) override {
5682     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5683                      "updateImpl will not be called");
5684   }
5685 
5686   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5687   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5688     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5689     if (!Obj) {
5690       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5691       return nullptr;
5692     }
5693 
5694     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5695       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5696         if (CI->isOne())
5697           return Obj->getType()->getPointerElementType();
5698     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5699       auto &PrivArgAA =
5700           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5701       if (PrivArgAA.isAssumedPrivatizablePtr())
5702         return Obj->getType()->getPointerElementType();
5703     }
5704 
5705     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5706                          "alloca nor privatizable argument: "
5707                       << *Obj << "!\n");
5708     return nullptr;
5709   }
5710 
5711   /// See AbstractAttribute::trackStatistics()
5712   void trackStatistics() const override {
5713     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5714   }
5715 };
5716 
5717 struct AAPrivatizablePtrCallSiteArgument final
5718     : public AAPrivatizablePtrFloating {
5719   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5720       : AAPrivatizablePtrFloating(IRP, A) {}
5721 
5722   /// See AbstractAttribute::initialize(...).
5723   void initialize(Attributor &A) override {
5724     if (getIRPosition().hasAttr(Attribute::ByVal))
5725       indicateOptimisticFixpoint();
5726   }
5727 
5728   /// See AbstractAttribute::updateImpl(...).
5729   ChangeStatus updateImpl(Attributor &A) override {
5730     PrivatizableType = identifyPrivatizableType(A);
5731     if (!PrivatizableType.hasValue())
5732       return ChangeStatus::UNCHANGED;
5733     if (!PrivatizableType.getValue())
5734       return indicatePessimisticFixpoint();
5735 
5736     const IRPosition &IRP = getIRPosition();
5737     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5738     if (!NoCaptureAA.isAssumedNoCapture()) {
5739       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5740       return indicatePessimisticFixpoint();
5741     }
5742 
5743     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5744     if (!NoAliasAA.isAssumedNoAlias()) {
5745       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5746       return indicatePessimisticFixpoint();
5747     }
5748 
5749     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5750     if (!MemBehaviorAA.isAssumedReadOnly()) {
5751       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5752       return indicatePessimisticFixpoint();
5753     }
5754 
5755     return ChangeStatus::UNCHANGED;
5756   }
5757 
5758   /// See AbstractAttribute::trackStatistics()
5759   void trackStatistics() const override {
5760     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5761   }
5762 };
5763 
5764 struct AAPrivatizablePtrCallSiteReturned final
5765     : public AAPrivatizablePtrFloating {
5766   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5767       : AAPrivatizablePtrFloating(IRP, A) {}
5768 
5769   /// See AbstractAttribute::initialize(...).
5770   void initialize(Attributor &A) override {
5771     // TODO: We can privatize more than arguments.
5772     indicatePessimisticFixpoint();
5773   }
5774 
5775   /// See AbstractAttribute::trackStatistics()
5776   void trackStatistics() const override {
5777     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5778   }
5779 };
5780 
5781 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5782   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5783       : AAPrivatizablePtrFloating(IRP, A) {}
5784 
5785   /// See AbstractAttribute::initialize(...).
5786   void initialize(Attributor &A) override {
5787     // TODO: We can privatize more than arguments.
5788     indicatePessimisticFixpoint();
5789   }
5790 
5791   /// See AbstractAttribute::trackStatistics()
5792   void trackStatistics() const override {
5793     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5794   }
5795 };
5796 
5797 /// -------------------- Memory Behavior Attributes ----------------------------
5798 /// Includes read-none, read-only, and write-only.
5799 /// ----------------------------------------------------------------------------
5800 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5801   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5802       : AAMemoryBehavior(IRP, A) {}
5803 
5804   /// See AbstractAttribute::initialize(...).
5805   void initialize(Attributor &A) override {
5806     intersectAssumedBits(BEST_STATE);
5807     getKnownStateFromValue(getIRPosition(), getState());
5808     AAMemoryBehavior::initialize(A);
5809   }
5810 
5811   /// Return the memory behavior information encoded in the IR for \p IRP.
5812   static void getKnownStateFromValue(const IRPosition &IRP,
5813                                      BitIntegerState &State,
5814                                      bool IgnoreSubsumingPositions = false) {
5815     SmallVector<Attribute, 2> Attrs;
5816     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5817     for (const Attribute &Attr : Attrs) {
5818       switch (Attr.getKindAsEnum()) {
5819       case Attribute::ReadNone:
5820         State.addKnownBits(NO_ACCESSES);
5821         break;
5822       case Attribute::ReadOnly:
5823         State.addKnownBits(NO_WRITES);
5824         break;
5825       case Attribute::WriteOnly:
5826         State.addKnownBits(NO_READS);
5827         break;
5828       default:
5829         llvm_unreachable("Unexpected attribute!");
5830       }
5831     }
5832 
5833     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5834       if (!I->mayReadFromMemory())
5835         State.addKnownBits(NO_READS);
5836       if (!I->mayWriteToMemory())
5837         State.addKnownBits(NO_WRITES);
5838     }
5839   }
5840 
5841   /// See AbstractAttribute::getDeducedAttributes(...).
5842   void getDeducedAttributes(LLVMContext &Ctx,
5843                             SmallVectorImpl<Attribute> &Attrs) const override {
5844     assert(Attrs.size() == 0);
5845     if (isAssumedReadNone())
5846       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5847     else if (isAssumedReadOnly())
5848       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5849     else if (isAssumedWriteOnly())
5850       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5851     assert(Attrs.size() <= 1);
5852   }
5853 
5854   /// See AbstractAttribute::manifest(...).
5855   ChangeStatus manifest(Attributor &A) override {
5856     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5857       return ChangeStatus::UNCHANGED;
5858 
5859     const IRPosition &IRP = getIRPosition();
5860 
5861     // Check if we would improve the existing attributes first.
5862     SmallVector<Attribute, 4> DeducedAttrs;
5863     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5864     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5865           return IRP.hasAttr(Attr.getKindAsEnum(),
5866                              /* IgnoreSubsumingPositions */ true);
5867         }))
5868       return ChangeStatus::UNCHANGED;
5869 
5870     // Clear existing attributes.
5871     IRP.removeAttrs(AttrKinds);
5872 
5873     // Use the generic manifest method.
5874     return IRAttribute::manifest(A);
5875   }
5876 
5877   /// See AbstractState::getAsStr().
5878   const std::string getAsStr() const override {
5879     if (isAssumedReadNone())
5880       return "readnone";
5881     if (isAssumedReadOnly())
5882       return "readonly";
5883     if (isAssumedWriteOnly())
5884       return "writeonly";
5885     return "may-read/write";
5886   }
5887 
5888   /// The set of IR attributes AAMemoryBehavior deals with.
5889   static const Attribute::AttrKind AttrKinds[3];
5890 };
5891 
5892 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5893     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5894 
5895 /// Memory behavior attribute for a floating value.
5896 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5897   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5898       : AAMemoryBehaviorImpl(IRP, A) {}
5899 
5900   /// See AbstractAttribute::initialize(...).
5901   void initialize(Attributor &A) override {
5902     AAMemoryBehaviorImpl::initialize(A);
5903     addUsesOf(A, getAssociatedValue());
5904   }
5905 
5906   /// See AbstractAttribute::updateImpl(...).
5907   ChangeStatus updateImpl(Attributor &A) override;
5908 
5909   /// See AbstractAttribute::trackStatistics()
5910   void trackStatistics() const override {
5911     if (isAssumedReadNone())
5912       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5913     else if (isAssumedReadOnly())
5914       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5915     else if (isAssumedWriteOnly())
5916       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5917   }
5918 
5919 private:
5920   /// Return true if users of \p UserI might access the underlying
5921   /// variable/location described by \p U and should therefore be analyzed.
5922   bool followUsersOfUseIn(Attributor &A, const Use *U,
5923                           const Instruction *UserI);
5924 
5925   /// Update the state according to the effect of use \p U in \p UserI.
5926   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5927 
5928 protected:
5929   /// Add the uses of \p V to the `Uses` set we look at during the update step.
5930   void addUsesOf(Attributor &A, const Value &V);
5931 
5932   /// Container for (transitive) uses of the associated argument.
5933   SmallVector<const Use *, 8> Uses;
5934 
5935   /// Set to remember the uses we already traversed.
5936   SmallPtrSet<const Use *, 8> Visited;
5937 };
5938 
5939 /// Memory behavior attribute for function argument.
5940 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5941   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5942       : AAMemoryBehaviorFloating(IRP, A) {}
5943 
5944   /// See AbstractAttribute::initialize(...).
5945   void initialize(Attributor &A) override {
5946     intersectAssumedBits(BEST_STATE);
5947     const IRPosition &IRP = getIRPosition();
5948     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5949     // can query it when we use has/getAttr. That would allow us to reuse the
5950     // initialize of the base class here.
5951     bool HasByVal =
5952         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5953     getKnownStateFromValue(IRP, getState(),
5954                            /* IgnoreSubsumingPositions */ HasByVal);
5955 
5956     // Initialize the use vector with all direct uses of the associated value.
5957     Argument *Arg = getAssociatedArgument();
5958     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5959       indicatePessimisticFixpoint();
5960     } else {
5961       addUsesOf(A, *Arg);
5962     }
5963   }
5964 
5965   ChangeStatus manifest(Attributor &A) override {
5966     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5967     if (!getAssociatedValue().getType()->isPointerTy())
5968       return ChangeStatus::UNCHANGED;
5969 
5970     // TODO: From readattrs.ll: "inalloca parameters are always
5971     //                           considered written"
5972     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
5973       removeKnownBits(NO_WRITES);
5974       removeAssumedBits(NO_WRITES);
5975     }
5976     return AAMemoryBehaviorFloating::manifest(A);
5977   }
5978 
5979   /// See AbstractAttribute::trackStatistics()
5980   void trackStatistics() const override {
5981     if (isAssumedReadNone())
5982       STATS_DECLTRACK_ARG_ATTR(readnone)
5983     else if (isAssumedReadOnly())
5984       STATS_DECLTRACK_ARG_ATTR(readonly)
5985     else if (isAssumedWriteOnly())
5986       STATS_DECLTRACK_ARG_ATTR(writeonly)
5987   }
5988 };
5989 
5990 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5991   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5992       : AAMemoryBehaviorArgument(IRP, A) {}
5993 
5994   /// See AbstractAttribute::initialize(...).
5995   void initialize(Attributor &A) override {
5996     // If we don't have an associated attribute this is either a variadic call
5997     // or an indirect call, either way, nothing to do here.
5998     Argument *Arg = getAssociatedArgument();
5999     if (!Arg) {
6000       indicatePessimisticFixpoint();
6001       return;
6002     }
6003     if (Arg->hasByValAttr()) {
6004       addKnownBits(NO_WRITES);
6005       removeKnownBits(NO_READS);
6006       removeAssumedBits(NO_READS);
6007     }
6008     AAMemoryBehaviorArgument::initialize(A);
6009     if (getAssociatedFunction()->isDeclaration())
6010       indicatePessimisticFixpoint();
6011   }
6012 
6013   /// See AbstractAttribute::updateImpl(...).
6014   ChangeStatus updateImpl(Attributor &A) override {
6015     // TODO: Once we have call site specific value information we can provide
6016     //       call site specific liveness liveness information and then it makes
6017     //       sense to specialize attributes for call sites arguments instead of
6018     //       redirecting requests to the callee argument.
6019     Argument *Arg = getAssociatedArgument();
6020     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6021     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
6022     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6023   }
6024 
6025   /// See AbstractAttribute::trackStatistics()
6026   void trackStatistics() const override {
6027     if (isAssumedReadNone())
6028       STATS_DECLTRACK_CSARG_ATTR(readnone)
6029     else if (isAssumedReadOnly())
6030       STATS_DECLTRACK_CSARG_ATTR(readonly)
6031     else if (isAssumedWriteOnly())
6032       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6033   }
6034 };
6035 
6036 /// Memory behavior attribute for a call site return position.
6037 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6038   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6039       : AAMemoryBehaviorFloating(IRP, A) {}
6040 
6041   /// See AbstractAttribute::initialize(...).
6042   void initialize(Attributor &A) override {
6043     AAMemoryBehaviorImpl::initialize(A);
6044     Function *F = getAssociatedFunction();
6045     if (!F || F->isDeclaration())
6046       indicatePessimisticFixpoint();
6047   }
6048 
6049   /// See AbstractAttribute::manifest(...).
6050   ChangeStatus manifest(Attributor &A) override {
6051     // We do not annotate returned values.
6052     return ChangeStatus::UNCHANGED;
6053   }
6054 
6055   /// See AbstractAttribute::trackStatistics()
6056   void trackStatistics() const override {}
6057 };
6058 
6059 /// An AA to represent the memory behavior function attributes.
6060 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6061   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6062       : AAMemoryBehaviorImpl(IRP, A) {}
6063 
6064   /// See AbstractAttribute::updateImpl(Attributor &A).
6065   virtual ChangeStatus updateImpl(Attributor &A) override;
6066 
6067   /// See AbstractAttribute::manifest(...).
6068   ChangeStatus manifest(Attributor &A) override {
6069     Function &F = cast<Function>(getAnchorValue());
6070     if (isAssumedReadNone()) {
6071       F.removeFnAttr(Attribute::ArgMemOnly);
6072       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6073       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6074     }
6075     return AAMemoryBehaviorImpl::manifest(A);
6076   }
6077 
6078   /// See AbstractAttribute::trackStatistics()
6079   void trackStatistics() const override {
6080     if (isAssumedReadNone())
6081       STATS_DECLTRACK_FN_ATTR(readnone)
6082     else if (isAssumedReadOnly())
6083       STATS_DECLTRACK_FN_ATTR(readonly)
6084     else if (isAssumedWriteOnly())
6085       STATS_DECLTRACK_FN_ATTR(writeonly)
6086   }
6087 };
6088 
6089 /// AAMemoryBehavior attribute for call sites.
6090 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6091   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6092       : AAMemoryBehaviorImpl(IRP, A) {}
6093 
6094   /// See AbstractAttribute::initialize(...).
6095   void initialize(Attributor &A) override {
6096     AAMemoryBehaviorImpl::initialize(A);
6097     Function *F = getAssociatedFunction();
6098     if (!F || F->isDeclaration())
6099       indicatePessimisticFixpoint();
6100   }
6101 
6102   /// See AbstractAttribute::updateImpl(...).
6103   ChangeStatus updateImpl(Attributor &A) override {
6104     // TODO: Once we have call site specific value information we can provide
6105     //       call site specific liveness liveness information and then it makes
6106     //       sense to specialize attributes for call sites arguments instead of
6107     //       redirecting requests to the callee argument.
6108     Function *F = getAssociatedFunction();
6109     const IRPosition &FnPos = IRPosition::function(*F);
6110     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
6111     return clampStateAndIndicateChange(getState(), FnAA.getState());
6112   }
6113 
6114   /// See AbstractAttribute::trackStatistics()
6115   void trackStatistics() const override {
6116     if (isAssumedReadNone())
6117       STATS_DECLTRACK_CS_ATTR(readnone)
6118     else if (isAssumedReadOnly())
6119       STATS_DECLTRACK_CS_ATTR(readonly)
6120     else if (isAssumedWriteOnly())
6121       STATS_DECLTRACK_CS_ATTR(writeonly)
6122   }
6123 };
6124 
6125 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6126 
6127   // The current assumed state used to determine a change.
6128   auto AssumedState = getAssumed();
6129 
6130   auto CheckRWInst = [&](Instruction &I) {
6131     // If the instruction has an own memory behavior state, use it to restrict
6132     // the local state. No further analysis is required as the other memory
6133     // state is as optimistic as it gets.
6134     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6135       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6136           *this, IRPosition::callsite_function(*CB));
6137       intersectAssumedBits(MemBehaviorAA.getAssumed());
6138       return !isAtFixpoint();
6139     }
6140 
6141     // Remove access kind modifiers if necessary.
6142     if (I.mayReadFromMemory())
6143       removeAssumedBits(NO_READS);
6144     if (I.mayWriteToMemory())
6145       removeAssumedBits(NO_WRITES);
6146     return !isAtFixpoint();
6147   };
6148 
6149   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6150     return indicatePessimisticFixpoint();
6151 
6152   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6153                                         : ChangeStatus::UNCHANGED;
6154 }
6155 
6156 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6157 
6158   const IRPosition &IRP = getIRPosition();
6159   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6160   AAMemoryBehavior::StateType &S = getState();
6161 
6162   // First, check the function scope. We take the known information and we avoid
6163   // work if the assumed information implies the current assumed information for
6164   // this attribute. This is a valid for all but byval arguments.
6165   Argument *Arg = IRP.getAssociatedArgument();
6166   AAMemoryBehavior::base_t FnMemAssumedState =
6167       AAMemoryBehavior::StateType::getWorstState();
6168   if (!Arg || !Arg->hasByValAttr()) {
6169     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
6170         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6171     FnMemAssumedState = FnMemAA.getAssumed();
6172     S.addKnownBits(FnMemAA.getKnown());
6173     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6174       return ChangeStatus::UNCHANGED;
6175   }
6176 
6177   // Make sure the value is not captured (except through "return"), if
6178   // it is, any information derived would be irrelevant anyway as we cannot
6179   // check the potential aliases introduced by the capture. However, no need
6180   // to fall back to anythign less optimistic than the function state.
6181   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6182       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6183   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6184     S.intersectAssumedBits(FnMemAssumedState);
6185     return ChangeStatus::CHANGED;
6186   }
6187 
6188   // The current assumed state used to determine a change.
6189   auto AssumedState = S.getAssumed();
6190 
6191   // Liveness information to exclude dead users.
6192   // TODO: Take the FnPos once we have call site specific liveness information.
6193   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6194       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6195       /* TrackDependence */ false);
6196 
6197   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6198   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6199     const Use *U = Uses[i];
6200     Instruction *UserI = cast<Instruction>(U->getUser());
6201     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6202                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6203                       << "]\n");
6204     if (A.isAssumedDead(*U, this, &LivenessAA))
6205       continue;
6206 
6207     // Droppable users, e.g., llvm::assume does not actually perform any action.
6208     if (UserI->isDroppable())
6209       continue;
6210 
6211     // Check if the users of UserI should also be visited.
6212     if (followUsersOfUseIn(A, U, UserI))
6213       addUsesOf(A, *UserI);
6214 
6215     // If UserI might touch memory we analyze the use in detail.
6216     if (UserI->mayReadOrWriteMemory())
6217       analyzeUseIn(A, U, UserI);
6218   }
6219 
6220   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6221                                         : ChangeStatus::UNCHANGED;
6222 }
6223 
6224 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6225   SmallVector<const Use *, 8> WL;
6226   for (const Use &U : V.uses())
6227     WL.push_back(&U);
6228 
6229   while (!WL.empty()) {
6230     const Use *U = WL.pop_back_val();
6231     if (!Visited.insert(U).second)
6232       continue;
6233 
6234     const Instruction *UserI = cast<Instruction>(U->getUser());
6235     if (UserI->mayReadOrWriteMemory()) {
6236       Uses.push_back(U);
6237       continue;
6238     }
6239     if (!followUsersOfUseIn(A, U, UserI))
6240       continue;
6241     for (const Use &UU : UserI->uses())
6242       WL.push_back(&UU);
6243   }
6244 }
6245 
6246 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6247                                                   const Instruction *UserI) {
6248   // The loaded value is unrelated to the pointer argument, no need to
6249   // follow the users of the load.
6250   if (isa<LoadInst>(UserI))
6251     return false;
6252 
6253   // By default we follow all uses assuming UserI might leak information on U,
6254   // we have special handling for call sites operands though.
6255   const auto *CB = dyn_cast<CallBase>(UserI);
6256   if (!CB || !CB->isArgOperand(U))
6257     return true;
6258 
6259   // If the use is a call argument known not to be captured, the users of
6260   // the call do not need to be visited because they have to be unrelated to
6261   // the input. Note that this check is not trivial even though we disallow
6262   // general capturing of the underlying argument. The reason is that the
6263   // call might the argument "through return", which we allow and for which we
6264   // need to check call users.
6265   if (U->get()->getType()->isPointerTy()) {
6266     unsigned ArgNo = CB->getArgOperandNo(U);
6267     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6268         *this, IRPosition::callsite_argument(*CB, ArgNo),
6269         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6270     return !ArgNoCaptureAA.isAssumedNoCapture();
6271   }
6272 
6273   return true;
6274 }
6275 
6276 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6277                                             const Instruction *UserI) {
6278   assert(UserI->mayReadOrWriteMemory());
6279 
6280   switch (UserI->getOpcode()) {
6281   default:
6282     // TODO: Handle all atomics and other side-effect operations we know of.
6283     break;
6284   case Instruction::Load:
6285     // Loads cause the NO_READS property to disappear.
6286     removeAssumedBits(NO_READS);
6287     return;
6288 
6289   case Instruction::Store:
6290     // Stores cause the NO_WRITES property to disappear if the use is the
6291     // pointer operand. Note that we do assume that capturing was taken care of
6292     // somewhere else.
6293     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6294       removeAssumedBits(NO_WRITES);
6295     return;
6296 
6297   case Instruction::Call:
6298   case Instruction::CallBr:
6299   case Instruction::Invoke: {
6300     // For call sites we look at the argument memory behavior attribute (this
6301     // could be recursive!) in order to restrict our own state.
6302     const auto *CB = cast<CallBase>(UserI);
6303 
6304     // Give up on operand bundles.
6305     if (CB->isBundleOperand(U)) {
6306       indicatePessimisticFixpoint();
6307       return;
6308     }
6309 
6310     // Calling a function does read the function pointer, maybe write it if the
6311     // function is self-modifying.
6312     if (CB->isCallee(U)) {
6313       removeAssumedBits(NO_READS);
6314       break;
6315     }
6316 
6317     // Adjust the possible access behavior based on the information on the
6318     // argument.
6319     IRPosition Pos;
6320     if (U->get()->getType()->isPointerTy())
6321       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6322     else
6323       Pos = IRPosition::callsite_function(*CB);
6324     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6325         *this, Pos,
6326         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6327     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6328     // and at least "known".
6329     intersectAssumedBits(MemBehaviorAA.getAssumed());
6330     return;
6331   }
6332   };
6333 
6334   // Generally, look at the "may-properties" and adjust the assumed state if we
6335   // did not trigger special handling before.
6336   if (UserI->mayReadFromMemory())
6337     removeAssumedBits(NO_READS);
6338   if (UserI->mayWriteToMemory())
6339     removeAssumedBits(NO_WRITES);
6340 }
6341 
6342 } // namespace
6343 
6344 /// -------------------- Memory Locations Attributes ---------------------------
6345 /// Includes read-none, argmemonly, inaccessiblememonly,
6346 /// inaccessiblememorargmemonly
6347 /// ----------------------------------------------------------------------------
6348 
6349 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6350     AAMemoryLocation::MemoryLocationsKind MLK) {
6351   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6352     return "all memory";
6353   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6354     return "no memory";
6355   std::string S = "memory:";
6356   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6357     S += "stack,";
6358   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6359     S += "constant,";
6360   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6361     S += "internal global,";
6362   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6363     S += "external global,";
6364   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6365     S += "argument,";
6366   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6367     S += "inaccessible,";
6368   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6369     S += "malloced,";
6370   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6371     S += "unknown,";
6372   S.pop_back();
6373   return S;
6374 }
6375 
6376 namespace {
6377 struct AAMemoryLocationImpl : public AAMemoryLocation {
6378 
6379   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6380       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6381     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6382       AccessKind2Accesses[u] = nullptr;
6383   }
6384 
6385   ~AAMemoryLocationImpl() {
6386     // The AccessSets are allocated via a BumpPtrAllocator, we call
6387     // the destructor manually.
6388     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6389       if (AccessKind2Accesses[u])
6390         AccessKind2Accesses[u]->~AccessSet();
6391   }
6392 
6393   /// See AbstractAttribute::initialize(...).
6394   void initialize(Attributor &A) override {
6395     intersectAssumedBits(BEST_STATE);
6396     getKnownStateFromValue(A, getIRPosition(), getState());
6397     AAMemoryLocation::initialize(A);
6398   }
6399 
6400   /// Return the memory behavior information encoded in the IR for \p IRP.
6401   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6402                                      BitIntegerState &State,
6403                                      bool IgnoreSubsumingPositions = false) {
6404     // For internal functions we ignore `argmemonly` and
6405     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6406     // constant propagation. It is unclear if this is the best way but it is
6407     // unlikely this will cause real performance problems. If we are deriving
6408     // attributes for the anchor function we even remove the attribute in
6409     // addition to ignoring it.
6410     bool UseArgMemOnly = true;
6411     Function *AnchorFn = IRP.getAnchorScope();
6412     if (AnchorFn && A.isRunOn(*AnchorFn))
6413       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6414 
6415     SmallVector<Attribute, 2> Attrs;
6416     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6417     for (const Attribute &Attr : Attrs) {
6418       switch (Attr.getKindAsEnum()) {
6419       case Attribute::ReadNone:
6420         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6421         break;
6422       case Attribute::InaccessibleMemOnly:
6423         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6424         break;
6425       case Attribute::ArgMemOnly:
6426         if (UseArgMemOnly)
6427           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6428         else
6429           IRP.removeAttrs({Attribute::ArgMemOnly});
6430         break;
6431       case Attribute::InaccessibleMemOrArgMemOnly:
6432         if (UseArgMemOnly)
6433           State.addKnownBits(inverseLocation(
6434               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6435         else
6436           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6437         break;
6438       default:
6439         llvm_unreachable("Unexpected attribute!");
6440       }
6441     }
6442   }
6443 
6444   /// See AbstractAttribute::getDeducedAttributes(...).
6445   void getDeducedAttributes(LLVMContext &Ctx,
6446                             SmallVectorImpl<Attribute> &Attrs) const override {
6447     assert(Attrs.size() == 0);
6448     if (isAssumedReadNone()) {
6449       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6450     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6451       if (isAssumedInaccessibleMemOnly())
6452         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6453       else if (isAssumedArgMemOnly())
6454         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6455       else if (isAssumedInaccessibleOrArgMemOnly())
6456         Attrs.push_back(
6457             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6458     }
6459     assert(Attrs.size() <= 1);
6460   }
6461 
6462   /// See AbstractAttribute::manifest(...).
6463   ChangeStatus manifest(Attributor &A) override {
6464     const IRPosition &IRP = getIRPosition();
6465 
6466     // Check if we would improve the existing attributes first.
6467     SmallVector<Attribute, 4> DeducedAttrs;
6468     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6469     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6470           return IRP.hasAttr(Attr.getKindAsEnum(),
6471                              /* IgnoreSubsumingPositions */ true);
6472         }))
6473       return ChangeStatus::UNCHANGED;
6474 
6475     // Clear existing attributes.
6476     IRP.removeAttrs(AttrKinds);
6477     if (isAssumedReadNone())
6478       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6479 
6480     // Use the generic manifest method.
6481     return IRAttribute::manifest(A);
6482   }
6483 
6484   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6485   bool checkForAllAccessesToMemoryKind(
6486       function_ref<bool(const Instruction *, const Value *, AccessKind,
6487                         MemoryLocationsKind)>
6488           Pred,
6489       MemoryLocationsKind RequestedMLK) const override {
6490     if (!isValidState())
6491       return false;
6492 
6493     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6494     if (AssumedMLK == NO_LOCATIONS)
6495       return true;
6496 
6497     unsigned Idx = 0;
6498     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6499          CurMLK *= 2, ++Idx) {
6500       if (CurMLK & RequestedMLK)
6501         continue;
6502 
6503       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6504         for (const AccessInfo &AI : *Accesses)
6505           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6506             return false;
6507     }
6508 
6509     return true;
6510   }
6511 
6512   ChangeStatus indicatePessimisticFixpoint() override {
6513     // If we give up and indicate a pessimistic fixpoint this instruction will
6514     // become an access for all potential access kinds:
6515     // TODO: Add pointers for argmemonly and globals to improve the results of
6516     //       checkForAllAccessesToMemoryKind.
6517     bool Changed = false;
6518     MemoryLocationsKind KnownMLK = getKnown();
6519     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6520     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6521       if (!(CurMLK & KnownMLK))
6522         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6523                                   getAccessKindFromInst(I));
6524     return AAMemoryLocation::indicatePessimisticFixpoint();
6525   }
6526 
6527 protected:
6528   /// Helper struct to tie together an instruction that has a read or write
6529   /// effect with the pointer it accesses (if any).
6530   struct AccessInfo {
6531 
6532     /// The instruction that caused the access.
6533     const Instruction *I;
6534 
6535     /// The base pointer that is accessed, or null if unknown.
6536     const Value *Ptr;
6537 
6538     /// The kind of access (read/write/read+write).
6539     AccessKind Kind;
6540 
6541     bool operator==(const AccessInfo &RHS) const {
6542       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6543     }
6544     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6545       if (LHS.I != RHS.I)
6546         return LHS.I < RHS.I;
6547       if (LHS.Ptr != RHS.Ptr)
6548         return LHS.Ptr < RHS.Ptr;
6549       if (LHS.Kind != RHS.Kind)
6550         return LHS.Kind < RHS.Kind;
6551       return false;
6552     }
6553   };
6554 
6555   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6556   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6557   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6558   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6559 
6560   /// Categorize the pointer arguments of CB that might access memory in
6561   /// AccessedLoc and update the state and access map accordingly.
6562   void
6563   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6564                                      AAMemoryLocation::StateType &AccessedLocs,
6565                                      bool &Changed);
6566 
6567   /// Return the kind(s) of location that may be accessed by \p V.
6568   AAMemoryLocation::MemoryLocationsKind
6569   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6570 
6571   /// Return the access kind as determined by \p I.
6572   AccessKind getAccessKindFromInst(const Instruction *I) {
6573     AccessKind AK = READ_WRITE;
6574     if (I) {
6575       AK = I->mayReadFromMemory() ? READ : NONE;
6576       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6577     }
6578     return AK;
6579   }
6580 
6581   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6582   /// an access of kind \p AK to a \p MLK memory location with the access
6583   /// pointer \p Ptr.
6584   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6585                                  MemoryLocationsKind MLK, const Instruction *I,
6586                                  const Value *Ptr, bool &Changed,
6587                                  AccessKind AK = READ_WRITE) {
6588 
6589     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6590     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6591     if (!Accesses)
6592       Accesses = new (Allocator) AccessSet();
6593     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6594     State.removeAssumedBits(MLK);
6595   }
6596 
6597   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6598   /// arguments, and update the state and access map accordingly.
6599   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6600                           AAMemoryLocation::StateType &State, bool &Changed);
6601 
6602   /// Used to allocate access sets.
6603   BumpPtrAllocator &Allocator;
6604 
6605   /// The set of IR attributes AAMemoryLocation deals with.
6606   static const Attribute::AttrKind AttrKinds[4];
6607 };
6608 
6609 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6610     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6611     Attribute::InaccessibleMemOrArgMemOnly};
6612 
6613 void AAMemoryLocationImpl::categorizePtrValue(
6614     Attributor &A, const Instruction &I, const Value &Ptr,
6615     AAMemoryLocation::StateType &State, bool &Changed) {
6616   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6617                     << Ptr << " ["
6618                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6619 
6620   auto StripGEPCB = [](Value *V) -> Value * {
6621     auto *GEP = dyn_cast<GEPOperator>(V);
6622     while (GEP) {
6623       V = GEP->getPointerOperand();
6624       GEP = dyn_cast<GEPOperator>(V);
6625     }
6626     return V;
6627   };
6628 
6629   auto VisitValueCB = [&](Value &V, const Instruction *,
6630                           AAMemoryLocation::StateType &T,
6631                           bool Stripped) -> bool {
6632     // TODO: recognize the TBAA used for constant accesses.
6633     MemoryLocationsKind MLK = NO_LOCATIONS;
6634     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6635     if (isa<UndefValue>(V))
6636       return true;
6637     if (auto *Arg = dyn_cast<Argument>(&V)) {
6638       if (Arg->hasByValAttr())
6639         MLK = NO_LOCAL_MEM;
6640       else
6641         MLK = NO_ARGUMENT_MEM;
6642     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6643       // Reading constant memory is not treated as a read "effect" by the
6644       // function attr pass so we won't neither. Constants defined by TBAA are
6645       // similar. (We know we do not write it because it is constant.)
6646       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6647         if (GVar->isConstant())
6648           return true;
6649 
6650       if (GV->hasLocalLinkage())
6651         MLK = NO_GLOBAL_INTERNAL_MEM;
6652       else
6653         MLK = NO_GLOBAL_EXTERNAL_MEM;
6654     } else if (isa<ConstantPointerNull>(V) &&
6655                !NullPointerIsDefined(getAssociatedFunction(),
6656                                      V.getType()->getPointerAddressSpace())) {
6657       return true;
6658     } else if (isa<AllocaInst>(V)) {
6659       MLK = NO_LOCAL_MEM;
6660     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6661       const auto &NoAliasAA =
6662           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6663       if (NoAliasAA.isAssumedNoAlias())
6664         MLK = NO_MALLOCED_MEM;
6665       else
6666         MLK = NO_UNKOWN_MEM;
6667     } else {
6668       MLK = NO_UNKOWN_MEM;
6669     }
6670 
6671     assert(MLK != NO_LOCATIONS && "No location specified!");
6672     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6673                               getAccessKindFromInst(&I));
6674     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6675                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6676                       << "\n");
6677     return true;
6678   };
6679 
6680   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6681           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6682           /* UseValueSimplify */ true,
6683           /* MaxValues */ 32, StripGEPCB)) {
6684     LLVM_DEBUG(
6685         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6686     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6687                               getAccessKindFromInst(&I));
6688   } else {
6689     LLVM_DEBUG(
6690         dbgs()
6691         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6692         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6693   }
6694 }
6695 
6696 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6697     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6698     bool &Changed) {
6699   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6700 
6701     // Skip non-pointer arguments.
6702     const Value *ArgOp = CB.getArgOperand(ArgNo);
6703     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6704       continue;
6705 
6706     // Skip readnone arguments.
6707     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6708     const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6709         *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6710 
6711     if (ArgOpMemLocationAA.isAssumedReadNone())
6712       continue;
6713 
6714     // Categorize potentially accessed pointer arguments as if there was an
6715     // access instruction with them as pointer.
6716     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6717   }
6718 }
6719 
6720 AAMemoryLocation::MemoryLocationsKind
6721 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6722                                                   bool &Changed) {
6723   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6724                     << I << "\n");
6725 
6726   AAMemoryLocation::StateType AccessedLocs;
6727   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6728 
6729   if (auto *CB = dyn_cast<CallBase>(&I)) {
6730 
6731     // First check if we assume any memory is access is visible.
6732     const auto &CBMemLocationAA =
6733         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6734     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6735                       << " [" << CBMemLocationAA << "]\n");
6736 
6737     if (CBMemLocationAA.isAssumedReadNone())
6738       return NO_LOCATIONS;
6739 
6740     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6741       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6742                                 Changed, getAccessKindFromInst(&I));
6743       return AccessedLocs.getAssumed();
6744     }
6745 
6746     uint32_t CBAssumedNotAccessedLocs =
6747         CBMemLocationAA.getAssumedNotAccessedLocation();
6748 
6749     // Set the argmemonly and global bit as we handle them separately below.
6750     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6751         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6752 
6753     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6754       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6755         continue;
6756       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6757                                 getAccessKindFromInst(&I));
6758     }
6759 
6760     // Now handle global memory if it might be accessed. This is slightly tricky
6761     // as NO_GLOBAL_MEM has multiple bits set.
6762     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6763     if (HasGlobalAccesses) {
6764       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6765                             AccessKind Kind, MemoryLocationsKind MLK) {
6766         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6767                                   getAccessKindFromInst(&I));
6768         return true;
6769       };
6770       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6771               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6772         return AccessedLocs.getWorstState();
6773     }
6774 
6775     LLVM_DEBUG(
6776         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6777                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6778 
6779     // Now handle argument memory if it might be accessed.
6780     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6781     if (HasArgAccesses)
6782       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6783 
6784     LLVM_DEBUG(
6785         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6786                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6787 
6788     return AccessedLocs.getAssumed();
6789   }
6790 
6791   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6792     LLVM_DEBUG(
6793         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6794                << I << " [" << *Ptr << "]\n");
6795     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6796     return AccessedLocs.getAssumed();
6797   }
6798 
6799   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6800                     << I << "\n");
6801   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6802                             getAccessKindFromInst(&I));
6803   return AccessedLocs.getAssumed();
6804 }
6805 
6806 /// An AA to represent the memory behavior function attributes.
6807 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6808   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6809       : AAMemoryLocationImpl(IRP, A) {}
6810 
6811   /// See AbstractAttribute::updateImpl(Attributor &A).
6812   virtual ChangeStatus updateImpl(Attributor &A) override {
6813 
6814     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6815         *this, getIRPosition(), /* TrackDependence */ false);
6816     if (MemBehaviorAA.isAssumedReadNone()) {
6817       if (MemBehaviorAA.isKnownReadNone())
6818         return indicateOptimisticFixpoint();
6819       assert(isAssumedReadNone() &&
6820              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6821       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6822       return ChangeStatus::UNCHANGED;
6823     }
6824 
6825     // The current assumed state used to determine a change.
6826     auto AssumedState = getAssumed();
6827     bool Changed = false;
6828 
6829     auto CheckRWInst = [&](Instruction &I) {
6830       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6831       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6832                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6833       removeAssumedBits(inverseLocation(MLK, false, false));
6834       // Stop once only the valid bit set in the *not assumed location*, thus
6835       // once we don't actually exclude any memory locations in the state.
6836       return getAssumedNotAccessedLocation() != VALID_STATE;
6837     };
6838 
6839     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6840       return indicatePessimisticFixpoint();
6841 
6842     Changed |= AssumedState != getAssumed();
6843     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6844   }
6845 
6846   /// See AbstractAttribute::trackStatistics()
6847   void trackStatistics() const override {
6848     if (isAssumedReadNone())
6849       STATS_DECLTRACK_FN_ATTR(readnone)
6850     else if (isAssumedArgMemOnly())
6851       STATS_DECLTRACK_FN_ATTR(argmemonly)
6852     else if (isAssumedInaccessibleMemOnly())
6853       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6854     else if (isAssumedInaccessibleOrArgMemOnly())
6855       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6856   }
6857 };
6858 
6859 /// AAMemoryLocation attribute for call sites.
6860 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6861   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6862       : AAMemoryLocationImpl(IRP, A) {}
6863 
6864   /// See AbstractAttribute::initialize(...).
6865   void initialize(Attributor &A) override {
6866     AAMemoryLocationImpl::initialize(A);
6867     Function *F = getAssociatedFunction();
6868     if (!F || F->isDeclaration())
6869       indicatePessimisticFixpoint();
6870   }
6871 
6872   /// See AbstractAttribute::updateImpl(...).
6873   ChangeStatus updateImpl(Attributor &A) override {
6874     // TODO: Once we have call site specific value information we can provide
6875     //       call site specific liveness liveness information and then it makes
6876     //       sense to specialize attributes for call sites arguments instead of
6877     //       redirecting requests to the callee argument.
6878     Function *F = getAssociatedFunction();
6879     const IRPosition &FnPos = IRPosition::function(*F);
6880     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6881     bool Changed = false;
6882     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6883                           AccessKind Kind, MemoryLocationsKind MLK) {
6884       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6885                                 getAccessKindFromInst(I));
6886       return true;
6887     };
6888     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6889       return indicatePessimisticFixpoint();
6890     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6891   }
6892 
6893   /// See AbstractAttribute::trackStatistics()
6894   void trackStatistics() const override {
6895     if (isAssumedReadNone())
6896       STATS_DECLTRACK_CS_ATTR(readnone)
6897   }
6898 };
6899 
6900 /// ------------------ Value Constant Range Attribute -------------------------
6901 
6902 struct AAValueConstantRangeImpl : AAValueConstantRange {
6903   using StateType = IntegerRangeState;
6904   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6905       : AAValueConstantRange(IRP, A) {}
6906 
6907   /// See AbstractAttribute::getAsStr().
6908   const std::string getAsStr() const override {
6909     std::string Str;
6910     llvm::raw_string_ostream OS(Str);
6911     OS << "range(" << getBitWidth() << ")<";
6912     getKnown().print(OS);
6913     OS << " / ";
6914     getAssumed().print(OS);
6915     OS << ">";
6916     return OS.str();
6917   }
6918 
6919   /// Helper function to get a SCEV expr for the associated value at program
6920   /// point \p I.
6921   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6922     if (!getAnchorScope())
6923       return nullptr;
6924 
6925     ScalarEvolution *SE =
6926         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6927             *getAnchorScope());
6928 
6929     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6930         *getAnchorScope());
6931 
6932     if (!SE || !LI)
6933       return nullptr;
6934 
6935     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6936     if (!I)
6937       return S;
6938 
6939     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6940   }
6941 
6942   /// Helper function to get a range from SCEV for the associated value at
6943   /// program point \p I.
6944   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6945                                          const Instruction *I = nullptr) const {
6946     if (!getAnchorScope())
6947       return getWorstState(getBitWidth());
6948 
6949     ScalarEvolution *SE =
6950         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6951             *getAnchorScope());
6952 
6953     const SCEV *S = getSCEV(A, I);
6954     if (!SE || !S)
6955       return getWorstState(getBitWidth());
6956 
6957     return SE->getUnsignedRange(S);
6958   }
6959 
6960   /// Helper function to get a range from LVI for the associated value at
6961   /// program point \p I.
6962   ConstantRange
6963   getConstantRangeFromLVI(Attributor &A,
6964                           const Instruction *CtxI = nullptr) const {
6965     if (!getAnchorScope())
6966       return getWorstState(getBitWidth());
6967 
6968     LazyValueInfo *LVI =
6969         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6970             *getAnchorScope());
6971 
6972     if (!LVI || !CtxI)
6973       return getWorstState(getBitWidth());
6974     return LVI->getConstantRange(&getAssociatedValue(),
6975                                  const_cast<Instruction *>(CtxI));
6976   }
6977 
6978   /// See AAValueConstantRange::getKnownConstantRange(..).
6979   ConstantRange
6980   getKnownConstantRange(Attributor &A,
6981                         const Instruction *CtxI = nullptr) const override {
6982     if (!CtxI || CtxI == getCtxI())
6983       return getKnown();
6984 
6985     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6986     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6987     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6988   }
6989 
6990   /// See AAValueConstantRange::getAssumedConstantRange(..).
6991   ConstantRange
6992   getAssumedConstantRange(Attributor &A,
6993                           const Instruction *CtxI = nullptr) const override {
6994     // TODO: Make SCEV use Attributor assumption.
6995     //       We may be able to bound a variable range via assumptions in
6996     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6997     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6998 
6999     if (!CtxI || CtxI == getCtxI())
7000       return getAssumed();
7001 
7002     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7003     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7004     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7005   }
7006 
7007   /// See AbstractAttribute::initialize(..).
7008   void initialize(Attributor &A) override {
7009     // Intersect a range given by SCEV.
7010     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7011 
7012     // Intersect a range given by LVI.
7013     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7014   }
7015 
7016   /// Helper function to create MDNode for range metadata.
7017   static MDNode *
7018   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7019                             const ConstantRange &AssumedConstantRange) {
7020     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7021                                   Ty, AssumedConstantRange.getLower())),
7022                               ConstantAsMetadata::get(ConstantInt::get(
7023                                   Ty, AssumedConstantRange.getUpper()))};
7024     return MDNode::get(Ctx, LowAndHigh);
7025   }
7026 
7027   /// Return true if \p Assumed is included in \p KnownRanges.
7028   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7029 
7030     if (Assumed.isFullSet())
7031       return false;
7032 
7033     if (!KnownRanges)
7034       return true;
7035 
7036     // If multiple ranges are annotated in IR, we give up to annotate assumed
7037     // range for now.
7038 
7039     // TODO:  If there exists a known range which containts assumed range, we
7040     // can say assumed range is better.
7041     if (KnownRanges->getNumOperands() > 2)
7042       return false;
7043 
7044     ConstantInt *Lower =
7045         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7046     ConstantInt *Upper =
7047         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7048 
7049     ConstantRange Known(Lower->getValue(), Upper->getValue());
7050     return Known.contains(Assumed) && Known != Assumed;
7051   }
7052 
7053   /// Helper function to set range metadata.
7054   static bool
7055   setRangeMetadataIfisBetterRange(Instruction *I,
7056                                   const ConstantRange &AssumedConstantRange) {
7057     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7058     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7059       if (!AssumedConstantRange.isEmptySet()) {
7060         I->setMetadata(LLVMContext::MD_range,
7061                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7062                                                  AssumedConstantRange));
7063         return true;
7064       }
7065     }
7066     return false;
7067   }
7068 
7069   /// See AbstractAttribute::manifest()
7070   ChangeStatus manifest(Attributor &A) override {
7071     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7072     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7073     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7074 
7075     auto &V = getAssociatedValue();
7076     if (!AssumedConstantRange.isEmptySet() &&
7077         !AssumedConstantRange.isSingleElement()) {
7078       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7079         assert(I == getCtxI() && "Should not annotate an instruction which is "
7080                                  "not the context instruction");
7081         if (isa<CallInst>(I) || isa<LoadInst>(I))
7082           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7083             Changed = ChangeStatus::CHANGED;
7084       }
7085     }
7086 
7087     return Changed;
7088   }
7089 };
7090 
7091 struct AAValueConstantRangeArgument final
7092     : AAArgumentFromCallSiteArguments<
7093           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
7094   using Base = AAArgumentFromCallSiteArguments<
7095       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
7096   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7097       : Base(IRP, A) {}
7098 
7099   /// See AbstractAttribute::initialize(..).
7100   void initialize(Attributor &A) override {
7101     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7102       indicatePessimisticFixpoint();
7103     } else {
7104       Base::initialize(A);
7105     }
7106   }
7107 
7108   /// See AbstractAttribute::trackStatistics()
7109   void trackStatistics() const override {
7110     STATS_DECLTRACK_ARG_ATTR(value_range)
7111   }
7112 };
7113 
7114 struct AAValueConstantRangeReturned
7115     : AAReturnedFromReturnedValues<AAValueConstantRange,
7116                                    AAValueConstantRangeImpl> {
7117   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
7118                                             AAValueConstantRangeImpl>;
7119   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7120       : Base(IRP, A) {}
7121 
7122   /// See AbstractAttribute::initialize(...).
7123   void initialize(Attributor &A) override {}
7124 
7125   /// See AbstractAttribute::trackStatistics()
7126   void trackStatistics() const override {
7127     STATS_DECLTRACK_FNRET_ATTR(value_range)
7128   }
7129 };
7130 
7131 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7132   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7133       : AAValueConstantRangeImpl(IRP, A) {}
7134 
7135   /// See AbstractAttribute::initialize(...).
7136   void initialize(Attributor &A) override {
7137     AAValueConstantRangeImpl::initialize(A);
7138     Value &V = getAssociatedValue();
7139 
7140     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7141       unionAssumed(ConstantRange(C->getValue()));
7142       indicateOptimisticFixpoint();
7143       return;
7144     }
7145 
7146     if (isa<UndefValue>(&V)) {
7147       // Collapse the undef state to 0.
7148       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7149       indicateOptimisticFixpoint();
7150       return;
7151     }
7152 
7153     if (isa<CallBase>(&V))
7154       return;
7155 
7156     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7157       return;
7158     // If it is a load instruction with range metadata, use it.
7159     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7160       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7161         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7162         return;
7163       }
7164 
7165     // We can work with PHI and select instruction as we traverse their operands
7166     // during update.
7167     if (isa<SelectInst>(V) || isa<PHINode>(V))
7168       return;
7169 
7170     // Otherwise we give up.
7171     indicatePessimisticFixpoint();
7172 
7173     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7174                       << getAssociatedValue() << "\n");
7175   }
7176 
7177   bool calculateBinaryOperator(
7178       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7179       const Instruction *CtxI,
7180       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7181     Value *LHS = BinOp->getOperand(0);
7182     Value *RHS = BinOp->getOperand(1);
7183     // TODO: Allow non integers as well.
7184     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7185       return false;
7186 
7187     auto &LHSAA =
7188         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7189     QuerriedAAs.push_back(&LHSAA);
7190     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7191 
7192     auto &RHSAA =
7193         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7194     QuerriedAAs.push_back(&RHSAA);
7195     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7196 
7197     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7198 
7199     T.unionAssumed(AssumedRange);
7200 
7201     // TODO: Track a known state too.
7202 
7203     return T.isValidState();
7204   }
7205 
7206   bool calculateCastInst(
7207       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7208       const Instruction *CtxI,
7209       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7210     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7211     // TODO: Allow non integers as well.
7212     Value &OpV = *CastI->getOperand(0);
7213     if (!OpV.getType()->isIntegerTy())
7214       return false;
7215 
7216     auto &OpAA =
7217         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
7218     QuerriedAAs.push_back(&OpAA);
7219     T.unionAssumed(
7220         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7221     return T.isValidState();
7222   }
7223 
7224   bool
7225   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7226                    const Instruction *CtxI,
7227                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7228     Value *LHS = CmpI->getOperand(0);
7229     Value *RHS = CmpI->getOperand(1);
7230     // TODO: Allow non integers as well.
7231     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7232       return false;
7233 
7234     auto &LHSAA =
7235         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7236     QuerriedAAs.push_back(&LHSAA);
7237     auto &RHSAA =
7238         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7239     QuerriedAAs.push_back(&RHSAA);
7240 
7241     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7242     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7243 
7244     // If one of them is empty set, we can't decide.
7245     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7246       return true;
7247 
7248     bool MustTrue = false, MustFalse = false;
7249 
7250     auto AllowedRegion =
7251         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7252 
7253     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7254         CmpI->getPredicate(), RHSAARange);
7255 
7256     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7257       MustFalse = true;
7258 
7259     if (SatisfyingRegion.contains(LHSAARange))
7260       MustTrue = true;
7261 
7262     assert((!MustTrue || !MustFalse) &&
7263            "Either MustTrue or MustFalse should be false!");
7264 
7265     if (MustTrue)
7266       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7267     else if (MustFalse)
7268       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7269     else
7270       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7271 
7272     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7273                       << " " << RHSAA << "\n");
7274 
7275     // TODO: Track a known state too.
7276     return T.isValidState();
7277   }
7278 
7279   /// See AbstractAttribute::updateImpl(...).
7280   ChangeStatus updateImpl(Attributor &A) override {
7281     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7282                             IntegerRangeState &T, bool Stripped) -> bool {
7283       Instruction *I = dyn_cast<Instruction>(&V);
7284       if (!I || isa<CallBase>(I)) {
7285 
7286         // If the value is not instruction, we query AA to Attributor.
7287         const auto &AA =
7288             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
7289 
7290         // Clamp operator is not used to utilize a program point CtxI.
7291         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7292 
7293         return T.isValidState();
7294       }
7295 
7296       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7297       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7298         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7299           return false;
7300       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7301         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7302           return false;
7303       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7304         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7305           return false;
7306       } else {
7307         // Give up with other instructions.
7308         // TODO: Add other instructions
7309 
7310         T.indicatePessimisticFixpoint();
7311         return false;
7312       }
7313 
7314       // Catch circular reasoning in a pessimistic way for now.
7315       // TODO: Check how the range evolves and if we stripped anything, see also
7316       //       AADereferenceable or AAAlign for similar situations.
7317       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7318         if (QueriedAA != this)
7319           continue;
7320         // If we are in a stady state we do not need to worry.
7321         if (T.getAssumed() == getState().getAssumed())
7322           continue;
7323         T.indicatePessimisticFixpoint();
7324       }
7325 
7326       return T.isValidState();
7327     };
7328 
7329     IntegerRangeState T(getBitWidth());
7330 
7331     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7332             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7333             /* UseValueSimplify */ false))
7334       return indicatePessimisticFixpoint();
7335 
7336     return clampStateAndIndicateChange(getState(), T);
7337   }
7338 
7339   /// See AbstractAttribute::trackStatistics()
7340   void trackStatistics() const override {
7341     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7342   }
7343 };
7344 
7345 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7346   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7347       : AAValueConstantRangeImpl(IRP, A) {}
7348 
7349   /// See AbstractAttribute::initialize(...).
7350   ChangeStatus updateImpl(Attributor &A) override {
7351     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7352                      "not be called");
7353   }
7354 
7355   /// See AbstractAttribute::trackStatistics()
7356   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7357 };
7358 
7359 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7360   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7361       : AAValueConstantRangeFunction(IRP, A) {}
7362 
7363   /// See AbstractAttribute::trackStatistics()
7364   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7365 };
7366 
7367 struct AAValueConstantRangeCallSiteReturned
7368     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7369                                      AAValueConstantRangeImpl> {
7370   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7371       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7372                                        AAValueConstantRangeImpl>(IRP, A) {}
7373 
7374   /// See AbstractAttribute::initialize(...).
7375   void initialize(Attributor &A) override {
7376     // If it is a load instruction with range metadata, use the metadata.
7377     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7378       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7379         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7380 
7381     AAValueConstantRangeImpl::initialize(A);
7382   }
7383 
7384   /// See AbstractAttribute::trackStatistics()
7385   void trackStatistics() const override {
7386     STATS_DECLTRACK_CSRET_ATTR(value_range)
7387   }
7388 };
7389 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7390   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7391       : AAValueConstantRangeFloating(IRP, A) {}
7392 
7393   /// See AbstractAttribute::manifest()
7394   ChangeStatus manifest(Attributor &A) override {
7395     return ChangeStatus::UNCHANGED;
7396   }
7397 
7398   /// See AbstractAttribute::trackStatistics()
7399   void trackStatistics() const override {
7400     STATS_DECLTRACK_CSARG_ATTR(value_range)
7401   }
7402 };
7403 
7404 /// ------------------ Potential Values Attribute -------------------------
7405 
7406 struct AAPotentialValuesImpl : AAPotentialValues {
7407   using StateType = PotentialConstantIntValuesState;
7408 
7409   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7410       : AAPotentialValues(IRP, A) {}
7411 
7412   /// See AbstractAttribute::getAsStr().
7413   const std::string getAsStr() const override {
7414     std::string Str;
7415     llvm::raw_string_ostream OS(Str);
7416     OS << getState();
7417     return OS.str();
7418   }
7419 
7420   /// See AbstractAttribute::updateImpl(...).
7421   ChangeStatus updateImpl(Attributor &A) override {
7422     return indicatePessimisticFixpoint();
7423   }
7424 };
7425 
7426 struct AAPotentialValuesArgument final
7427     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7428                                       PotentialConstantIntValuesState> {
7429   using Base =
7430       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7431                                       PotentialConstantIntValuesState>;
7432   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7433       : Base(IRP, A) {}
7434 
7435   /// See AbstractAttribute::initialize(..).
7436   void initialize(Attributor &A) override {
7437     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7438       indicatePessimisticFixpoint();
7439     } else {
7440       Base::initialize(A);
7441     }
7442   }
7443 
7444   /// See AbstractAttribute::trackStatistics()
7445   void trackStatistics() const override {
7446     STATS_DECLTRACK_ARG_ATTR(potential_values)
7447   }
7448 };
7449 
7450 struct AAPotentialValuesReturned
7451     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7452   using Base =
7453       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7454   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7455       : Base(IRP, A) {}
7456 
7457   /// See AbstractAttribute::trackStatistics()
7458   void trackStatistics() const override {
7459     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7460   }
7461 };
7462 
7463 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7464   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7465       : AAPotentialValuesImpl(IRP, A) {}
7466 
7467   /// See AbstractAttribute::initialize(..).
7468   void initialize(Attributor &A) override {
7469     Value &V = getAssociatedValue();
7470 
7471     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7472       unionAssumed(C->getValue());
7473       indicateOptimisticFixpoint();
7474       return;
7475     }
7476 
7477     if (isa<UndefValue>(&V)) {
7478       unionAssumedWithUndef();
7479       indicateOptimisticFixpoint();
7480       return;
7481     }
7482 
7483     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7484       return;
7485 
7486     if (isa<SelectInst>(V) || isa<PHINode>(V))
7487       return;
7488 
7489     indicatePessimisticFixpoint();
7490 
7491     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7492                       << getAssociatedValue() << "\n");
7493   }
7494 
7495   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7496                                 const APInt &RHS) {
7497     ICmpInst::Predicate Pred = ICI->getPredicate();
7498     switch (Pred) {
7499     case ICmpInst::ICMP_UGT:
7500       return LHS.ugt(RHS);
7501     case ICmpInst::ICMP_SGT:
7502       return LHS.sgt(RHS);
7503     case ICmpInst::ICMP_EQ:
7504       return LHS.eq(RHS);
7505     case ICmpInst::ICMP_UGE:
7506       return LHS.uge(RHS);
7507     case ICmpInst::ICMP_SGE:
7508       return LHS.sge(RHS);
7509     case ICmpInst::ICMP_ULT:
7510       return LHS.ult(RHS);
7511     case ICmpInst::ICMP_SLT:
7512       return LHS.slt(RHS);
7513     case ICmpInst::ICMP_NE:
7514       return LHS.ne(RHS);
7515     case ICmpInst::ICMP_ULE:
7516       return LHS.ule(RHS);
7517     case ICmpInst::ICMP_SLE:
7518       return LHS.sle(RHS);
7519     default:
7520       llvm_unreachable("Invalid ICmp predicate!");
7521     }
7522   }
7523 
7524   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7525                                  uint32_t ResultBitWidth) {
7526     Instruction::CastOps CastOp = CI->getOpcode();
7527     switch (CastOp) {
7528     default:
7529       llvm_unreachable("unsupported or not integer cast");
7530     case Instruction::Trunc:
7531       return Src.trunc(ResultBitWidth);
7532     case Instruction::SExt:
7533       return Src.sext(ResultBitWidth);
7534     case Instruction::ZExt:
7535       return Src.zext(ResultBitWidth);
7536     case Instruction::BitCast:
7537       return Src;
7538     }
7539   }
7540 
7541   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7542                                        const APInt &LHS, const APInt &RHS,
7543                                        bool &SkipOperation, bool &Unsupported) {
7544     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7545     // Unsupported is set to true when the binary operator is not supported.
7546     // SkipOperation is set to true when UB occur with the given operand pair
7547     // (LHS, RHS).
7548     // TODO: we should look at nsw and nuw keywords to handle operations
7549     //       that create poison or undef value.
7550     switch (BinOpcode) {
7551     default:
7552       Unsupported = true;
7553       return LHS;
7554     case Instruction::Add:
7555       return LHS + RHS;
7556     case Instruction::Sub:
7557       return LHS - RHS;
7558     case Instruction::Mul:
7559       return LHS * RHS;
7560     case Instruction::UDiv:
7561       if (RHS.isNullValue()) {
7562         SkipOperation = true;
7563         return LHS;
7564       }
7565       return LHS.udiv(RHS);
7566     case Instruction::SDiv:
7567       if (RHS.isNullValue()) {
7568         SkipOperation = true;
7569         return LHS;
7570       }
7571       return LHS.sdiv(RHS);
7572     case Instruction::URem:
7573       if (RHS.isNullValue()) {
7574         SkipOperation = true;
7575         return LHS;
7576       }
7577       return LHS.urem(RHS);
7578     case Instruction::SRem:
7579       if (RHS.isNullValue()) {
7580         SkipOperation = true;
7581         return LHS;
7582       }
7583       return LHS.srem(RHS);
7584     case Instruction::Shl:
7585       return LHS.shl(RHS);
7586     case Instruction::LShr:
7587       return LHS.lshr(RHS);
7588     case Instruction::AShr:
7589       return LHS.ashr(RHS);
7590     case Instruction::And:
7591       return LHS & RHS;
7592     case Instruction::Or:
7593       return LHS | RHS;
7594     case Instruction::Xor:
7595       return LHS ^ RHS;
7596     }
7597   }
7598 
7599   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7600                                            const APInt &LHS, const APInt &RHS) {
7601     bool SkipOperation = false;
7602     bool Unsupported = false;
7603     APInt Result =
7604         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7605     if (Unsupported)
7606       return false;
7607     // If SkipOperation is true, we can ignore this operand pair (L, R).
7608     if (!SkipOperation)
7609       unionAssumed(Result);
7610     return isValidState();
7611   }
7612 
7613   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7614     auto AssumedBefore = getAssumed();
7615     Value *LHS = ICI->getOperand(0);
7616     Value *RHS = ICI->getOperand(1);
7617     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7618       return indicatePessimisticFixpoint();
7619 
7620     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7621     if (!LHSAA.isValidState())
7622       return indicatePessimisticFixpoint();
7623 
7624     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7625     if (!RHSAA.isValidState())
7626       return indicatePessimisticFixpoint();
7627 
7628     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7629     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7630 
7631     // TODO: make use of undef flag to limit potential values aggressively.
7632     bool MaybeTrue = false, MaybeFalse = false;
7633     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7634     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7635       // The result of any comparison between undefs can be soundly replaced
7636       // with undef.
7637       unionAssumedWithUndef();
7638     } else if (LHSAA.undefIsContained()) {
7639       bool MaybeTrue = false, MaybeFalse = false;
7640       for (const APInt &R : RHSAAPVS) {
7641         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7642         MaybeTrue |= CmpResult;
7643         MaybeFalse |= !CmpResult;
7644         if (MaybeTrue & MaybeFalse)
7645           return indicatePessimisticFixpoint();
7646       }
7647     } else if (RHSAA.undefIsContained()) {
7648       for (const APInt &L : LHSAAPVS) {
7649         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7650         MaybeTrue |= CmpResult;
7651         MaybeFalse |= !CmpResult;
7652         if (MaybeTrue & MaybeFalse)
7653           return indicatePessimisticFixpoint();
7654       }
7655     } else {
7656       for (const APInt &L : LHSAAPVS) {
7657         for (const APInt &R : RHSAAPVS) {
7658           bool CmpResult = calculateICmpInst(ICI, L, R);
7659           MaybeTrue |= CmpResult;
7660           MaybeFalse |= !CmpResult;
7661           if (MaybeTrue & MaybeFalse)
7662             return indicatePessimisticFixpoint();
7663         }
7664       }
7665     }
7666     if (MaybeTrue)
7667       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7668     if (MaybeFalse)
7669       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7670     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7671                                          : ChangeStatus::CHANGED;
7672   }
7673 
7674   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7675     auto AssumedBefore = getAssumed();
7676     Value *LHS = SI->getTrueValue();
7677     Value *RHS = SI->getFalseValue();
7678     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7679       return indicatePessimisticFixpoint();
7680 
7681     // TODO: Use assumed simplified condition value
7682     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7683     if (!LHSAA.isValidState())
7684       return indicatePessimisticFixpoint();
7685 
7686     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7687     if (!RHSAA.isValidState())
7688       return indicatePessimisticFixpoint();
7689 
7690     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7691       // select i1 *, undef , undef => undef
7692       unionAssumedWithUndef();
7693     else {
7694       unionAssumed(LHSAA);
7695       unionAssumed(RHSAA);
7696     }
7697     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7698                                          : ChangeStatus::CHANGED;
7699   }
7700 
7701   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7702     auto AssumedBefore = getAssumed();
7703     if (!CI->isIntegerCast())
7704       return indicatePessimisticFixpoint();
7705     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7706     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7707     Value *Src = CI->getOperand(0);
7708     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src));
7709     if (!SrcAA.isValidState())
7710       return indicatePessimisticFixpoint();
7711     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7712     if (SrcAA.undefIsContained())
7713       unionAssumedWithUndef();
7714     else {
7715       for (const APInt &S : SrcAAPVS) {
7716         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7717         unionAssumed(T);
7718       }
7719     }
7720     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7721                                          : ChangeStatus::CHANGED;
7722   }
7723 
7724   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7725     auto AssumedBefore = getAssumed();
7726     Value *LHS = BinOp->getOperand(0);
7727     Value *RHS = BinOp->getOperand(1);
7728     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7729       return indicatePessimisticFixpoint();
7730 
7731     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7732     if (!LHSAA.isValidState())
7733       return indicatePessimisticFixpoint();
7734 
7735     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7736     if (!RHSAA.isValidState())
7737       return indicatePessimisticFixpoint();
7738 
7739     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7740     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7741     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7742 
7743     // TODO: make use of undef flag to limit potential values aggressively.
7744     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7745       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7746         return indicatePessimisticFixpoint();
7747     } else if (LHSAA.undefIsContained()) {
7748       for (const APInt &R : RHSAAPVS) {
7749         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7750           return indicatePessimisticFixpoint();
7751       }
7752     } else if (RHSAA.undefIsContained()) {
7753       for (const APInt &L : LHSAAPVS) {
7754         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7755           return indicatePessimisticFixpoint();
7756       }
7757     } else {
7758       for (const APInt &L : LHSAAPVS) {
7759         for (const APInt &R : RHSAAPVS) {
7760           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7761             return indicatePessimisticFixpoint();
7762         }
7763       }
7764     }
7765     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7766                                          : ChangeStatus::CHANGED;
7767   }
7768 
7769   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7770     auto AssumedBefore = getAssumed();
7771     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7772       Value *IncomingValue = PHI->getIncomingValue(u);
7773       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7774           *this, IRPosition::value(*IncomingValue));
7775       if (!PotentialValuesAA.isValidState())
7776         return indicatePessimisticFixpoint();
7777       if (PotentialValuesAA.undefIsContained())
7778         unionAssumedWithUndef();
7779       else
7780         unionAssumed(PotentialValuesAA.getAssumed());
7781     }
7782     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7783                                          : ChangeStatus::CHANGED;
7784   }
7785 
7786   /// See AbstractAttribute::updateImpl(...).
7787   ChangeStatus updateImpl(Attributor &A) override {
7788     Value &V = getAssociatedValue();
7789     Instruction *I = dyn_cast<Instruction>(&V);
7790 
7791     if (auto *ICI = dyn_cast<ICmpInst>(I))
7792       return updateWithICmpInst(A, ICI);
7793 
7794     if (auto *SI = dyn_cast<SelectInst>(I))
7795       return updateWithSelectInst(A, SI);
7796 
7797     if (auto *CI = dyn_cast<CastInst>(I))
7798       return updateWithCastInst(A, CI);
7799 
7800     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7801       return updateWithBinaryOperator(A, BinOp);
7802 
7803     if (auto *PHI = dyn_cast<PHINode>(I))
7804       return updateWithPHINode(A, PHI);
7805 
7806     return indicatePessimisticFixpoint();
7807   }
7808 
7809   /// See AbstractAttribute::trackStatistics()
7810   void trackStatistics() const override {
7811     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7812   }
7813 };
7814 
7815 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7816   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7817       : AAPotentialValuesImpl(IRP, A) {}
7818 
7819   /// See AbstractAttribute::initialize(...).
7820   ChangeStatus updateImpl(Attributor &A) override {
7821     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7822                      "not be called");
7823   }
7824 
7825   /// See AbstractAttribute::trackStatistics()
7826   void trackStatistics() const override {
7827     STATS_DECLTRACK_FN_ATTR(potential_values)
7828   }
7829 };
7830 
7831 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7832   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7833       : AAPotentialValuesFunction(IRP, A) {}
7834 
7835   /// See AbstractAttribute::trackStatistics()
7836   void trackStatistics() const override {
7837     STATS_DECLTRACK_CS_ATTR(potential_values)
7838   }
7839 };
7840 
7841 struct AAPotentialValuesCallSiteReturned
7842     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7843   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7844       : AACallSiteReturnedFromReturned<AAPotentialValues,
7845                                        AAPotentialValuesImpl>(IRP, A) {}
7846 
7847   /// See AbstractAttribute::trackStatistics()
7848   void trackStatistics() const override {
7849     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7850   }
7851 };
7852 
7853 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7854   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7855       : AAPotentialValuesFloating(IRP, A) {}
7856 
7857   /// See AbstractAttribute::initialize(..).
7858   void initialize(Attributor &A) override {
7859     Value &V = getAssociatedValue();
7860 
7861     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7862       unionAssumed(C->getValue());
7863       indicateOptimisticFixpoint();
7864       return;
7865     }
7866 
7867     if (isa<UndefValue>(&V)) {
7868       unionAssumedWithUndef();
7869       indicateOptimisticFixpoint();
7870       return;
7871     }
7872   }
7873 
7874   /// See AbstractAttribute::updateImpl(...).
7875   ChangeStatus updateImpl(Attributor &A) override {
7876     Value &V = getAssociatedValue();
7877     auto AssumedBefore = getAssumed();
7878     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V));
7879     const auto &S = AA.getAssumed();
7880     unionAssumed(S);
7881     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7882                                          : ChangeStatus::CHANGED;
7883   }
7884 
7885   /// See AbstractAttribute::trackStatistics()
7886   void trackStatistics() const override {
7887     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7888   }
7889 };
7890 
7891 /// ------------------------ NoUndef Attribute ---------------------------------
7892 struct AANoUndefImpl : AANoUndef {
7893   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
7894 
7895   /// See AbstractAttribute::initialize(...).
7896   void initialize(Attributor &A) override {
7897     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
7898       indicateOptimisticFixpoint();
7899       return;
7900     }
7901     Value &V = getAssociatedValue();
7902     if (isa<UndefValue>(V))
7903       indicatePessimisticFixpoint();
7904     else if (isa<FreezeInst>(V))
7905       indicateOptimisticFixpoint();
7906     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
7907              isGuaranteedNotToBeUndefOrPoison(&V))
7908       indicateOptimisticFixpoint();
7909     else
7910       AANoUndef::initialize(A);
7911   }
7912 
7913   /// See followUsesInMBEC
7914   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
7915                        AANoUndef::StateType &State) {
7916     const Value *UseV = U->get();
7917     const DominatorTree *DT = nullptr;
7918     AssumptionCache *AC = nullptr;
7919     InformationCache &InfoCache = A.getInfoCache();
7920     if (Function *F = getAnchorScope()) {
7921       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
7922       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
7923     }
7924     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
7925     bool TrackUse = false;
7926     // Track use for instructions which must produce undef or poison bits when
7927     // at least one operand contains such bits.
7928     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
7929       TrackUse = true;
7930     return TrackUse;
7931   }
7932 
7933   /// See AbstractAttribute::getAsStr().
7934   const std::string getAsStr() const override {
7935     return getAssumed() ? "noundef" : "may-undef-or-poison";
7936   }
7937 
7938   ChangeStatus manifest(Attributor &A) override {
7939     // We don't manifest noundef attribute for dead positions because the
7940     // associated values with dead positions would be replaced with undef
7941     // values.
7942     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
7943       return ChangeStatus::UNCHANGED;
7944     // A position whose simplified value does not have any value is
7945     // considered to be dead. We don't manifest noundef in such positions for
7946     // the same reason above.
7947     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
7948         *this, getIRPosition(), /* TrackDependence */ false);
7949     if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
7950       return ChangeStatus::UNCHANGED;
7951     return AANoUndef::manifest(A);
7952   }
7953 };
7954 
7955 struct AANoUndefFloating : public AANoUndefImpl {
7956   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
7957       : AANoUndefImpl(IRP, A) {}
7958 
7959   /// See AbstractAttribute::initialize(...).
7960   void initialize(Attributor &A) override {
7961     AANoUndefImpl::initialize(A);
7962     if (!getState().isAtFixpoint())
7963       if (Instruction *CtxI = getCtxI())
7964         followUsesInMBEC(*this, A, getState(), *CtxI);
7965   }
7966 
7967   /// See AbstractAttribute::updateImpl(...).
7968   ChangeStatus updateImpl(Attributor &A) override {
7969     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7970                             AANoUndef::StateType &T, bool Stripped) -> bool {
7971       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V));
7972       if (!Stripped && this == &AA) {
7973         T.indicatePessimisticFixpoint();
7974       } else {
7975         const AANoUndef::StateType &S =
7976             static_cast<const AANoUndef::StateType &>(AA.getState());
7977         T ^= S;
7978       }
7979       return T.isValidState();
7980     };
7981 
7982     StateType T;
7983     if (!genericValueTraversal<AANoUndef, StateType>(
7984             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
7985       return indicatePessimisticFixpoint();
7986 
7987     return clampStateAndIndicateChange(getState(), T);
7988   }
7989 
7990   /// See AbstractAttribute::trackStatistics()
7991   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
7992 };
7993 
7994 struct AANoUndefReturned final
7995     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
7996   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
7997       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
7998 
7999   /// See AbstractAttribute::trackStatistics()
8000   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8001 };
8002 
8003 struct AANoUndefArgument final
8004     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
8005   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8006       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8007 
8008   /// See AbstractAttribute::trackStatistics()
8009   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8010 };
8011 
8012 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8013   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8014       : AANoUndefFloating(IRP, A) {}
8015 
8016   /// See AbstractAttribute::trackStatistics()
8017   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8018 };
8019 
8020 struct AANoUndefCallSiteReturned final
8021     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8022   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8023       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8024 
8025   /// See AbstractAttribute::trackStatistics()
8026   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8027 };
8028 } // namespace
8029 
8030 const char AAReturnedValues::ID = 0;
8031 const char AANoUnwind::ID = 0;
8032 const char AANoSync::ID = 0;
8033 const char AANoFree::ID = 0;
8034 const char AANonNull::ID = 0;
8035 const char AANoRecurse::ID = 0;
8036 const char AAWillReturn::ID = 0;
8037 const char AAUndefinedBehavior::ID = 0;
8038 const char AANoAlias::ID = 0;
8039 const char AAReachability::ID = 0;
8040 const char AANoReturn::ID = 0;
8041 const char AAIsDead::ID = 0;
8042 const char AADereferenceable::ID = 0;
8043 const char AAAlign::ID = 0;
8044 const char AANoCapture::ID = 0;
8045 const char AAValueSimplify::ID = 0;
8046 const char AAHeapToStack::ID = 0;
8047 const char AAPrivatizablePtr::ID = 0;
8048 const char AAMemoryBehavior::ID = 0;
8049 const char AAMemoryLocation::ID = 0;
8050 const char AAValueConstantRange::ID = 0;
8051 const char AAPotentialValues::ID = 0;
8052 const char AANoUndef::ID = 0;
8053 
8054 // Macro magic to create the static generator function for attributes that
8055 // follow the naming scheme.
8056 
8057 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8058   case IRPosition::PK:                                                         \
8059     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8060 
8061 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8062   case IRPosition::PK:                                                         \
8063     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
8064     ++NumAAs;                                                                  \
8065     break;
8066 
8067 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8068   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8069     CLASS *AA = nullptr;                                                       \
8070     switch (IRP.getPositionKind()) {                                           \
8071       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8072       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8073       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8074       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8075       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8076       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8077       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8078       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8079     }                                                                          \
8080     return *AA;                                                                \
8081   }
8082 
8083 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8084   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8085     CLASS *AA = nullptr;                                                       \
8086     switch (IRP.getPositionKind()) {                                           \
8087       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8088       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8089       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8090       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8091       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8092       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8093       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8094       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8095     }                                                                          \
8096     return *AA;                                                                \
8097   }
8098 
8099 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8100   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8101     CLASS *AA = nullptr;                                                       \
8102     switch (IRP.getPositionKind()) {                                           \
8103       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8104       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8105       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8106       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8107       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8108       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8109       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8110       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8111     }                                                                          \
8112     return *AA;                                                                \
8113   }
8114 
8115 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8116   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8117     CLASS *AA = nullptr;                                                       \
8118     switch (IRP.getPositionKind()) {                                           \
8119       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8120       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8121       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8122       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8123       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8124       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8125       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8126       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8127     }                                                                          \
8128     return *AA;                                                                \
8129   }
8130 
8131 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8132   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8133     CLASS *AA = nullptr;                                                       \
8134     switch (IRP.getPositionKind()) {                                           \
8135       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8136       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8137       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8138       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8139       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8140       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8141       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8142       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8143     }                                                                          \
8144     return *AA;                                                                \
8145   }
8146 
8147 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8148 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8149 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8150 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8151 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8152 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8153 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8154 
8155 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8156 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8157 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8158 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8159 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8160 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8161 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8162 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8163 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8164 
8165 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8166 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8167 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8168 
8169 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8170 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8171 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8172 
8173 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8174 
8175 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8176 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8177 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8178 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8179 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8180 #undef SWITCH_PK_CREATE
8181 #undef SWITCH_PK_INV
8182