xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CycleAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <numeric>
#include <optional>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<int> MaxPotentialValuesIterations(
    "attributor-max-potential-values-iterations", cl::Hidden,
    cl::desc(
        "Maximum number of iterations we keep dismantling potential values."),
    cl::init(64));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

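// For illustration (hand-expanded from the macros above; roughly what the
// preprocessor produces), STATS_DECLTRACK_ARG_ATTR(returned) becomes:
//   {
//     STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned);
//   }
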
// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AANonConvergent)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AAMustProgress)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AAInstanceInfo)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAIntraFnReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialConstantValues)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AANoFPClass)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAInterFnReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)
PIPE_OPERATOR(AAUnderlyingObjects)
PIPE_OPERATOR(AAAddressSpace)
PIPE_OPERATOR(AAAllocationInfo)
PIPE_OPERATOR(AAIndirectCallInfo)
PIPE_OPERATOR(AAGlobalValueInfo)
PIPE_OPERATOR(AADenormalFPMath)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Return true if \p I might be executed in a cycle of the CFG. If
/// \p HeaderOnly is set, only return true if \p I's block is the header of
/// the innermost cycle containing it. If \p CPtr is non-null, it is set to
/// that cycle. If no cycle info \p CI is available, conservatively return
/// true.
static bool mayBeInCycle(const CycleInfo *CI, const Instruction *I,
                         bool HeaderOnly, Cycle **CPtr = nullptr) {
  if (!CI)
    return true;
  auto *BB = I->getParent();
  auto *C = CI->getCycle(BB);
  if (!C)
    return false;
  if (CPtr)
    *CPtr = C;
  return !HeaderOnly || BB == C->getHeader();
}

/// Checks if a type could have padding bytes.
static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!Ty->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
  if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  // For array types, check for padding within members.
  if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  if (!isa<StructType>(Ty))
    return true;

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(Ty);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
    Type *ElTy = StructTy->getElementType(I);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(I))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}
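
// For illustration (assuming a typical 64-bit data layout with naturally
// aligned integers): { i32, i64 } is not densely packed, since 4 padding
// bytes follow the i32 element, while { i32, i32, i64 } and [4 x i16] are
// densely packed.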

/// Get the pointer operand of a memory accessing instruction. If \p I is not
/// a memory accessing instruction, return nullptr. If \p AllowVolatile is set
/// to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer based on \p Ptr, advanced by \p Offset
/// bytes.
static Value *constructPointer(Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB) {
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes\n");

  if (Offset)
    Ptr = IRB.CreatePtrAdd(Ptr, IRB.getInt64(Offset),
                           Ptr->getName() + ".b" + Twine(Offset));
  return Ptr;
}
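
// For illustration (a sketch of the emitted IR, not authoritative): since
// CreatePtrAdd produces a byte-wise GEP, constructPointer(%p, 4, IRB) yields
// something like
//   %p.b4 = getelementptr i8, ptr %p, i64 4
// while a zero offset returns %p unchanged.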

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange *ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    if (!ValueConstantRangeAA)
      return false;
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA->getAssumed()
                                     : ValueConstantRangeAA->getKnown();
    if (Range.isFullSet())
      return false;

    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}
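
// Illustrative example (hypothetical IR): given
//   %gep = getelementptr inbounds i8, ptr %base, i64 %idx
// where AAValueConstantRange knows %idx lies in [4, 8), the walk returns
// %base and accumulates 4 into Offset with GetMinOffset, or 7 (the signed
// maximum of the range) without it.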

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind,
          bool RecurseForSelectAndPHI = true>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values, and we
  // want to join (IntegerState::operator&) the states of all that exist.
  std::optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    // If possible, use the hasAssumedIRAttr interface.
    if (Attribute::isEnumAttrKind(IRAttributeKind)) {
      bool IsKnown;
      return AA::hasAssumedIRAttr<IRAttributeKind>(
          A, &QueryingAA, RVPos, DepClassTy::REQUIRED, IsKnown);
    }

    const AAType *AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    if (!AA)
      return false;
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV
                      << " AA: " << AA->getAsStr(&A) << " @ " << RVPos << "\n");
    const StateType &AAS = AA->getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA,
                                   AA::ValueScope::Intraprocedural,
                                   RecurseForSelectAndPHI))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}
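
// As an illustrative sketch (hypothetical IR, assuming AANonNull as AAType):
// for a function with two returns, `ret ptr %a` and `ret ptr %b`, the join
// above keeps a nonnull deduction for the returned position only if it holds
// for both %a and %b; a single unknown return value pessimizes the whole
// position.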

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind,
          bool RecurseForSelectAndPHI = true>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType, IRAttributeKind,
                             RecurseForSelectAndPHI>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, and thus none are
    // assumed dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments, and
  // we want to join (IntegerState::operator&) the states of all that exist.
  std::optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    // If possible, use the hasAssumedIRAttr interface.
    if (Attribute::isEnumAttrKind(IRAttributeKind)) {
      bool IsKnown;
      return AA::hasAssumedIRAttr<IRAttributeKind>(
          A, &QueryingAA, ACSArgPos, DepClassTy::REQUIRED, IsKnown);
    }

    const AAType *AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    if (!AA)
      return false;
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA->getAsStr(&A) << " @" << ACSArgPos
                      << "\n");
    const StateType &AAS = AA->getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");
  const IRPosition CBArgPos = IRPosition::callsite_argument(*CBContext, ArgNo);

  // If possible, use the hasAssumedIRAttr interface.
  if (Attribute::isEnumAttrKind(IRAttributeKind)) {
    bool IsKnown;
    return AA::hasAssumedIRAttr<IRAttributeKind>(
        A, &QueryingAttribute, CBArgPos, DepClassTy::REQUIRED, IsKnown);
  }

  const auto *AA =
      A.getAAFor<AAType>(QueryingAttribute, CBArgPos, DepClassTy::REQUIRED);
  if (!AA)
    return false;
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA->getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos << " CB Arg state: "
                    << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType,
                                              IRAttributeKind>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType, IRAttributeKind>(A, *this,
                                                                    S);

    // TODO: If we know we visited all incoming values, and thus none are
    // assumed dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
struct AACalleeToCallSite : public BaseType {
  AACalleeToCallSite(const IRPosition &IRP, Attributor &A) : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto IRPKind = this->getIRPosition().getPositionKind();
    assert((IRPKind == IRPosition::IRP_CALL_SITE_RETURNED ||
            IRPKind == IRPosition::IRP_CALL_SITE) &&
           "Can only wrap function returned positions for call site "
           "returned positions!");
    auto &S = this->getState();

    CallBase &CB = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:" << CB
                        << "\n");

    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    auto CalleePred = [&](ArrayRef<const Function *> Callees) {
      for (const Function *Callee : Callees) {
        IRPosition FnPos =
            IRPKind == llvm::IRPosition::IRP_CALL_SITE_RETURNED
                ? IRPosition::returned(*Callee,
                                       IntroduceCallBaseContext ? &CB : nullptr)
                : IRPosition::function(
                      *Callee, IntroduceCallBaseContext ? &CB : nullptr);
        // If possible, use the hasAssumedIRAttr interface.
        if (Attribute::isEnumAttrKind(IRAttributeKind)) {
          bool IsKnown;
          if (!AA::hasAssumedIRAttr<IRAttributeKind>(
                  A, this, FnPos, DepClassTy::REQUIRED, IsKnown))
            return false;
          continue;
        }

        const AAType *AA =
            A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
        if (!AA)
          return false;
        Changed |= clampStateAndIndicateChange(S, AA->getState());
        if (S.isAtFixpoint())
          return S.isValidState();
      }
      return true;
    };
    if (!A.checkForAllCallees(CalleePred, *this, CB))
      return S.indicatePessimisticFixpoint();
    return Changed;
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {
  MustBeExecutedContextExplorer *Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();
  if (!Explorer)
    return;

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  followUsesInContext<AAType>(AA, A, *Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged into its own state. Let ParentState_i be a
  // state to indicate the known information for an i-th branch instruction in
  // the context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if (a) {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    } else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer->checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, *Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows RangeTy as a key in a DenseMap.
template <> struct DenseMapInfo<AA::RangeTy> {
  static inline AA::RangeTy getEmptyKey() {
    auto EmptyKey = DenseMapInfo<int64_t>::getEmptyKey();
    return AA::RangeTy{EmptyKey, EmptyKey};
  }

  static inline AA::RangeTy getTombstoneKey() {
    auto TombstoneKey = DenseMapInfo<int64_t>::getTombstoneKey();
    return AA::RangeTy{TombstoneKey, TombstoneKey};
  }

  static unsigned getHashValue(const AA::RangeTy &Range) {
    return detail::combineHashValue(
        DenseMapInfo<int64_t>::getHashValue(Range.Offset),
        DenseMapInfo<int64_t>::getHashValue(Range.Size));
  }

  static bool isEqual(const AA::RangeTy &A, const AA::RangeTy &B) {
    return A == B;
  }
};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {
  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) = default;

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessList = R.AccessList;
    OffsetBins = R.OffsetBins;
    RemoteIMap = R.RemoteIMap;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessList, R.AccessList);
    std::swap(OffsetBins, R.OffsetBins);
    std::swap(RemoteIMap, R.RemoteIMap);
    return *this;
  }

  /// Add a new Access to the state covering the offset ranges given by
  /// \p Ranges. The access is associated with \p I, writes \p Content (if
  /// anything), and is of kind \p Kind. If an Access already exists for the
  /// same \p I and same \p RemoteI, the two are combined, potentially losing
  /// information about offset and size. The resulting access must then be
  /// moved from its original OffsetBin to the bin for its new offset.
  ///
  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, const AAPointerInfo::RangeList &Ranges,
                         Instruction &I, std::optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr);

  AAPointerInfo::const_bin_iterator begin() const { return OffsetBins.begin(); }
  AAPointerInfo::const_bin_iterator end() const { return OffsetBins.end(); }
  int64_t numOffsetBins() const { return OffsetBins.size(); }

  const AAPointerInfo::Access &getAccess(unsigned Index) const {
    return AccessList[Index];
  }

protected:
  // Every memory instruction results in an Access object. We maintain a list of
  // all Access objects that we own, along with the following maps:
  //
  // - OffsetBins: RangeTy -> { Access }
  // - RemoteIMap: RemoteI x LocalI -> Access
  //
  // A RemoteI is any instruction that accesses memory. RemoteI is different
  // from LocalI if and only if LocalI is a call; then RemoteI is some
  // instruction in the callgraph starting from LocalI. Multiple paths in the
  // callgraph from LocalI to RemoteI may produce multiple accesses, but these
  // are all combined into a single Access object. This may result in loss of
  // information in RangeTy in the Access object.
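  //
  // For illustration (a hypothetical example): for `call void @f(ptr %p)`
  // where @f contains `store i32 0, ptr %arg`, the LocalI is the call and the
  // RemoteI is the store inside @f; for a plain store in the current
  // function, LocalI and RemoteI coincide.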
  SmallVector<AAPointerInfo::Access> AccessList;
  AAPointerInfo::OffsetBinsTy OffsetBins;
  DenseMap<const Instruction *, SmallVector<unsigned>> RemoteIMap;

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AA::RangeTy Range,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (const auto &It : OffsetBins) {
      AA::RangeTy ItRange = It.getFirst();
      if (!Range.mayOverlap(ItRange))
        continue;
      bool IsExact = Range == ItRange && !Range.offsetOrSizeAreUnknown();
      for (auto Index : It.getSecond()) {
        auto &Access = AccessList[Index];
        if (!CB(Access, IsExact))
          return false;
      }
    }
    return true;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB,
      AA::RangeTy &Range) const {
    if (!isValidState())
      return false;

    auto LocalList = RemoteIMap.find(&I);
    if (LocalList == RemoteIMap.end()) {
      return true;
    }

    for (unsigned Index : LocalList->getSecond()) {
      for (auto &R : AccessList[Index]) {
        Range &= R;
        if (Range.offsetAndSizeAreUnknown())
          break;
      }
    }
    return forallInterferingAccesses(Range, CB);
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

ChangeStatus AA::PointerInfo::State::addAccess(
    Attributor &A, const AAPointerInfo::RangeList &Ranges, Instruction &I,
    std::optional<Value *> Content, AAPointerInfo::AccessKind Kind, Type *Ty,
    Instruction *RemoteI) {
  RemoteI = RemoteI ? RemoteI : &I;

  // Check if we have an access for this instruction, if not, simply add it.
  auto &LocalList = RemoteIMap[RemoteI];
  bool AccExists = false;
  unsigned AccIndex = AccessList.size();
  for (auto Index : LocalList) {
    auto &A = AccessList[Index];
    if (A.getLocalInst() == &I) {
      AccExists = true;
      AccIndex = Index;
      break;
    }
  }

  auto AddToBins = [&](const AAPointerInfo::RangeList &ToAdd) {
    LLVM_DEBUG(if (ToAdd.size()) dbgs()
                   << "[AAPointerInfo] Inserting access in new offset bins\n";);

    for (auto Key : ToAdd) {
      LLVM_DEBUG(dbgs() << "    key " << Key << "\n");
      OffsetBins[Key].insert(AccIndex);
    }
  };

  if (!AccExists) {
    AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty);
    assert((AccessList.size() == AccIndex + 1) &&
           "New Access should have been at AccIndex");
    LocalList.push_back(AccIndex);
    AddToBins(AccessList[AccIndex].getRanges());
    return ChangeStatus::CHANGED;
  }

  // Combine the new Access with the existing Access, and then update the
  // mapping in the offset bins.
  AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty);
  auto &Current = AccessList[AccIndex];
  auto Before = Current;
  Current &= Acc;
  if (Current == Before)
    return ChangeStatus::UNCHANGED;

  auto &ExistingRanges = Before.getRanges();
  auto &NewRanges = Current.getRanges();

  // Ranges that are in the old access but not the new access need to be removed
  // from the offset bins.
  AAPointerInfo::RangeList ToRemove;
  AAPointerInfo::RangeList::set_difference(ExistingRanges, NewRanges, ToRemove);
  LLVM_DEBUG(if (ToRemove.size()) dbgs()
                 << "[AAPointerInfo] Removing access from old offset bins\n";);

  for (auto Key : ToRemove) {
    LLVM_DEBUG(dbgs() << "    key " << Key << "\n");
    assert(OffsetBins.count(Key) && "Existing Access must be in some bin.");
    auto &Bin = OffsetBins[Key];
    assert(Bin.count(AccIndex) &&
           "Expected bin to actually contain the Access.");
    Bin.erase(AccIndex);
  }

  // Ranges that are in the new access but not the old access need to be added
  // to the offset bins.
  AAPointerInfo::RangeList ToAdd;
  AAPointerInfo::RangeList::set_difference(NewRanges, ExistingRanges, ToAdd);
  AddToBins(ToAdd);
  return ChangeStatus::CHANGED;
}

namespace {

/// A helper containing a list of offsets computed for a Use. Ideally this
/// list should be strictly ascending, but we ensure that only when we
/// actually translate the list of offsets to a RangeList.
struct OffsetInfo {
  using VecTy = SmallVector<int64_t>;
  using const_iterator = VecTy::const_iterator;
  VecTy Offsets;

  const_iterator begin() const { return Offsets.begin(); }
  const_iterator end() const { return Offsets.end(); }

  bool operator==(const OffsetInfo &RHS) const {
    return Offsets == RHS.Offsets;
  }

  bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }

  void insert(int64_t Offset) { Offsets.push_back(Offset); }
  bool isUnassigned() const { return Offsets.size() == 0; }

  bool isUnknown() const {
    if (isUnassigned())
      return false;
    if (Offsets.size() == 1)
      return Offsets.front() == AA::RangeTy::Unknown;
    return false;
  }

  void setUnknown() {
    Offsets.clear();
    Offsets.push_back(AA::RangeTy::Unknown);
  }

  void addToAll(int64_t Inc) {
    for (auto &Offset : Offsets) {
      Offset += Inc;
    }
  }

  /// Copy offsets from \p R into the current list.
  ///
  /// Ideally all lists should be strictly ascending, but we defer that to the
  /// actual use of the list. So we just blindly append here.
  void merge(const OffsetInfo &R) { Offsets.append(R.Offsets); }
};

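// For illustration: after insert(0) and insert(8), addToAll(4) yields the
// list [4, 12]; a later setUnknown() collapses it to [Unknown]. merge()
// blindly appends, so ascending order is only enforced once the offsets are
// translated into a RangeList.
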
#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const OffsetInfo &OI) {
  ListSeparator LS;
  OS << "[";
  for (auto Offset : OI) {
    OS << LS << Offset;
  }
  OS << "]";
  return OS;
}
#endif // NDEBUG

struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr(Attributor *A) const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(OffsetBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  virtual const_bin_iterator begin() const override { return State::begin(); }
  virtual const_bin_iterator end() const override { return State::end(); }
  virtual int64_t numOffsetBins() const override {
    return State::numOffsetBins();
  }

  bool forallInterferingAccesses(
      AA::RangeTy Range,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(Range, CB);
  }

  bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      bool FindInterferingWrites, bool FindInterferingReads,
      function_ref<bool(const Access &, bool)> UserCB, bool &HasBeenWrittenTo,
      AA::RangeTy &Range,
      function_ref<bool(const Access &)> SkipCB) const override {
    HasBeenWrittenTo = false;

    SmallPtrSet<const Access *, 8> DominatingWrites;
    SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;

    Function &Scope = *I.getFunction();
    bool IsKnownNoSync;
    bool IsAssumedNoSync = AA::hasAssumedIRAttr<Attribute::NoSync>(
        A, &QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL,
        IsKnownNoSync);
    const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
        IRPosition::function(Scope), &QueryingAA, DepClassTy::NONE);
    bool AllInSameNoSyncFn = IsAssumedNoSync;
    bool InstIsExecutedByInitialThreadOnly =
        ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I);

    // If the function is not ending in aligned barriers, we need the stores to
    // be in aligned barriers. The load being in one is not sufficient since the
    // store might be executed by a thread that disappears after, causing the
    // aligned barrier guarding the load to unblock and the load to read a value
    // that has no CFG path to the load.
    bool InstIsExecutedInAlignedRegion =
        FindInterferingReads && ExecDomainAA &&
        ExecDomainAA->isExecutedInAlignedRegion(A, I);

    if (InstIsExecutedInAlignedRegion || InstIsExecutedByInitialThreadOnly)
      A.recordDependence(*ExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);

    InformationCache &InfoCache = A.getInfoCache();
    bool IsThreadLocalObj =
        AA::isAssumedThreadLocalObject(A, getAssociatedValue(), *this);

    // Helper to determine if we need to consider threading, which we cannot
    // right now. However, if the function is (assumed) nosync, or all
    // instructions are executed by the initial thread only, we can ignore
    // threading. Also, thread-local objects do not require threading
    // reasoning. Finally, we can ignore threading if either access is
    // executed in an aligned region.
    auto CanIgnoreThreadingForInst = [&](const Instruction &I) -> bool {
      if (IsThreadLocalObj || AllInSameNoSyncFn)
        return true;
      const auto *FnExecDomainAA =
          I.getFunction() == &Scope
              ? ExecDomainAA
              : A.lookupAAFor<AAExecutionDomain>(
                    IRPosition::function(*I.getFunction()), &QueryingAA,
                    DepClassTy::NONE);
      if (!FnExecDomainAA)
        return false;
      if (InstIsExecutedInAlignedRegion ||
          (FindInterferingWrites &&
           FnExecDomainAA->isExecutedInAlignedRegion(A, I))) {
        A.recordDependence(*FnExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
        return true;
      }
      if (InstIsExecutedByInitialThreadOnly &&
          FnExecDomainAA->isExecutedByInitialThreadOnly(I)) {
        A.recordDependence(*FnExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
        return true;
      }
      return false;
    };

    // Helper to determine if the access is executed by the same thread as the
    // given instruction; for now it is sufficient to avoid any potential
    // threading effects as we cannot deal with them anyway.
    auto CanIgnoreThreading = [&](const Access &Acc) -> bool {
      return CanIgnoreThreadingForInst(*Acc.getRemoteInst()) ||
             (Acc.getRemoteInst() != Acc.getLocalInst() &&
              CanIgnoreThreadingForInst(*Acc.getLocalInst()));
    };

    // TODO: Use inter-procedural reachability and dominance.
    bool IsKnownNoRecurse;
    AA::hasAssumedIRAttr<Attribute::NoRecurse>(
        A, this, IRPosition::function(Scope), DepClassTy::OPTIONAL,
        IsKnownNoRecurse);

    // TODO: Use reaching kernels from AAKernelInfo (or move it to
    // AAExecutionDomain) such that we allow scopes other than kernels as long
    // as the reaching kernels are disjoint.
    bool InstInKernel = Scope.hasFnAttribute("kernel");
    bool ObjHasKernelLifetime = false;
    const bool UseDominanceReasoning =
        FindInterferingWrites && IsKnownNoRecurse;
    const DominatorTree *DT =
        InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(Scope);

    // Helper to check if a value has "kernel lifetime", that is it will not
    // outlive a GPU kernel. This is true for shared, constant, and local
    // globals on AMD and NVIDIA GPUs.
    auto HasKernelLifetime = [&](Value *V, Module &M) {
      if (!AA::isGPU(M))
        return false;
      switch (AA::GPUAddressSpace(V->getType()->getPointerAddressSpace())) {
      case AA::GPUAddressSpace::Shared:
      case AA::GPUAddressSpace::Constant:
      case AA::GPUAddressSpace::Local:
        return true;
      default:
        return false;
      }
    };

    // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
    // to determine if we should look at reachability from the callee. For
    // certain pointers we know the lifetime and we do not have to step into the
    // callee to determine reachability as the pointer would be dead in the
    // callee. See the conditional initialization below.
    std::function<bool(const Function &)> IsLiveInCalleeCB;

    if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
      // If the alloca containing function is not recursive the alloca
      // must be dead in the callee.
      const Function *AIFn = AI->getFunction();
      ObjHasKernelLifetime = AIFn->hasFnAttribute("kernel");
      bool IsKnownNoRecurse;
      if (AA::hasAssumedIRAttr<Attribute::NoRecurse>(
              A, this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL,
              IsKnownNoRecurse)) {
        IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
      }
    } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
      // If the global has kernel lifetime we can stop if we reach a kernel
      // as it is "dead" in the (unknown) callees.
      ObjHasKernelLifetime = HasKernelLifetime(GV, *GV->getParent());
      if (ObjHasKernelLifetime)
        IsLiveInCalleeCB = [](const Function &Fn) {
          return !Fn.hasFnAttribute("kernel");
        };
    }

1232      // Set of accesses/instructions that will overwrite the result and are
1233      // therefore blockers in the reachability traversal.
1234      AA::InstExclusionSetTy ExclusionSet;
1235  
1236      auto AccessCB = [&](const Access &Acc, bool Exact) {
1237        Function *AccScope = Acc.getRemoteInst()->getFunction();
1238        bool AccInSameScope = AccScope == &Scope;
1239  
1240        // If the object has kernel lifetime we can ignore accesses only reachable
1241        // by other kernels. For now we only skip accesses *in* other kernels.
1242        if (InstInKernel && ObjHasKernelLifetime && !AccInSameScope &&
1243            AccScope->hasFnAttribute("kernel"))
1244          return true;
1245  
1246        if (Exact && Acc.isMustAccess() && Acc.getRemoteInst() != &I) {
1247          if (Acc.isWrite() || (isa<LoadInst>(I) && Acc.isWriteOrAssumption()))
1248            ExclusionSet.insert(Acc.getRemoteInst());
1249        }
1250  
1251        if ((!FindInterferingWrites || !Acc.isWriteOrAssumption()) &&
1252            (!FindInterferingReads || !Acc.isRead()))
1253          return true;
1254  
1255        bool Dominates = FindInterferingWrites && DT && Exact &&
1256                         Acc.isMustAccess() && AccInSameScope &&
1257                         DT->dominates(Acc.getRemoteInst(), &I);
1258        if (Dominates)
1259          DominatingWrites.insert(&Acc);
1260  
1261        // Track if all interesting accesses are in the same `nosync` function as
1262        // the given instruction.
1263        AllInSameNoSyncFn &= Acc.getRemoteInst()->getFunction() == &Scope;
1264  
1265        InterferingAccesses.push_back({&Acc, Exact});
1266        return true;
1267      };
1268      if (!State::forallInterferingAccesses(I, AccessCB, Range))
1269        return false;
1270  
1271      HasBeenWrittenTo = !DominatingWrites.empty();
1272  
1273    // Dominating writes form a chain; find the least/lowest member.
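        // Dominance within a function is tree-shaped, so all writes that
        // dominate I lie on one root-to-I path. E.g., if W1 and W2 both
        // dominate I and W1 dominates W2, then W2 is the least/lowest member.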
1274      Instruction *LeastDominatingWriteInst = nullptr;
1275      for (const Access *Acc : DominatingWrites) {
1276        if (!LeastDominatingWriteInst) {
1277          LeastDominatingWriteInst = Acc->getRemoteInst();
1278        } else if (DT->dominates(LeastDominatingWriteInst,
1279                                 Acc->getRemoteInst())) {
1280          LeastDominatingWriteInst = Acc->getRemoteInst();
1281        }
1282      }
1283  
1284      // Helper to determine if we can skip a specific write access.
1285      auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1286        if (SkipCB && SkipCB(Acc))
1287          return true;
1288        if (!CanIgnoreThreading(Acc))
1289          return false;
1290  
1291        // Check read (RAW) dependences and write (WAR) dependences as necessary.
1292        // If we successfully excluded all effects we are interested in, the
1293        // access can be skipped.
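          // For example, if I is a load, a write access that cannot reach I
          // cannot influence the value I loads; if I is a store, a read access
          // that I cannot reach never observes the value I stores.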
1294        bool ReadChecked = !FindInterferingReads;
1295        bool WriteChecked = !FindInterferingWrites;
1296  
1297        // If the instruction cannot reach the access, the former does not
1298        // interfere with what the access reads.
1299        if (!ReadChecked) {
1300          if (!AA::isPotentiallyReachable(A, I, *Acc.getRemoteInst(), QueryingAA,
1301                                          &ExclusionSet, IsLiveInCalleeCB))
1302            ReadChecked = true;
1303        }
1304      // If the instruction cannot be reached from the access, the latter does not
1305        // interfere with what the instruction reads.
1306        if (!WriteChecked) {
1307          if (!AA::isPotentiallyReachable(A, *Acc.getRemoteInst(), I, QueryingAA,
1308                                          &ExclusionSet, IsLiveInCalleeCB))
1309            WriteChecked = true;
1310        }
1311  
1312        // If we still might be affected by the write of the access but there are
1313        // dominating writes in the function of the instruction
1314        // (HasBeenWrittenTo), we can try to reason that the access is overwritten
1315      // by them. This would have happened above if they are all in the same
1316      // function, so we only check the inter-procedural case. Effectively, we
1317      // want to show that there is no call after the dominating write that might
1318        // reach the access, and when it returns reach the instruction with the
1319        // updated value. To this end, we iterate all call sites, check if they
1320        // might reach the instruction without going through another access
1321        // (ExclusionSet) and at the same time might reach the access. However,
1322        // that is all part of AAInterFnReachability.
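          // Illustrative example: given "W; call @foo(); I" in Scope, where W
          // is a dominating write, a remote write in @foo can only interfere
          // if it is reachable from W (without passing I); otherwise any
          // effect it had is overwritten by W before I executes.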
1323        if (!WriteChecked && HasBeenWrittenTo &&
1324            Acc.getRemoteInst()->getFunction() != &Scope) {
1325  
1326          const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
1327              QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1328  
1329        // Without going backwards in the call tree, check if we can reach the
1330        // access from the least dominating write. Do not allow the traversal to
1331        // pass the instruction itself either.
1332          bool Inserted = ExclusionSet.insert(&I).second;
1333  
1334          if (!FnReachabilityAA ||
1335              !FnReachabilityAA->instructionCanReach(
1336                  A, *LeastDominatingWriteInst,
1337                  *Acc.getRemoteInst()->getFunction(), &ExclusionSet))
1338            WriteChecked = true;
1339  
1340          if (Inserted)
1341            ExclusionSet.erase(&I);
1342        }
1343  
1344        if (ReadChecked && WriteChecked)
1345          return true;
1346  
1347        if (!DT || !UseDominanceReasoning)
1348          return false;
1349        if (!DominatingWrites.count(&Acc))
1350          return false;
1351        return LeastDominatingWriteInst != Acc.getRemoteInst();
1352      };
1353  
1354    // Run the user callback on all accesses we cannot skip and return whether
1355    // it succeeded for all of them.
1356      for (auto &It : InterferingAccesses) {
1357        if ((!AllInSameNoSyncFn && !IsThreadLocalObj && !ExecDomainAA) ||
1358            !CanSkipAccess(*It.first, It.second)) {
1359          if (!UserCB(*It.first, It.second))
1360            return false;
1361        }
1362      }
1363      return true;
1364    }
1365  
1366    ChangeStatus translateAndAddStateFromCallee(Attributor &A,
1367                                                const AAPointerInfo &OtherAA,
1368                                                CallBase &CB) {
1369      using namespace AA::PointerInfo;
1370      if (!OtherAA.getState().isValidState() || !isValidState())
1371        return indicatePessimisticFixpoint();
1372  
1373      const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1374      bool IsByval = OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1375  
1376      // Combine the accesses bin by bin.
1377      ChangeStatus Changed = ChangeStatus::UNCHANGED;
1378      const auto &State = OtherAAImpl.getState();
1379      for (const auto &It : State) {
1380        for (auto Index : It.getSecond()) {
1381          const auto &RAcc = State.getAccess(Index);
1382          if (IsByval && !RAcc.isRead())
1383            continue;
1384          bool UsedAssumedInformation = false;
1385          AccessKind AK = RAcc.getKind();
1386          auto Content = A.translateArgumentToCallSiteContent(
1387              RAcc.getContent(), CB, *this, UsedAssumedInformation);
1388          AK = AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
1389          AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
1390  
1391          Changed |= addAccess(A, RAcc.getRanges(), CB, Content, AK,
1392                               RAcc.getType(), RAcc.getRemoteInst());
1393        }
1394      }
1395      return Changed;
1396    }
1397  
1398    ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1399                                      const OffsetInfo &Offsets, CallBase &CB) {
1400      using namespace AA::PointerInfo;
1401      if (!OtherAA.getState().isValidState() || !isValidState())
1402        return indicatePessimisticFixpoint();
1403  
1404      const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1405  
1406      // Combine the accesses bin by bin.
1407      ChangeStatus Changed = ChangeStatus::UNCHANGED;
1408      const auto &State = OtherAAImpl.getState();
1409      for (const auto &It : State) {
1410        for (auto Index : It.getSecond()) {
1411          const auto &RAcc = State.getAccess(Index);
1412          for (auto Offset : Offsets) {
1413            auto NewRanges = Offset == AA::RangeTy::Unknown
1414                                 ? AA::RangeTy::getUnknown()
1415                                 : RAcc.getRanges();
1416            if (!NewRanges.isUnknown()) {
1417              NewRanges.addToAllOffsets(Offset);
1418            }
1419            Changed |=
1420                addAccess(A, NewRanges, CB, RAcc.getContent(), RAcc.getKind(),
1421                          RAcc.getType(), RAcc.getRemoteInst());
1422          }
1423        }
1424      }
1425      return Changed;
1426    }
1427  
1428    /// Statistic tracking for all AAPointerInfo implementations.
1429    /// See AbstractAttribute::trackStatistics().
1430    void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1431  
1432    /// Dump the state into \p O.
1433    void dumpState(raw_ostream &O) {
1434      for (auto &It : OffsetBins) {
1435        O << "[" << It.first.Offset << "-" << It.first.Offset + It.first.Size
1436          << "] : " << It.getSecond().size() << "\n";
1437        for (auto AccIndex : It.getSecond()) {
1438          auto &Acc = AccessList[AccIndex];
1439          O << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
1440          if (Acc.getLocalInst() != Acc.getRemoteInst())
1441            O << "     -->                         " << *Acc.getRemoteInst()
1442              << "\n";
1443          if (!Acc.isWrittenValueYetUndetermined()) {
1444            if (isa_and_nonnull<Function>(Acc.getWrittenValue()))
1445              O << "       - c: func " << Acc.getWrittenValue()->getName()
1446                << "\n";
1447            else if (Acc.getWrittenValue())
1448              O << "       - c: " << *Acc.getWrittenValue() << "\n";
1449            else
1450              O << "       - c: <unknown>\n";
1451          }
1452        }
1453      }
1454    }
1455  };
1456  
1457  struct AAPointerInfoFloating : public AAPointerInfoImpl {
1458    using AccessKind = AAPointerInfo::AccessKind;
1459    AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1460        : AAPointerInfoImpl(IRP, A) {}
1461  
1462    /// Deal with an access and signal if it was handled successfully.
1463    bool handleAccess(Attributor &A, Instruction &I,
1464                      std::optional<Value *> Content, AccessKind Kind,
1465                      SmallVectorImpl<int64_t> &Offsets, ChangeStatus &Changed,
1466                      Type &Ty) {
1467      using namespace AA::PointerInfo;
1468      auto Size = AA::RangeTy::Unknown;
1469      const DataLayout &DL = A.getDataLayout();
1470      TypeSize AccessSize = DL.getTypeStoreSize(&Ty);
1471      if (!AccessSize.isScalable())
1472        Size = AccessSize.getFixedValue();
1473  
1474      // Make a strictly ascending list of offsets as required by addAccess()
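        // E.g., Offsets {8, 0, 8} becomes {0, 8} after the sort+unique below.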
1475      llvm::sort(Offsets);
1476      auto *Last = llvm::unique(Offsets);
1477      Offsets.erase(Last, Offsets.end());
1478  
1479      VectorType *VT = dyn_cast<VectorType>(&Ty);
1480      if (!VT || VT->getElementCount().isScalable() ||
1481          !Content.value_or(nullptr) || !isa<Constant>(*Content) ||
1482          (*Content)->getType() != VT ||
1483          DL.getTypeStoreSize(VT->getElementType()).isScalable()) {
1484        Changed = Changed | addAccess(A, {Offsets, Size}, I, Content, Kind, &Ty);
1485      } else {
1486        // Handle vector stores with constant content element-wise.
1487        // TODO: We could look for the elements or create instructions
1488        //       representing them.
1489        // TODO: We need to push the Content into the range abstraction
1490        //       (AA::RangeTy) to allow different content values for different
1491      //       ranges. Hence, support vectors storing different values.
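          // E.g., storing the constant <2 x i32> <i32 1, i32 2> at offset 0 is
          // recorded as two 4-byte accesses: content 1 at offset 0 and content
          // 2 at offset 4.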
1492        Type *ElementType = VT->getElementType();
1493        int64_t ElementSize = DL.getTypeStoreSize(ElementType).getFixedValue();
1494        auto *ConstContent = cast<Constant>(*Content);
1495        Type *Int32Ty = Type::getInt32Ty(ElementType->getContext());
1496        SmallVector<int64_t> ElementOffsets(Offsets.begin(), Offsets.end());
1497  
1498        for (int i = 0, e = VT->getElementCount().getFixedValue(); i != e; ++i) {
1499          Value *ElementContent = ConstantExpr::getExtractElement(
1500              ConstContent, ConstantInt::get(Int32Ty, i));
1501  
1502          // Add the element access.
1503          Changed = Changed | addAccess(A, {ElementOffsets, ElementSize}, I,
1504                                        ElementContent, Kind, ElementType);
1505  
1506          // Advance the offsets for the next element.
1507          for (auto &ElementOffset : ElementOffsets)
1508            ElementOffset += ElementSize;
1509        }
1510      }
1511      return true;
1512    };
1513  
1514    /// See AbstractAttribute::updateImpl(...).
1515    ChangeStatus updateImpl(Attributor &A) override;
1516  
1517    /// If the indices to \p GEP can be traced to constants, incorporate all
1518    /// of these into \p UsrOI.
1519    ///
1520    /// \return true iff \p UsrOI is updated.
1521    bool collectConstantsForGEP(Attributor &A, const DataLayout &DL,
1522                                OffsetInfo &UsrOI, const OffsetInfo &PtrOI,
1523                                const GEPOperator *GEP);
1524  
1525    /// See AbstractAttribute::trackStatistics()
1526    void trackStatistics() const override {
1527      AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1528    }
1529  };
1530  
1531  bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
1532                                                     const DataLayout &DL,
1533                                                     OffsetInfo &UsrOI,
1534                                                     const OffsetInfo &PtrOI,
1535                                                     const GEPOperator *GEP) {
1536    unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
1537    MapVector<Value *, APInt> VariableOffsets;
1538    APInt ConstantOffset(BitWidth, 0);
1539  
1540    assert(!UsrOI.isUnknown() && !PtrOI.isUnknown() &&
1541           "Don't look for constant values if the offset has already been "
1542           "determined to be unknown.");
1543  
1544    if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
1545      UsrOI.setUnknown();
1546      return true;
1547    }
1548  
1549    LLVM_DEBUG(dbgs() << "[AAPointerInfo] GEP offset is "
1550                      << (VariableOffsets.empty() ? "" : "not") << " constant "
1551                      << *GEP << "\n");
1552  
1553    auto Union = PtrOI;
1554    Union.addToAll(ConstantOffset.getSExtValue());
1555  
1556    // Each VI in VariableOffsets has a set of potential constant values. Every
1557    // combination of elements, picked one each from these sets, is separately
1558    // added to the original set of offsets, thus resulting in more offsets.
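        // E.g., with original offsets {0}, a constant offset of 4, and one
        // variable index with potential values {1, 2} at byte scale 8, the
        // resulting offsets are {4 + 1 * 8, 4 + 2 * 8} = {12, 20}.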
1559    for (const auto &VI : VariableOffsets) {
1560      auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
1561          *this, IRPosition::value(*VI.first), DepClassTy::OPTIONAL);
1562      if (!PotentialConstantsAA || !PotentialConstantsAA->isValidState()) {
1563        UsrOI.setUnknown();
1564        return true;
1565      }
1566  
1567      // UndefValue is treated as a zero, which leaves Union as is.
1568      if (PotentialConstantsAA->undefIsContained())
1569        continue;
1570  
1571      // We need at least one constant in every set to compute an actual offset.
1572      // Otherwise, we end up pessimizing AAPointerInfo by respecting offsets that
1573      // don't actually exist. In other words, the absence of constant values
1574      // implies that the operation can be assumed dead for now.
1575      auto &AssumedSet = PotentialConstantsAA->getAssumedSet();
1576      if (AssumedSet.empty())
1577        return false;
1578  
1579      OffsetInfo Product;
1580      for (const auto &ConstOffset : AssumedSet) {
1581        auto CopyPerOffset = Union;
1582        CopyPerOffset.addToAll(ConstOffset.getSExtValue() *
1583                               VI.second.getZExtValue());
1584        Product.merge(CopyPerOffset);
1585      }
1586      Union = Product;
1587    }
1588  
1589    UsrOI = std::move(Union);
1590    return true;
1591  }
1592  
1593  ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
1594    using namespace AA::PointerInfo;
1595    ChangeStatus Changed = ChangeStatus::UNCHANGED;
1596    const DataLayout &DL = A.getDataLayout();
1597    Value &AssociatedValue = getAssociatedValue();
1598  
1599    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1600    OffsetInfoMap[&AssociatedValue].insert(0);
1601  
1602    auto HandlePassthroughUser = [&](Value *Usr, Value *CurPtr, bool &Follow) {
1603      // One does not simply walk into a map and assign a reference to a possibly
1604      // new location. That can cause an invalidation before the assignment
1605      // happens, like so:
1606      //
1607      //   OffsetInfoMap[Usr] = OffsetInfoMap[CurPtr]; /* bad idea! */
1608      //
1609      // The RHS is a reference that may be invalidated by an insertion caused by
1610      // the LHS. So we ensure that the side-effect of the LHS happens first.
1611  
1612      assert(OffsetInfoMap.contains(CurPtr) &&
1613             "CurPtr does not exist in the map!");
1614  
1615      auto &UsrOI = OffsetInfoMap[Usr];
1616      auto &PtrOI = OffsetInfoMap[CurPtr];
1617      assert(!PtrOI.isUnassigned() &&
1618             "Cannot pass through if the input Ptr was not visited!");
1619      UsrOI.merge(PtrOI);
1620      Follow = true;
1621      return true;
1622    };
1623  
1624    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1625      Value *CurPtr = U.get();
1626      User *Usr = U.getUser();
1627      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in " << *Usr
1628                        << "\n");
1629      assert(OffsetInfoMap.count(CurPtr) &&
1630             "The current pointer offset should have been seeded!");
1631  
1632      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1633        if (CE->isCast())
1634          return HandlePassthroughUser(Usr, CurPtr, Follow);
1635        if (!isa<GEPOperator>(CE)) {
1636          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1637                            << "\n");
1638          return false;
1639        }
1640      }
1641      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1642        // Note the order here, the Usr access might change the map, CurPtr is
1643        // already in it though.
1644        auto &UsrOI = OffsetInfoMap[Usr];
1645        auto &PtrOI = OffsetInfoMap[CurPtr];
1646  
1647        if (UsrOI.isUnknown())
1648          return true;
1649  
1650        if (PtrOI.isUnknown()) {
1651          Follow = true;
1652          UsrOI.setUnknown();
1653          return true;
1654        }
1655  
1656        Follow = collectConstantsForGEP(A, DL, UsrOI, PtrOI, GEP);
1657        return true;
1658      }
1659      if (isa<PtrToIntInst>(Usr))
1660        return false;
1661      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr) || isa<ReturnInst>(Usr))
1662        return HandlePassthroughUser(Usr, CurPtr, Follow);
1663  
1664      // For PHIs we need to take care of the recurrence explicitly as the value
1665      // might change while we iterate through a loop. For now, we give up if
1666      // the PHI is not invariant.
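          // A typical recurrence (illustrative IR):
          //   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
          //   %p.next = getelementptr i8, ptr %p, i64 4
          // Here the offset of %p changes on every iteration, so %p is not
          // invariant.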
1667      if (auto *PHI = dyn_cast<PHINode>(Usr)) {
1668        // Note the order here, the Usr access might change the map, CurPtr is
1669        // already in it though.
1670        bool IsFirstPHIUser = !OffsetInfoMap.count(PHI);
1671        auto &UsrOI = OffsetInfoMap[PHI];
1672        auto &PtrOI = OffsetInfoMap[CurPtr];
1673  
1674        // Check if the PHI operand has already an unknown offset as we can't
1675        // improve on that anymore.
1676        if (PtrOI.isUnknown()) {
1677          LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand offset unknown "
1678                            << *CurPtr << " in " << *PHI << "\n");
1679          Follow = !UsrOI.isUnknown();
1680          UsrOI.setUnknown();
1681          return true;
1682        }
1683  
1684        // Check if the PHI is invariant (so far).
1685        if (UsrOI == PtrOI) {
1686          assert(!PtrOI.isUnassigned() &&
1687                 "Cannot assign if the current Ptr was not visited!");
1688          LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant (so far)");
1689          return true;
1690        }
1691  
1692        // Check if the PHI operand can be traced back to AssociatedValue.
1693        APInt Offset(
1694            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1695            0);
1696        Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets(
1697            DL, Offset, /* AllowNonInbounds */ true);
1698        auto It = OffsetInfoMap.find(CurPtrBase);
1699        if (It == OffsetInfoMap.end()) {
1700          LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1701                            << *CurPtr << " in " << *PHI
1702                            << " (base: " << *CurPtrBase << ")\n");
1703          UsrOI.setUnknown();
1704          Follow = true;
1705          return true;
1706        }
1707  
1708        // Check if the PHI operand is not dependent on the PHI itself. Every
1709        // recurrence is a cyclic net of PHIs in the data flow, and has an
1710        // equivalent Cycle in the control flow. One of those PHIs must be in the
1711        // header of that control flow Cycle. This is independent of the choice of
1712        // Cycles reported by CycleInfo. It is sufficient to check the PHIs in
1713        // every Cycle header; if such a node is marked unknown, this will
1714        // eventually propagate through the whole net of PHIs in the recurrence.
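            // For the pointer-increment recurrence sketched above, the header
            // PHI lands in this branch: if the traced-back offset disagrees
            // with what was recorded before, it is marked unknown below.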
1715        const auto *CI =
1716            A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
1717                *PHI->getFunction());
1718        if (mayBeInCycle(CI, cast<Instruction>(Usr), /* HeaderOnly */ true)) {
1719          auto BaseOI = It->getSecond();
1720          BaseOI.addToAll(Offset.getZExtValue());
1721          if (IsFirstPHIUser || BaseOI == UsrOI) {
1722            LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant " << *CurPtr
1723                              << " in " << *Usr << "\n");
1724            return HandlePassthroughUser(Usr, CurPtr, Follow);
1725          }
1726  
1727          LLVM_DEBUG(
1728              dbgs() << "[AAPointerInfo] PHI operand pointer offset mismatch "
1729                     << *CurPtr << " in " << *PHI << "\n");
1730          UsrOI.setUnknown();
1731          Follow = true;
1732          return true;
1733        }
1734  
1735        UsrOI.merge(PtrOI);
1736        Follow = true;
1737        return true;
1738      }
1739  
1740      if (auto *LoadI = dyn_cast<LoadInst>(Usr)) {
1741        // If the access is to a pointer that may or may not be the associated
1742        // value, e.g. due to a PHI, we cannot assume it will be read.
1743        AccessKind AK = AccessKind::AK_R;
1744        if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1745          AK = AccessKind(AK | AccessKind::AK_MUST);
1746        else
1747          AK = AccessKind(AK | AccessKind::AK_MAY);
1748        if (!handleAccess(A, *LoadI, /* Content */ nullptr, AK,
1749                          OffsetInfoMap[CurPtr].Offsets, Changed,
1750                          *LoadI->getType()))
1751          return false;
1752  
1753        auto IsAssumption = [](Instruction &I) {
1754          if (auto *II = dyn_cast<IntrinsicInst>(&I))
1755            return II->isAssumeLikeIntrinsic();
1756          return false;
1757        };
1758  
1759        auto IsImpactedInRange = [&](Instruction *FromI, Instruction *ToI) {
1760          // Check if the assumption and the load are executed together without
1761          // memory modification.
1762          do {
1763            if (FromI->mayWriteToMemory() && !IsAssumption(*FromI))
1764              return true;
1765            FromI = FromI->getNextNonDebugInstruction();
1766          } while (FromI && FromI != ToI);
1767          return false;
1768        };
1769  
1770        BasicBlock *BB = LoadI->getParent();
1771        auto IsValidAssume = [&](IntrinsicInst &IntrI) {
1772          if (IntrI.getIntrinsicID() != Intrinsic::assume)
1773            return false;
1774          BasicBlock *IntrBB = IntrI.getParent();
1775          if (IntrI.getParent() == BB) {
1776            if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(), &IntrI))
1777              return false;
1778          } else {
1779            auto PredIt = pred_begin(IntrBB);
1780            if (PredIt == pred_end(IntrBB))
1781              return false;
1782            if ((*PredIt) != BB)
1783              return false;
1784            if (++PredIt != pred_end(IntrBB))
1785              return false;
1786            for (auto *SuccBB : successors(BB)) {
1787              if (SuccBB == IntrBB)
1788                continue;
1789              if (isa<UnreachableInst>(SuccBB->getTerminator()))
1790                continue;
1791              return false;
1792            }
1793            if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(),
1794                                  BB->getTerminator()))
1795              return false;
1796            if (IsImpactedInRange(&IntrBB->front(), &IntrI))
1797              return false;
1798          }
1799          return true;
1800        };
1801  
1802        std::pair<Value *, IntrinsicInst *> Assumption;
1803        for (const Use &LoadU : LoadI->uses()) {
1804          if (auto *CmpI = dyn_cast<CmpInst>(LoadU.getUser())) {
1805            if (!CmpI->isEquality() || !CmpI->isTrueWhenEqual())
1806              continue;
1807            for (const Use &CmpU : CmpI->uses()) {
1808              if (auto *IntrI = dyn_cast<IntrinsicInst>(CmpU.getUser())) {
1809                if (!IsValidAssume(*IntrI))
1810                  continue;
1811                int Idx = CmpI->getOperandUse(0) == LoadU;
1812                Assumption = {CmpI->getOperand(Idx), IntrI};
1813                break;
1814              }
1815            }
1816          }
1817          if (Assumption.first)
1818            break;
1819        }
1820  
1821        // Check if we found an assumption associated with this load.
1822        if (!Assumption.first || !Assumption.second)
1823          return true;
1824  
1825        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Assumption found "
1826                          << *Assumption.second << ": " << *LoadI
1827                          << " == " << *Assumption.first << "\n");
1828        bool UsedAssumedInformation = false;
1829        std::optional<Value *> Content = nullptr;
1830        if (Assumption.first)
1831          Content =
1832              A.getAssumedSimplified(*Assumption.first, *this,
1833                                     UsedAssumedInformation, AA::Interprocedural);
1834        return handleAccess(
1835            A, *Assumption.second, Content, AccessKind::AK_ASSUMPTION,
1836            OffsetInfoMap[CurPtr].Offsets, Changed, *LoadI->getType());
1837      }
1838  
1839      auto HandleStoreLike = [&](Instruction &I, Value *ValueOp, Type &ValueTy,
1840                                 ArrayRef<Value *> OtherOps, AccessKind AK) {
1841        for (auto *OtherOp : OtherOps) {
1842          if (OtherOp == CurPtr) {
1843            LLVM_DEBUG(
1844                dbgs()
1845                << "[AAPointerInfo] Escaping use in store like instruction " << I
1846                << "\n");
1847            return false;
1848          }
1849        }
1850  
1851        // If the access is to a pointer that may or may not be the associated
1852        // value, e.g. due to a PHI, we cannot assume it will be written.
1853        if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1854          AK = AccessKind(AK | AccessKind::AK_MUST);
1855        else
1856          AK = AccessKind(AK | AccessKind::AK_MAY);
1857        bool UsedAssumedInformation = false;
1858        std::optional<Value *> Content = nullptr;
1859        if (ValueOp)
1860          Content = A.getAssumedSimplified(
1861              *ValueOp, *this, UsedAssumedInformation, AA::Interprocedural);
1862        return handleAccess(A, I, Content, AK, OffsetInfoMap[CurPtr].Offsets,
1863                            Changed, ValueTy);
1864      };
1865  
1866      if (auto *StoreI = dyn_cast<StoreInst>(Usr))
1867        return HandleStoreLike(*StoreI, StoreI->getValueOperand(),
1868                               *StoreI->getValueOperand()->getType(),
1869                               {StoreI->getValueOperand()}, AccessKind::AK_W);
1870      if (auto *RMWI = dyn_cast<AtomicRMWInst>(Usr))
1871        return HandleStoreLike(*RMWI, nullptr, *RMWI->getValOperand()->getType(),
1872                               {RMWI->getValOperand()}, AccessKind::AK_RW);
1873      if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(Usr))
1874        return HandleStoreLike(
1875            *CXI, nullptr, *CXI->getNewValOperand()->getType(),
1876            {CXI->getCompareOperand(), CXI->getNewValOperand()},
1877            AccessKind::AK_RW);
1878  
1879      if (auto *CB = dyn_cast<CallBase>(Usr)) {
1880        if (CB->isLifetimeStartOrEnd())
1881          return true;
1882        const auto *TLI =
1883            A.getInfoCache().getTargetLibraryInfoForFunction(*CB->getFunction());
1884        if (getFreedOperand(CB, TLI) == U)
1885          return true;
1886        if (CB->isArgOperand(&U)) {
1887          unsigned ArgNo = CB->getArgOperandNo(&U);
1888          const auto *CSArgPI = A.getAAFor<AAPointerInfo>(
1889              *this, IRPosition::callsite_argument(*CB, ArgNo),
1890              DepClassTy::REQUIRED);
1891          if (!CSArgPI)
1892            return false;
1893          Changed =
1894              translateAndAddState(A, *CSArgPI, OffsetInfoMap[CurPtr], *CB) |
1895              Changed;
1896          return isValidState();
1897        }
1898        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1899                          << "\n");
1900        // TODO: Allow some call uses
1901        return false;
1902      }
1903  
1904      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1905      return false;
1906    };
1907    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1908      assert(OffsetInfoMap.count(OldU) && "Old use should be known already!");
1909      if (OffsetInfoMap.count(NewU)) {
1910        LLVM_DEBUG({
1911          if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
1912            dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
1913                   << OffsetInfoMap[NewU] << " vs " << OffsetInfoMap[OldU]
1914                   << "\n";
1915          }
1916        });
1917        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1918      }
1919      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1920      return true;
1921    };
1922    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1923                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1924                           /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
1925      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
1926      return indicatePessimisticFixpoint();
1927    }
1928  
1929    LLVM_DEBUG({
1930      dbgs() << "Accesses by bin after update:\n";
1931      dumpState(dbgs());
1932    });
1933  
1934    return Changed;
1935  }
1936  
1937  struct AAPointerInfoReturned final : AAPointerInfoImpl {
1938    AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1939        : AAPointerInfoImpl(IRP, A) {}
1940  
1941    /// See AbstractAttribute::updateImpl(...).
1942    ChangeStatus updateImpl(Attributor &A) override {
1943      return indicatePessimisticFixpoint();
1944    }
1945  
1946    /// See AbstractAttribute::trackStatistics()
1947    void trackStatistics() const override {
1948      AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1949    }
1950  };
1951  
1952  struct AAPointerInfoArgument final : AAPointerInfoFloating {
1953    AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1954        : AAPointerInfoFloating(IRP, A) {}
1955  
1956    /// See AbstractAttribute::trackStatistics()
1957    void trackStatistics() const override {
1958      AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1959    }
1960  };
1961  
1962  struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1963    AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1964        : AAPointerInfoFloating(IRP, A) {}
1965  
1966    /// See AbstractAttribute::updateImpl(...).
1967    ChangeStatus updateImpl(Attributor &A) override {
1968      using namespace AA::PointerInfo;
1969      // We handle memory intrinsics explicitly, at least the first (=
1970      // destination) and second (=source) arguments as we know how they are
1971      // accessed.
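          // E.g., for llvm.memcpy(ptr %dst, ptr %src, i64 42, ...), argument 0
          // (%dst) gets a must-write access with range [0, 42) and argument 1
          // (%src) a must-read access with the same range; a non-constant
          // length results in an unknown range.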
1972      if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1973        ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1974        int64_t LengthVal = AA::RangeTy::Unknown;
1975        if (Length)
1976          LengthVal = Length->getSExtValue();
1977        unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1978        ChangeStatus Changed = ChangeStatus::UNCHANGED;
1979        if (ArgNo > 1) {
1980          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1981                            << *MI << "\n");
1982          return indicatePessimisticFixpoint();
1983        } else {
1984          auto Kind =
1985              ArgNo == 0 ? AccessKind::AK_MUST_WRITE : AccessKind::AK_MUST_READ;
1986          Changed =
1987              Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind, nullptr);
1988        }
1989        LLVM_DEBUG({
1990          dbgs() << "Accesses by bin after update:\n";
1991          dumpState(dbgs());
1992        });
1993  
1994        return Changed;
1995      }
1996  
1997      // TODO: Once we have call site specific value information we can provide
1998      //       call site specific liveness information and then it makes
1999      //       sense to specialize attributes for call sites arguments instead of
2000      //       redirecting requests to the callee argument.
2001      Argument *Arg = getAssociatedArgument();
2002      if (Arg) {
2003        const IRPosition &ArgPos = IRPosition::argument(*Arg);
2004        auto *ArgAA =
2005            A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
2006        if (ArgAA && ArgAA->getState().isValidState())
2007          return translateAndAddStateFromCallee(A, *ArgAA,
2008                                                *cast<CallBase>(getCtxI()));
2009        if (!Arg->getParent()->isDeclaration())
2010          return indicatePessimisticFixpoint();
2011      }
2012  
2013      bool IsKnownNoCapture;
2014      if (!AA::hasAssumedIRAttr<Attribute::NoCapture>(
2015              A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNoCapture))
2016        return indicatePessimisticFixpoint();
2017  
2018      bool IsKnown = false;
2019      if (AA::isAssumedReadNone(A, getIRPosition(), *this, IsKnown))
2020        return ChangeStatus::UNCHANGED;
2021      bool ReadOnly = AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown);
2022      auto Kind =
2023          ReadOnly ? AccessKind::AK_MAY_READ : AccessKind::AK_MAY_READ_WRITE;
2024      return addAccess(A, AA::RangeTy::getUnknown(), *getCtxI(), nullptr, Kind,
2025                       nullptr);
2026    }
2027  
2028    /// See AbstractAttribute::trackStatistics()
2029    void trackStatistics() const override {
2030      AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
2031    }
2032  };
2033  
2034  struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
2035    AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
2036        : AAPointerInfoFloating(IRP, A) {}
2037  
2038    /// See AbstractAttribute::trackStatistics()
2039    void trackStatistics() const override {
2040      AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
2041    }
2042  };
2043  } // namespace
2044  
2045  /// -----------------------NoUnwind Function Attribute--------------------------
2046  
2047  namespace {
2048  struct AANoUnwindImpl : AANoUnwind {
2049    AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
2050  
2051    /// See AbstractAttribute::initialize(...).
2052    void initialize(Attributor &A) override {
2053      bool IsKnown;
2054      assert(!AA::hasAssumedIRAttr<Attribute::NoUnwind>(
2055          A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2056      (void)IsKnown;
2057    }
2058  
2059    const std::string getAsStr(Attributor *A) const override {
2060      return getAssumed() ? "nounwind" : "may-unwind";
2061    }
2062  
2063    /// See AbstractAttribute::updateImpl(...).
2064    ChangeStatus updateImpl(Attributor &A) override {
2065      auto Opcodes = {
2066          (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
2067          (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
2068          (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
2069  
2070      auto CheckForNoUnwind = [&](Instruction &I) {
2071        if (!I.mayThrow(/* IncludePhaseOneUnwind */ true))
2072          return true;
2073  
2074        if (const auto *CB = dyn_cast<CallBase>(&I)) {
2075          bool IsKnownNoUnwind;
2076          return AA::hasAssumedIRAttr<Attribute::NoUnwind>(
2077              A, this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED,
2078              IsKnownNoUnwind);
2079        }
2080        return false;
2081      };
2082  
2083      bool UsedAssumedInformation = false;
2084      if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
2085                                     UsedAssumedInformation))
2086        return indicatePessimisticFixpoint();
2087  
2088      return ChangeStatus::UNCHANGED;
2089    }
2090  };
2091  
2092  struct AANoUnwindFunction final : public AANoUnwindImpl {
2093    AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
2094        : AANoUnwindImpl(IRP, A) {}
2095  
2096    /// See AbstractAttribute::trackStatistics()
2097    void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
2098  };
2099  
2100  /// NoUnwind attribute deduction for a call site.
2101  struct AANoUnwindCallSite final
2102      : AACalleeToCallSite<AANoUnwind, AANoUnwindImpl> {
2103    AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
2104        : AACalleeToCallSite<AANoUnwind, AANoUnwindImpl>(IRP, A) {}
2105  
2106    /// See AbstractAttribute::trackStatistics()
2107    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
2108  };
2109  } // namespace
2110  
2111  /// ------------------------ NoSync Function Attribute -------------------------
2112  
2113  bool AANoSync::isAlignedBarrier(const CallBase &CB, bool ExecutedAligned) {
2114    switch (CB.getIntrinsicID()) {
2115    case Intrinsic::nvvm_barrier0:
2116    case Intrinsic::nvvm_barrier0_and:
2117    case Intrinsic::nvvm_barrier0_or:
2118    case Intrinsic::nvvm_barrier0_popc:
2119      return true;
2120    case Intrinsic::amdgcn_s_barrier:
2121      if (ExecutedAligned)
2122        return true;
2123      break;
2124    default:
2125      break;
2126    }
2127    return hasAssumption(CB, KnownAssumptionString("ompx_aligned_barrier"));
2128  }
2129  
2130  bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
2131    if (!I->isAtomic())
2132      return false;
2133  
2134    if (auto *FI = dyn_cast<FenceInst>(I))
2135      // All legal orderings for fence are stronger than monotonic.
2136      return FI->getSyncScopeID() != SyncScope::SingleThread;
2137    if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
2138      // Unordered is not a legal ordering for cmpxchg.
2139      return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
2140              AI->getFailureOrdering() != AtomicOrdering::Monotonic);
2141    }
2142  
2143    AtomicOrdering Ordering;
2144    switch (I->getOpcode()) {
2145    case Instruction::AtomicRMW:
2146      Ordering = cast<AtomicRMWInst>(I)->getOrdering();
2147      break;
2148    case Instruction::Store:
2149      Ordering = cast<StoreInst>(I)->getOrdering();
2150      break;
2151    case Instruction::Load:
2152      Ordering = cast<LoadInst>(I)->getOrdering();
2153      break;
2154    default:
2155      llvm_unreachable(
2156          "New atomic operations need to be known in the attributor.");
2157    }
2158  
2159    return (Ordering != AtomicOrdering::Unordered &&
2160            Ordering != AtomicOrdering::Monotonic);
2161  }
2162  
2163  /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2164  /// which would be nosync except that they have a volatile flag.  All other
2165  /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2166  bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2167    if (auto *MI = dyn_cast<MemIntrinsic>(I))
2168      return !MI->isVolatile();
2169    return false;
2170  }
2171  
2172  namespace {
2173  struct AANoSyncImpl : AANoSync {
2174    AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2175  
2176    /// See AbstractAttribute::initialize(...).
2177    void initialize(Attributor &A) override {
2178      bool IsKnown;
2179      assert(!AA::hasAssumedIRAttr<Attribute::NoSync>(A, nullptr, getIRPosition(),
2180                                                      DepClassTy::NONE, IsKnown));
2181      (void)IsKnown;
2182    }
2183  
2184    const std::string getAsStr(Attributor *A) const override {
2185      return getAssumed() ? "nosync" : "may-sync";
2186    }
2187  
2188    /// See AbstractAttribute::updateImpl(...).
2189    ChangeStatus updateImpl(Attributor &A) override;
2190  };
2191  
2192  ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2193  
2194    auto CheckRWInstForNoSync = [&](Instruction &I) {
2195      return AA::isNoSyncInst(A, I, *this);
2196    };
2197  
2198    auto CheckForNoSync = [&](Instruction &I) {
2199      // At this point we handled all read/write effects and they are all
2200      // nosync, so they can be skipped.
2201      if (I.mayReadOrWriteMemory())
2202        return true;
2203  
2204      bool IsKnown;
2205      CallBase &CB = cast<CallBase>(I);
2206      if (AA::hasAssumedIRAttr<Attribute::NoSync>(
2207              A, this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL,
2208              IsKnown))
2209        return true;
2210  
2211      // non-convergent and readnone imply nosync.
2212      return !CB.isConvergent();
2213    };
2214  
2215    bool UsedAssumedInformation = false;
2216    if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2217                                            UsedAssumedInformation) ||
2218        !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2219                                           UsedAssumedInformation))
2220      return indicatePessimisticFixpoint();
2221  
2222    return ChangeStatus::UNCHANGED;
2223  }
2224  
2225  struct AANoSyncFunction final : public AANoSyncImpl {
2226    AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2227        : AANoSyncImpl(IRP, A) {}
2228  
2229    /// See AbstractAttribute::trackStatistics()
2230    void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2231  };
2232  
2233  /// NoSync attribute deduction for a call site.
2234  struct AANoSyncCallSite final : AACalleeToCallSite<AANoSync, AANoSyncImpl> {
2235    AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2236        : AACalleeToCallSite<AANoSync, AANoSyncImpl>(IRP, A) {}
2237  
2238    /// See AbstractAttribute::trackStatistics()
2239    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2240  };
2241  } // namespace
2242  
2243  /// ------------------------ No-Free Attributes ----------------------------
2244  
2245  namespace {
2246  struct AANoFreeImpl : public AANoFree {
2247    AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2248  
2249    /// See AbstractAttribute::initialize(...).
2250    void initialize(Attributor &A) override {
2251      bool IsKnown;
2252      assert(!AA::hasAssumedIRAttr<Attribute::NoFree>(A, nullptr, getIRPosition(),
2253                                                      DepClassTy::NONE, IsKnown));
2254      (void)IsKnown;
2255    }
2256  
2257    /// See AbstractAttribute::updateImpl(...).
2258    ChangeStatus updateImpl(Attributor &A) override {
2259      auto CheckForNoFree = [&](Instruction &I) {
2260        bool IsKnown;
2261        return AA::hasAssumedIRAttr<Attribute::NoFree>(
2262            A, this, IRPosition::callsite_function(cast<CallBase>(I)),
2263            DepClassTy::REQUIRED, IsKnown);
2264      };
2265  
2266      bool UsedAssumedInformation = false;
2267      if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2268                                             UsedAssumedInformation))
2269        return indicatePessimisticFixpoint();
2270      return ChangeStatus::UNCHANGED;
2271    }
2272  
2273    /// See AbstractAttribute::getAsStr().
2274    const std::string getAsStr(Attributor *A) const override {
2275      return getAssumed() ? "nofree" : "may-free";
2276    }
2277  };
2278  
2279  struct AANoFreeFunction final : public AANoFreeImpl {
2280    AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2281        : AANoFreeImpl(IRP, A) {}
2282  
2283    /// See AbstractAttribute::trackStatistics()
2284    void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2285  };
2286  
2287  /// NoFree attribute deduction for a call site.
2288  struct AANoFreeCallSite final : AACalleeToCallSite<AANoFree, AANoFreeImpl> {
2289    AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2290        : AACalleeToCallSite<AANoFree, AANoFreeImpl>(IRP, A) {}
2291  
2292    /// See AbstractAttribute::trackStatistics()
2293    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2294  };
2295  
2296  /// NoFree attribute for floating values.
2297  struct AANoFreeFloating : AANoFreeImpl {
2298    AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2299        : AANoFreeImpl(IRP, A) {}
2300  
2301    /// See AbstractAttribute::trackStatistics()
2302    void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2303  
2304    /// See AbstractAttribute::updateImpl(...).
2305    ChangeStatus updateImpl(Attributor &A) override {
2306      const IRPosition &IRP = getIRPosition();
2307  
2308      bool IsKnown;
2309      if (AA::hasAssumedIRAttr<Attribute::NoFree>(A, this,
2310                                                  IRPosition::function_scope(IRP),
2311                                                  DepClassTy::OPTIONAL, IsKnown))
2312        return ChangeStatus::UNCHANGED;
2313  
2314      Value &AssociatedValue = getIRPosition().getAssociatedValue();
2315      auto Pred = [&](const Use &U, bool &Follow) -> bool {
2316        Instruction *UserI = cast<Instruction>(U.getUser());
2317        if (auto *CB = dyn_cast<CallBase>(UserI)) {
2318          if (CB->isBundleOperand(&U))
2319            return false;
2320          if (!CB->isArgOperand(&U))
2321            return true;
2322          unsigned ArgNo = CB->getArgOperandNo(&U);
2323  
2324          bool IsKnown;
2325          return AA::hasAssumedIRAttr<Attribute::NoFree>(
2326              A, this, IRPosition::callsite_argument(*CB, ArgNo),
2327              DepClassTy::REQUIRED, IsKnown);
2328        }
2329  
2330        if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2331            isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2332          Follow = true;
2333          return true;
2334        }
2335        if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2336            isa<ReturnInst>(UserI))
2337          return true;
2338  
2339        // Unknown user.
2340        return false;
2341      };
2342      if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2343        return indicatePessimisticFixpoint();
2344  
2345      return ChangeStatus::UNCHANGED;
2346    }
2347  };
2348  
2349  /// NoFree attribute for a call site argument.
2350  struct AANoFreeArgument final : AANoFreeFloating {
2351    AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2352        : AANoFreeFloating(IRP, A) {}
2353  
2354    /// See AbstractAttribute::trackStatistics()
2355    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2356  };
2357  
2358  /// NoFree attribute for call site arguments.
2359  struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2360    AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2361        : AANoFreeFloating(IRP, A) {}
2362  
2363    /// See AbstractAttribute::updateImpl(...).
2364    ChangeStatus updateImpl(Attributor &A) override {
2365      // TODO: Once we have call site specific value information we can provide
2366      //       call site specific liveness information and then it makes
2367      //       sense to specialize attributes for call sites arguments instead of
2368      //       redirecting requests to the callee argument.
2369      Argument *Arg = getAssociatedArgument();
2370      if (!Arg)
2371        return indicatePessimisticFixpoint();
2372      const IRPosition &ArgPos = IRPosition::argument(*Arg);
2373      bool IsKnown;
2374      if (AA::hasAssumedIRAttr<Attribute::NoFree>(A, this, ArgPos,
2375                                                  DepClassTy::REQUIRED, IsKnown))
2376        return ChangeStatus::UNCHANGED;
2377      return indicatePessimisticFixpoint();
2378    }
2379  
2380    /// See AbstractAttribute::trackStatistics()
2381    void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2382  };
2383  
2384  /// NoFree attribute for function return value.
2385  struct AANoFreeReturned final : AANoFreeFloating {
2386    AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2387        : AANoFreeFloating(IRP, A) {
2388      llvm_unreachable("NoFree is not applicable to function returns!");
2389    }
2390  
2391    /// See AbstractAttribute::initialize(...).
2392    void initialize(Attributor &A) override {
2393      llvm_unreachable("NoFree is not applicable to function returns!");
2394    }
2395  
2396    /// See AbstractAttribute::updateImpl(...).
2397    ChangeStatus updateImpl(Attributor &A) override {
2398      llvm_unreachable("NoFree is not applicable to function returns!");
2399    }
2400  
2401    /// See AbstractAttribute::trackStatistics()
2402    void trackStatistics() const override {}
2403  };
2404  
2405  /// NoFree attribute deduction for a call site return value.
2406  struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2407    AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2408        : AANoFreeFloating(IRP, A) {}
2409  
2410    ChangeStatus manifest(Attributor &A) override {
2411      return ChangeStatus::UNCHANGED;
2412    }
2413    /// See AbstractAttribute::trackStatistics()
2414    void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2415  };
2416  } // namespace
2417  
2418  /// ------------------------ NonNull Argument Attribute ------------------------
2419  
2420  bool AANonNull::isImpliedByIR(Attributor &A, const IRPosition &IRP,
2421                                Attribute::AttrKind ImpliedAttributeKind,
2422                                bool IgnoreSubsumingPositions) {
2423    SmallVector<Attribute::AttrKind, 2> AttrKinds;
2424    AttrKinds.push_back(Attribute::NonNull);
2425    if (!NullPointerIsDefined(IRP.getAnchorScope(),
2426                              IRP.getAssociatedType()->getPointerAddressSpace()))
2427      AttrKinds.push_back(Attribute::Dereferenceable);
2428    if (A.hasAttr(IRP, AttrKinds, IgnoreSubsumingPositions, Attribute::NonNull))
2429      return true;
2430  
2431    DominatorTree *DT = nullptr;
2432    AssumptionCache *AC = nullptr;
2433    InformationCache &InfoCache = A.getInfoCache();
2434    if (const Function *Fn = IRP.getAnchorScope()) {
2435      if (!Fn->isDeclaration()) {
2436        DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2437        AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2438      }
2439    }
2440  
2441    SmallVector<AA::ValueAndContext> Worklist;
2442    if (IRP.getPositionKind() != IRP_RETURNED) {
2443      Worklist.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
2444    } else {
2445      bool UsedAssumedInformation = false;
2446      if (!A.checkForAllInstructions(
2447              [&](Instruction &I) {
2448                Worklist.push_back({*cast<ReturnInst>(I).getReturnValue(), &I});
2449                return true;
2450              },
2451              IRP.getAssociatedFunction(), nullptr, {Instruction::Ret},
2452              UsedAssumedInformation, false, /*CheckPotentiallyDead=*/true))
2453        return false;
2454    }
2455  
2456    if (llvm::any_of(Worklist, [&](AA::ValueAndContext VAC) {
2457          return !isKnownNonZero(
2458              VAC.getValue(),
2459              SimplifyQuery(A.getDataLayout(), DT, AC, VAC.getCtxI()));
2460        }))
2461      return false;
2462  
2463    A.manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
2464                                         Attribute::NonNull)});
2465    return true;
2466  }
2467  
2468  namespace {
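// For example (illustrative IR): given
//
//   %v = load i64, ptr %p
//
// the helper below derives 8 known dereferenceable bytes for %p from the
// access size and, if null is not a defined pointer here, sets \p IsNonNull.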
2469  static int64_t getKnownNonNullAndDerefBytesForUse(
2470      Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2471      const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2472    TrackUse = false;
2473  
2474    const Value *UseV = U->get();
2475    if (!UseV->getType()->isPointerTy())
2476      return 0;
2477  
2478    // We need to follow common pointer manipulation uses to the accesses they
2479    // feed into. For now, we try to be smart and avoid looking through things
2480    // we do not like, e.g., non-inbounds GEPs.
2481    if (isa<CastInst>(I)) {
2482      TrackUse = true;
2483      return 0;
2484    }
2485  
2486    if (isa<GetElementPtrInst>(I)) {
2487      TrackUse = true;
2488      return 0;
2489    }
2490  
2491    Type *PtrTy = UseV->getType();
2492    const Function *F = I->getFunction();
2493    bool NullPointerIsDefined =
2494        F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2495    const DataLayout &DL = A.getInfoCache().getDL();
2496    if (const auto *CB = dyn_cast<CallBase>(I)) {
2497      if (CB->isBundleOperand(U)) {
2498        if (RetainedKnowledge RK = getKnowledgeFromUse(
2499                U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2500          IsNonNull |=
2501              (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2502          return RK.ArgValue;
2503        }
2504        return 0;
2505      }
2506  
2507      if (CB->isCallee(U)) {
2508        IsNonNull |= !NullPointerIsDefined;
2509        return 0;
2510      }
2511  
2512      unsigned ArgNo = CB->getArgOperandNo(U);
2513      IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2514      // As long as we only use known information there is no need to track
2515      // dependences here.
2516      bool IsKnownNonNull;
2517      AA::hasAssumedIRAttr<Attribute::NonNull>(A, &QueryingAA, IRP,
2518                                               DepClassTy::NONE, IsKnownNonNull);
2519      IsNonNull |= IsKnownNonNull;
2520      auto *DerefAA =
2521          A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2522      return DerefAA ? DerefAA->getKnownDereferenceableBytes() : 0;
2523    }
2524  
2525    std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2526    if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() ||
2527        Loc->Size.isScalable() || I->isVolatile())
2528      return 0;
2529  
2530    int64_t Offset;
2531    const Value *Base =
2532        getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2533    if (Base && Base == &AssociatedValue) {
2534      int64_t DerefBytes = Loc->Size.getValue() + Offset;
2535      IsNonNull |= !NullPointerIsDefined;
2536      return std::max(int64_t(0), DerefBytes);
2537    }
2538  
2539    /// Corner case when an offset is 0.
2540    Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2541                                            /*AllowNonInbounds*/ true);
2542    if (Base && Base == &AssociatedValue && Offset == 0) {
2543      int64_t DerefBytes = Loc->Size.getValue();
2544      IsNonNull |= !NullPointerIsDefined;
2545      return std::max(int64_t(0), DerefBytes);
2546    }
2547  
2548    return 0;
2549  }
2550  
2551  struct AANonNullImpl : AANonNull {
2552    AANonNullImpl(const IRPosition &IRP, Attributor &A) : AANonNull(IRP, A) {}
2553  
2554    /// See AbstractAttribute::initialize(...).
2555    void initialize(Attributor &A) override {
2556      Value &V = *getAssociatedValue().stripPointerCasts();
2557      if (isa<ConstantPointerNull>(V)) {
2558        indicatePessimisticFixpoint();
2559        return;
2560      }
2561  
2562      if (Instruction *CtxI = getCtxI())
2563        followUsesInMBEC(*this, A, getState(), *CtxI);
2564    }
2565  
2566    /// See followUsesInMBEC
2567    bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2568                         AANonNull::StateType &State) {
2569      bool IsNonNull = false;
2570      bool TrackUse = false;
2571      getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2572                                         IsNonNull, TrackUse);
2573      State.setKnown(IsNonNull);
2574      return TrackUse;
2575    }
2576  
2577    /// See AbstractAttribute::getAsStr().
2578    const std::string getAsStr(Attributor *A) const override {
2579      return getAssumed() ? "nonnull" : "may-null";
2580    }
2581  };
2582  
2583  /// NonNull attribute for a floating value.
2584  struct AANonNullFloating : public AANonNullImpl {
2585    AANonNullFloating(const IRPosition &IRP, Attributor &A)
2586        : AANonNullImpl(IRP, A) {}
2587  
2588    /// See AbstractAttribute::updateImpl(...).
2589    ChangeStatus updateImpl(Attributor &A) override {
2590      auto CheckIRP = [&](const IRPosition &IRP) {
2591        bool IsKnownNonNull;
2592        return AA::hasAssumedIRAttr<Attribute::NonNull>(
2593            A, *this, IRP, DepClassTy::OPTIONAL, IsKnownNonNull);
2594      };
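    // For example (illustrative IR): for
    //   %phi = phi ptr [ %a, %entry ], [ %b, %other ]
    // the PHI handling below concludes nonnull only if both %a and %b are
    // assumed nonnull; a select is handled analogously via both of its
    // operands.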
2595  
2596      bool Stripped;
2597      bool UsedAssumedInformation = false;
2598      Value *AssociatedValue = &getAssociatedValue();
2599      SmallVector<AA::ValueAndContext> Values;
2600      if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
2601                                        AA::AnyScope, UsedAssumedInformation))
2602        Stripped = false;
2603      else
2604        Stripped =
2605            Values.size() != 1 || Values.front().getValue() != AssociatedValue;
2606  
2607      if (!Stripped) {
2608        bool IsKnown;
2609        if (auto *PHI = dyn_cast<PHINode>(AssociatedValue))
2610          if (llvm::all_of(PHI->incoming_values(), [&](Value *Op) {
2611                return AA::hasAssumedIRAttr<Attribute::NonNull>(
2612                    A, this, IRPosition::value(*Op), DepClassTy::OPTIONAL,
2613                    IsKnown);
2614              }))
2615            return ChangeStatus::UNCHANGED;
2616        if (auto *Select = dyn_cast<SelectInst>(AssociatedValue))
2617          if (AA::hasAssumedIRAttr<Attribute::NonNull>(
2618                  A, this, IRPosition::value(*Select->getFalseValue()),
2619                  DepClassTy::OPTIONAL, IsKnown) &&
2620              AA::hasAssumedIRAttr<Attribute::NonNull>(
2621                  A, this, IRPosition::value(*Select->getTrueValue()),
2622                  DepClassTy::OPTIONAL, IsKnown))
2623            return ChangeStatus::UNCHANGED;
2624  
2625        // If we haven't stripped anything we might still be able to use a
2626        // different AA, but only if the IRP changes. This is effectively the
2627        // case when we interpret this not as a call site value but as a
2628        // floating/argument value.
2629        const IRPosition AVIRP = IRPosition::value(*AssociatedValue);
2630        if (AVIRP == getIRPosition() || !CheckIRP(AVIRP))
2631          return indicatePessimisticFixpoint();
2632        return ChangeStatus::UNCHANGED;
2633      }
2634  
2635      for (const auto &VAC : Values)
2636        if (!CheckIRP(IRPosition::value(*VAC.getValue())))
2637          return indicatePessimisticFixpoint();
2638  
2639      return ChangeStatus::UNCHANGED;
2640    }
2641  
2642    /// See AbstractAttribute::trackStatistics()
2643    void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2644  };
2645  
2646  /// NonNull attribute for function return value.
2647  struct AANonNullReturned final
2648      : AAReturnedFromReturnedValues<AANonNull, AANonNull, AANonNull::StateType,
2649                                     false, AANonNull::IRAttributeKind, false> {
2650    AANonNullReturned(const IRPosition &IRP, Attributor &A)
2651        : AAReturnedFromReturnedValues<AANonNull, AANonNull, AANonNull::StateType,
2652                                       false, Attribute::NonNull, false>(IRP, A) {
2653    }
2654  
2655    /// See AbstractAttribute::getAsStr().
2656    const std::string getAsStr(Attributor *A) const override {
2657      return getAssumed() ? "nonnull" : "may-null";
2658    }
2659  
2660    /// See AbstractAttribute::trackStatistics()
2661    void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2662  };
2663  
2664  /// NonNull attribute for function argument.
2665  struct AANonNullArgument final
2666      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2667    AANonNullArgument(const IRPosition &IRP, Attributor &A)
2668        : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2669  
2670    /// See AbstractAttribute::trackStatistics()
2671    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2672  };
2673  
2674  struct AANonNullCallSiteArgument final : AANonNullFloating {
2675    AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2676        : AANonNullFloating(IRP, A) {}
2677  
2678    /// See AbstractAttribute::trackStatistics()
2679    void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2680  };
2681  
2682  /// NonNull attribute for a call site return position.
2683  struct AANonNullCallSiteReturned final
2684      : AACalleeToCallSite<AANonNull, AANonNullImpl> {
2685    AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2686        : AACalleeToCallSite<AANonNull, AANonNullImpl>(IRP, A) {}
2687  
2688    /// See AbstractAttribute::trackStatistics()
2689    void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2690  };
2691  } // namespace
2692  
2693  /// ------------------------ Must-Progress Attributes --------------------------
2694  namespace {
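// For example (illustrative): a function without the attribute can still be
// assumed `mustprogress` if it is `willreturn`, or if every call site of it
// is known `mustprogress`; that is what AAMustProgressFunction::updateImpl
// below checks.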
2695  struct AAMustProgressImpl : public AAMustProgress {
2696    AAMustProgressImpl(const IRPosition &IRP, Attributor &A)
2697        : AAMustProgress(IRP, A) {}
2698  
2699    /// See AbstractAttribute::initialize(...).
2700    void initialize(Attributor &A) override {
2701      bool IsKnown;
2702      assert(!AA::hasAssumedIRAttr<Attribute::MustProgress>(
2703          A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2704      (void)IsKnown;
2705    }
2706  
2707    /// See AbstractAttribute::getAsStr()
2708    const std::string getAsStr(Attributor *A) const override {
2709      return getAssumed() ? "mustprogress" : "may-not-progress";
2710    }
2711  };
2712  
2713  struct AAMustProgressFunction final : AAMustProgressImpl {
2714    AAMustProgressFunction(const IRPosition &IRP, Attributor &A)
2715        : AAMustProgressImpl(IRP, A) {}
2716  
2717    /// See AbstractAttribute::updateImpl(...).
2718    ChangeStatus updateImpl(Attributor &A) override {
2719      bool IsKnown;
2720      if (AA::hasAssumedIRAttr<Attribute::WillReturn>(
2721              A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnown)) {
2722        if (IsKnown)
2723          return indicateOptimisticFixpoint();
2724        return ChangeStatus::UNCHANGED;
2725      }
2726  
2727      auto CheckForMustProgress = [&](AbstractCallSite ACS) {
2728        IRPosition IPos = IRPosition::callsite_function(*ACS.getInstruction());
2729        bool IsKnownMustProgress;
2730        return AA::hasAssumedIRAttr<Attribute::MustProgress>(
2731            A, this, IPos, DepClassTy::REQUIRED, IsKnownMustProgress,
2732            /* IgnoreSubsumingPositions */ true);
2733      };
2734  
2735      bool AllCallSitesKnown = true;
2736      if (!A.checkForAllCallSites(CheckForMustProgress, *this,
2737                                  /* RequireAllCallSites */ true,
2738                                  AllCallSitesKnown))
2739        return indicatePessimisticFixpoint();
2740  
2741      return ChangeStatus::UNCHANGED;
2742    }
2743  
2744    /// See AbstractAttribute::trackStatistics()
2745    void trackStatistics() const override {
2746      STATS_DECLTRACK_FN_ATTR(mustprogress)
2747    }
2748  };
2749  
2750  /// MustProgress attribute deduction for a call site.
2751  struct AAMustProgressCallSite final : AAMustProgressImpl {
2752    AAMustProgressCallSite(const IRPosition &IRP, Attributor &A)
2753        : AAMustProgressImpl(IRP, A) {}
2754  
2755    /// See AbstractAttribute::updateImpl(...).
2756    ChangeStatus updateImpl(Attributor &A) override {
2757      // TODO: Once we have call site specific value information we can provide
2758      //       call site specific liveness information and then it makes
2759      //       sense to specialize attributes for call sites arguments instead of
2760      //       redirecting requests to the callee argument.
2761      const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
2762      bool IsKnownMustProgress;
2763      if (!AA::hasAssumedIRAttr<Attribute::MustProgress>(
2764              A, this, FnPos, DepClassTy::REQUIRED, IsKnownMustProgress))
2765        return indicatePessimisticFixpoint();
2766      return ChangeStatus::UNCHANGED;
2767    }
2768  
2769    /// See AbstractAttribute::trackStatistics()
2770    void trackStatistics() const override {
2771      STATS_DECLTRACK_CS_ATTR(mustprogress);
2772    }
2773  };
2774  } // namespace
2775  
2776  /// ------------------------ No-Recurse Attributes ----------------------------
2777  
2778  namespace {
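// For example (illustrative): a function @f can be marked `norecurse` once
// all of its live call sites sit in callers that are themselves `norecurse`,
// or once AAInterFnReachability shows @f cannot reach itself through the call
// graph.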
2779  struct AANoRecurseImpl : public AANoRecurse {
2780    AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2781  
2782    /// See AbstractAttribute::initialize(...).
2783    void initialize(Attributor &A) override {
2784      bool IsKnown;
2785      assert(!AA::hasAssumedIRAttr<Attribute::NoRecurse>(
2786          A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
2787      (void)IsKnown;
2788    }
2789  
2790    /// See AbstractAttribute::getAsStr()
2791    const std::string getAsStr(Attributor *A) const override {
2792      return getAssumed() ? "norecurse" : "may-recurse";
2793    }
2794  };
2795  
2796  struct AANoRecurseFunction final : AANoRecurseImpl {
2797    AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2798        : AANoRecurseImpl(IRP, A) {}
2799  
2800    /// See AbstractAttribute::updateImpl(...).
2801    ChangeStatus updateImpl(Attributor &A) override {
2802  
2803      // If all live call sites are known to be no-recurse, we are as well.
2804      auto CallSitePred = [&](AbstractCallSite ACS) {
2805        bool IsKnownNoRecurse;
2806        if (!AA::hasAssumedIRAttr<Attribute::NoRecurse>(
2807                A, this,
2808                IRPosition::function(*ACS.getInstruction()->getFunction()),
2809                DepClassTy::NONE, IsKnownNoRecurse))
2810          return false;
2811        return IsKnownNoRecurse;
2812      };
2813      bool UsedAssumedInformation = false;
2814      if (A.checkForAllCallSites(CallSitePred, *this, true,
2815                                 UsedAssumedInformation)) {
2816        // If we know all call sites and all are known no-recurse, we are done.
2817        // If all known call sites, which might not be all that exist, are known
2818        // to be no-recurse, we are not done but we can continue to assume
2819        // no-recurse. If one of the call sites we have not visited will become
2820        // live, another update is triggered.
2821        if (!UsedAssumedInformation)
2822          indicateOptimisticFixpoint();
2823        return ChangeStatus::UNCHANGED;
2824      }
2825  
2826      const AAInterFnReachability *EdgeReachability =
2827          A.getAAFor<AAInterFnReachability>(*this, getIRPosition(),
2828                                            DepClassTy::REQUIRED);
2829      if (EdgeReachability && EdgeReachability->canReach(A, *getAnchorScope()))
2830        return indicatePessimisticFixpoint();
2831      return ChangeStatus::UNCHANGED;
2832    }
2833  
2834    void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2835  };
2836  
2837  /// NoRecurse attribute deduction for a call site.
2838  struct AANoRecurseCallSite final
2839      : AACalleeToCallSite<AANoRecurse, AANoRecurseImpl> {
2840    AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2841        : AACalleeToCallSite<AANoRecurse, AANoRecurseImpl>(IRP, A) {}
2842  
2843    /// See AbstractAttribute::trackStatistics()
2844    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2845  };
2846  } // namespace
2847  
2848  /// ------------------------ No-Convergent Attribute --------------------------
2849  
2850  namespace {
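// For example (illustrative): a function carrying `convergent` whose callees
// are all known non-convergent declarations or definitions can have the
// attribute removed by AANonConvergentFunction::manifest below; intrinsics
// and unknown callees keep the deduction conservative.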
2851  struct AANonConvergentImpl : public AANonConvergent {
2852    AANonConvergentImpl(const IRPosition &IRP, Attributor &A)
2853        : AANonConvergent(IRP, A) {}
2854  
2855    /// See AbstractAttribute::getAsStr()
2856    const std::string getAsStr(Attributor *A) const override {
2857      return getAssumed() ? "non-convergent" : "may-be-convergent";
2858    }
2859  };
2860  
2861  struct AANonConvergentFunction final : AANonConvergentImpl {
2862    AANonConvergentFunction(const IRPosition &IRP, Attributor &A)
2863        : AANonConvergentImpl(IRP, A) {}
2864  
2865    /// See AbstractAttribute::updateImpl(...).
2866    ChangeStatus updateImpl(Attributor &A) override {
2867      // If all function calls are known to not be convergent, we are not
2868      // convergent.
2869      auto CalleeIsNotConvergent = [&](Instruction &Inst) {
2870        CallBase &CB = cast<CallBase>(Inst);
2871        auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
2872        if (!Callee || Callee->isIntrinsic()) {
2873          return false;
2874        }
2875        if (Callee->isDeclaration()) {
2876          return !Callee->hasFnAttribute(Attribute::Convergent);
2877        }
2878        const auto *ConvergentAA = A.getAAFor<AANonConvergent>(
2879            *this, IRPosition::function(*Callee), DepClassTy::REQUIRED);
2880        return ConvergentAA && ConvergentAA->isAssumedNotConvergent();
2881      };
2882  
2883      bool UsedAssumedInformation = false;
2884      if (!A.checkForAllCallLikeInstructions(CalleeIsNotConvergent, *this,
2885                                             UsedAssumedInformation)) {
2886        return indicatePessimisticFixpoint();
2887      }
2888      return ChangeStatus::UNCHANGED;
2889    }
2890  
2891    ChangeStatus manifest(Attributor &A) override {
2892      if (isKnownNotConvergent() &&
2893          A.hasAttr(getIRPosition(), Attribute::Convergent)) {
2894        A.removeAttrs(getIRPosition(), {Attribute::Convergent});
2895        return ChangeStatus::CHANGED;
2896      }
2897      return ChangeStatus::UNCHANGED;
2898    }
2899  
2900    void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(convergent) }
2901  };
2902  } // namespace
2903  
2904  /// -------------------- Undefined-Behavior Attributes ------------------------
2905  
2906  namespace {
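// For example (illustrative IR; assumes the default address space, where null
// is not a defined pointer): both of the following are recorded as known UB
// by the deduction below and replaced by `unreachable` in manifest():
//
//   store i32 0, ptr null            ; memory access through a null pointer
//   br i1 undef, label %a, label %b  ; conditional branch on undef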
2907  struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2908    AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2909        : AAUndefinedBehavior(IRP, A) {}
2910  
2911    /// See AbstractAttribute::updateImpl(...).
2912    // Inspects memory accesses through a pointer (and also branches etc.).
2913    ChangeStatus updateImpl(Attributor &A) override {
2914      const size_t UBPrevSize = KnownUBInsts.size();
2915      const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2916  
2917      auto InspectMemAccessInstForUB = [&](Instruction &I) {
2918        // The LangRef now states that volatile stores are not UB, so skip them.
2919        if (I.isVolatile() && I.mayWriteToMemory())
2920          return true;
2921  
2922        // Skip instructions that are already saved.
2923        if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2924          return true;
2925  
2926        // If we reach here, we know we have an instruction
2927        // that accesses memory through a pointer operand,
2928        // for which getPointerOperand() should give it to us.
2929        Value *PtrOp =
2930            const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2931        assert(PtrOp &&
2932               "Expected pointer operand of memory accessing instruction");
2933  
2934        // Either we stopped and the appropriate action was taken,
2935        // or we got back a simplified value to continue.
2936        std::optional<Value *> SimplifiedPtrOp =
2937            stopOnUndefOrAssumed(A, PtrOp, &I);
2938        if (!SimplifiedPtrOp || !*SimplifiedPtrOp)
2939          return true;
2940        const Value *PtrOpVal = *SimplifiedPtrOp;
2941  
2942        // A memory access through a pointer is considered UB
2943        // only if the pointer has constant null value.
2944        // TODO: Expand it to not only check constant values.
2945        if (!isa<ConstantPointerNull>(PtrOpVal)) {
2946          AssumedNoUBInsts.insert(&I);
2947          return true;
2948        }
2949        const Type *PtrTy = PtrOpVal->getType();
2950  
2951        // Because we only consider instructions inside functions,
2952        // assume that a parent function exists.
2953        const Function *F = I.getFunction();
2954  
2955      // A memory access using a constant null pointer is only considered UB
2956      // if the null pointer is _not_ defined for the target platform.
2957        if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2958          AssumedNoUBInsts.insert(&I);
2959        else
2960          KnownUBInsts.insert(&I);
2961        return true;
2962      };
2963  
2964      auto InspectBrInstForUB = [&](Instruction &I) {
2965        // A conditional branch instruction is considered UB if it has `undef`
2966        // condition.
2967  
2968        // Skip instructions that are already saved.
2969        if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2970          return true;
2971  
2972        // We know we have a branch instruction.
2973        auto *BrInst = cast<BranchInst>(&I);
2974  
2975        // Unconditional branches are never considered UB.
2976        if (BrInst->isUnconditional())
2977          return true;
2978  
2979        // Either we stopped and the appropriate action was taken,
2980        // or we got back a simplified value to continue.
2981        std::optional<Value *> SimplifiedCond =
2982            stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2983        if (!SimplifiedCond || !*SimplifiedCond)
2984          return true;
2985        AssumedNoUBInsts.insert(&I);
2986        return true;
2987      };
2988  
2989      auto InspectCallSiteForUB = [&](Instruction &I) {
2990        // Check whether a call site always causes UB or not.
2991  
2992        // Skip instructions that are already saved.
2993        if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2994          return true;
2995  
2996        // Check nonnull and noundef argument attribute violation for each
2997        // callsite.
2998        CallBase &CB = cast<CallBase>(I);
2999        auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3000        if (!Callee)
3001          return true;
3002        for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
3003          // If the current argument is known to be simplified to a null pointer
3004          // and the corresponding argument position is known to have the nonnull
3005          // attribute, the argument is poison. Furthermore, if the argument is
3006          // poison and the position is known to have the noundef attribute, this
3007          // call site is considered UB.
3008          if (idx >= Callee->arg_size())
3009            break;
3010          Value *ArgVal = CB.getArgOperand(idx);
3011          if (!ArgVal)
3012            continue;
3013          // Here, we handle three cases (see the illustrative example below).
3014          //   (1) Not having a value means it is dead. (we can replace the value
3015          //       with undef)
3016          //   (2) Simplified to undef. The argument violates the noundef attribute.
3017          //   (3) Simplified to a null pointer where it is known to be nonnull.
3018          //       The argument is a poison value and violates the noundef attribute.
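        // For example (illustrative IR): in
        //   call void @g(ptr nonnull noundef null)
        // the null argument is poison at a noundef position, so the call is
        // recorded as known UB per case (3).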
3019          IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
3020          bool IsKnownNoUndef;
3021          AA::hasAssumedIRAttr<Attribute::NoUndef>(
3022              A, this, CalleeArgumentIRP, DepClassTy::NONE, IsKnownNoUndef);
3023          if (!IsKnownNoUndef)
3024            continue;
3025          bool UsedAssumedInformation = false;
3026          std::optional<Value *> SimplifiedVal =
3027              A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
3028                                     UsedAssumedInformation, AA::Interprocedural);
3029          if (UsedAssumedInformation)
3030            continue;
3031          if (SimplifiedVal && !*SimplifiedVal)
3032            return true;
3033          if (!SimplifiedVal || isa<UndefValue>(**SimplifiedVal)) {
3034            KnownUBInsts.insert(&I);
3035            continue;
3036          }
3037          if (!ArgVal->getType()->isPointerTy() ||
3038              !isa<ConstantPointerNull>(**SimplifiedVal))
3039            continue;
3040          bool IsKnownNonNull;
3041          AA::hasAssumedIRAttr<Attribute::NonNull>(
3042              A, this, CalleeArgumentIRP, DepClassTy::NONE, IsKnownNonNull);
3043          if (IsKnownNonNull)
3044            KnownUBInsts.insert(&I);
3045        }
3046        return true;
3047      };
3048  
3049      auto InspectReturnInstForUB = [&](Instruction &I) {
3050        auto &RI = cast<ReturnInst>(I);
3051        // Either we stopped and the appropriate action was taken,
3052        // or we got back a simplified return value to continue.
3053        std::optional<Value *> SimplifiedRetValue =
3054            stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
3055        if (!SimplifiedRetValue || !*SimplifiedRetValue)
3056          return true;
3057  
3058        // Check if a return instruction always causes UB or not.
3059        // Note: It is guaranteed that the returned position of the anchor
3060        //       scope has noundef attribute when this is called.
3061        //       We also ensure the return position is not "assumed dead"
3062        //       because the returned value was then potentially simplified to
3063        //       `undef` in AAReturnedValues without removing the `noundef`
3064        //       attribute yet.
3065  
3066        // When the returned position has the noundef attribute, UB occurs in the
3067        // following cases.
3068        //   (1) Returned value is known to be undef.
3069        //   (2) The value is known to be a null pointer and the returned
3070        //       position has nonnull attribute (because the returned value is
3071        //       poison).
3072        if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
3073          bool IsKnownNonNull;
3074          AA::hasAssumedIRAttr<Attribute::NonNull>(
3075              A, this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE,
3076              IsKnownNonNull);
3077          if (IsKnownNonNull)
3078            KnownUBInsts.insert(&I);
3079        }
3080  
3081        return true;
3082      };
3083  
3084      bool UsedAssumedInformation = false;
3085      A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
3086                                {Instruction::Load, Instruction::Store,
3087                                 Instruction::AtomicCmpXchg,
3088                                 Instruction::AtomicRMW},
3089                                UsedAssumedInformation,
3090                                /* CheckBBLivenessOnly */ true);
3091      A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
3092                                UsedAssumedInformation,
3093                                /* CheckBBLivenessOnly */ true);
3094      A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
3095                                        UsedAssumedInformation);
3096  
3097    // If the returned position of the anchor scope has the noundef attribute,
3098    // check all return instructions.
3099      if (!getAnchorScope()->getReturnType()->isVoidTy()) {
3100        const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
3101        if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
3102          bool IsKnownNoUndef;
3103          AA::hasAssumedIRAttr<Attribute::NoUndef>(
3104              A, this, ReturnIRP, DepClassTy::NONE, IsKnownNoUndef);
3105          if (IsKnownNoUndef)
3106            A.checkForAllInstructions(InspectReturnInstForUB, *this,
3107                                      {Instruction::Ret}, UsedAssumedInformation,
3108                                      /* CheckBBLivenessOnly */ true);
3109        }
3110      }
3111  
3112      if (NoUBPrevSize != AssumedNoUBInsts.size() ||
3113          UBPrevSize != KnownUBInsts.size())
3114        return ChangeStatus::CHANGED;
3115      return ChangeStatus::UNCHANGED;
3116    }
3117  
3118    bool isKnownToCauseUB(Instruction *I) const override {
3119      return KnownUBInsts.count(I);
3120    }
3121  
3122    bool isAssumedToCauseUB(Instruction *I) const override {
3123      // In simple words, if an instruction is not in the set of instructions
3124      // assumed to _not_ cause UB, then it is assumed to cause UB (that
3125      // includes those in the KnownUBInsts set). The rest of the boilerplate
3126      // is to ensure that it is one of the instruction kinds we test
3127      // for UB.
3128  
3129      switch (I->getOpcode()) {
3130      case Instruction::Load:
3131      case Instruction::Store:
3132      case Instruction::AtomicCmpXchg:
3133      case Instruction::AtomicRMW:
3134        return !AssumedNoUBInsts.count(I);
3135      case Instruction::Br: {
3136        auto *BrInst = cast<BranchInst>(I);
3137        if (BrInst->isUnconditional())
3138          return false;
3139        return !AssumedNoUBInsts.count(I);
3140      } break;
3141      default:
3142        return false;
3143      }
3144      return false;
3145    }
3146  
3147    ChangeStatus manifest(Attributor &A) override {
3148      if (KnownUBInsts.empty())
3149        return ChangeStatus::UNCHANGED;
3150      for (Instruction *I : KnownUBInsts)
3151        A.changeToUnreachableAfterManifest(I);
3152      return ChangeStatus::CHANGED;
3153    }
3154  
3155    /// See AbstractAttribute::getAsStr()
3156    const std::string getAsStr(Attributor *A) const override {
3157      return getAssumed() ? "undefined-behavior" : "no-ub";
3158    }
3159  
3160    /// Note: The correctness of this analysis depends on the fact that the
3161    /// following 2 sets will stop changing after some point.
3162    /// "Change" here means that their size changes.
3163    /// The size of each set is monotonically increasing
3164    /// (we only add items to them) and it is upper bounded by the number of
3165    /// instructions in the processed function (we can never save more
3166    /// elements in either set than this number). Hence, at some point,
3167    /// they will stop increasing.
3168    /// Consequently, at some point, both sets will have stopped
3169    /// changing, effectively making the analysis reach a fixpoint.
3170  
3171    /// Note: These 2 sets are disjoint and an instruction can be considered
3172    /// one of 3 things:
3173    /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
3174    ///    the KnownUBInsts set.
3175    /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
3176    ///    has a reason to assume it).
3177  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
3178    ///    could not find a reason to assume or prove that it can cause UB,
3179    ///    hence it assumes it doesn't. We have a set for these instructions
3180    ///    so that we don't reprocess them in every update.
3181    ///    Note however that instructions in this set may cause UB.
3182  
3183  protected:
3184    /// A set of all live instructions _known_ to cause UB.
3185    SmallPtrSet<Instruction *, 8> KnownUBInsts;
3186  
3187  private:
3188    /// A set of all the (live) instructions that are assumed to _not_ cause UB.
3189    SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
3190  
3191    // Should be called on updates in which, if we're processing an instruction
3192    // \p I that depends on a value \p V, one of the following has to happen:
3193    // - If the value is assumed, then stop.
3194    // - If the value is known but undef, then consider it UB.
3195    // - Otherwise, do specific processing with the simplified value.
3196    // We return std::nullopt in the first 2 cases to signify that an appropriate
3197    // action was taken and the caller should stop.
3198    // Otherwise, we return the simplified value that the caller should
3199    // use for specific processing.
3200    std::optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
3201                                                Instruction *I) {
3202      bool UsedAssumedInformation = false;
3203      std::optional<Value *> SimplifiedV =
3204          A.getAssumedSimplified(IRPosition::value(*V), *this,
3205                                 UsedAssumedInformation, AA::Interprocedural);
3206      if (!UsedAssumedInformation) {
3207        // Don't depend on assumed values.
3208        if (!SimplifiedV) {
3209          // If it is known (which we tested above) but it doesn't have a value,
3210          // then we can assume `undef` and hence the instruction is UB.
3211          KnownUBInsts.insert(I);
3212          return std::nullopt;
3213        }
3214        if (!*SimplifiedV)
3215          return nullptr;
3216        V = *SimplifiedV;
3217      }
3218      if (isa<UndefValue>(V)) {
3219        KnownUBInsts.insert(I);
3220        return std::nullopt;
3221      }
3222      return V;
3223    }
3224  };
3225  
3226  struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
3227    AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
3228        : AAUndefinedBehaviorImpl(IRP, A) {}
3229  
3230    /// See AbstractAttribute::trackStatistics()
3231    void trackStatistics() const override {
3232      STATS_DECL(UndefinedBehaviorInstruction, Instruction,
3233                 "Number of instructions known to have UB");
3234      BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
3235          KnownUBInsts.size();
3236    }
3237  };
3238  } // namespace
3239  
3240  /// ------------------------ Will-Return Attributes ----------------------------
3241  
3242  namespace {
3243  // Helper function that checks whether a function has any cycle for which we
3244  // don't know whether it is bounded.
3245  // Loops with a known maximum trip count are bounded; any other cycle is not.
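// For example (illustrative): a loop like `for (int i = 0; i != 16; ++i)` has
// a constant maximum trip count and is treated as bounded, whereas
// `while (read() != 0) {}` generally has no such bound and makes this helper
// return true.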
3246  static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
3247    ScalarEvolution *SE =
3248        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
3249    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
3250    // If either SCEV or LoopInfo is not available for the function then we
3251    // assume any cycle to be an unbounded cycle.
3252    // We use scc_iterator, which uses Tarjan's algorithm, to find all the
3253    // maximal SCCs. To detect if there's a cycle, we only need the maximal ones.
3254    if (!SE || !LI) {
3255      for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
3256        if (SCCI.hasCycle())
3257          return true;
3258      return false;
3259    }
3260  
3261    // If there's irreducible control, the function may contain non-loop cycles.
3262    if (mayContainIrreducibleControl(F, LI))
3263      return true;
3264  
3265    // Any loop without a max trip count is treated as an unbounded cycle.
3266    for (auto *L : LI->getLoopsInPreorder()) {
3267      if (!SE->getSmallConstantMaxTripCount(L))
3268        return true;
3269    }
3270    return false;
3271  }
3272  
3273  struct AAWillReturnImpl : public AAWillReturn {
3274    AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
3275        : AAWillReturn(IRP, A) {}
3276  
3277    /// See AbstractAttribute::initialize(...).
3278    void initialize(Attributor &A) override {
3279      bool IsKnown;
3280      assert(!AA::hasAssumedIRAttr<Attribute::WillReturn>(
3281          A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
3282      (void)IsKnown;
3283    }
3284  
3285    /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
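  // For illustration: a `mustprogress` function that is (assumed) `readonly`
  // has no observable effect other than returning, so the forward-progress
  // guarantee implies it must eventually return.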
3286    bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
3287      if (!A.hasAttr(getIRPosition(), {Attribute::MustProgress}))
3288        return false;
3289  
3290      bool IsKnown;
3291      if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3292        return IsKnown || !KnownOnly;
3293      return false;
3294    }
3295  
3296    /// See AbstractAttribute::updateImpl(...).
3297    ChangeStatus updateImpl(Attributor &A) override {
3298      if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3299        return ChangeStatus::UNCHANGED;
3300  
3301      auto CheckForWillReturn = [&](Instruction &I) {
3302        IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
3303        bool IsKnown;
3304        if (AA::hasAssumedIRAttr<Attribute::WillReturn>(
3305                A, this, IPos, DepClassTy::REQUIRED, IsKnown)) {
3306          if (IsKnown)
3307            return true;
3308        } else {
3309          return false;
3310        }
3311        bool IsKnownNoRecurse;
3312        return AA::hasAssumedIRAttr<Attribute::NoRecurse>(
3313            A, this, IPos, DepClassTy::REQUIRED, IsKnownNoRecurse);
3314      };
3315  
3316      bool UsedAssumedInformation = false;
3317      if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3318                                             UsedAssumedInformation))
3319        return indicatePessimisticFixpoint();
3320  
3321      return ChangeStatus::UNCHANGED;
3322    }
3323  
3324    /// See AbstractAttribute::getAsStr()
3325    const std::string getAsStr(Attributor *A) const override {
3326      return getAssumed() ? "willreturn" : "may-noreturn";
3327    }
3328  };
3329  
3330  struct AAWillReturnFunction final : AAWillReturnImpl {
3331    AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3332        : AAWillReturnImpl(IRP, A) {}
3333  
3334    /// See AbstractAttribute::initialize(...).
3335    void initialize(Attributor &A) override {
3336      AAWillReturnImpl::initialize(A);
3337  
3338      Function *F = getAnchorScope();
3339      assert(F && "Did expect an anchor function");
3340      if (F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3341        indicatePessimisticFixpoint();
3342    }
3343  
3344    /// See AbstractAttribute::trackStatistics()
3345    void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3346  };
3347  
3348  /// WillReturn attribute deduction for a call site.
3349  struct AAWillReturnCallSite final
3350      : AACalleeToCallSite<AAWillReturn, AAWillReturnImpl> {
3351    AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3352        : AACalleeToCallSite<AAWillReturn, AAWillReturnImpl>(IRP, A) {}
3353  
3354    /// See AbstractAttribute::updateImpl(...).
3355    ChangeStatus updateImpl(Attributor &A) override {
3356      if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3357        return ChangeStatus::UNCHANGED;
3358  
3359      return AACalleeToCallSite::updateImpl(A);
3360    }
3361  
3362    /// See AbstractAttribute::trackStatistics()
3363    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3364  };
3365  } // namespace
3366  
3367  /// -------------------AAIntraFnReachability Attribute--------------------------
3368  
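// For example (illustrative CFG): with edges A -> {B, C}, B -> D, and C -> D,
// a query "can A reach D?" is answered Yes; excluding B's terminator still
// leaves the path through C, while excluding instructions in both B and C
// makes the query unreachable, a result that then depends on the exclusion
// set.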
3369  /// All information associated with a reachability query. This boilerplate code
3370  /// is used by both AAIntraFnReachability and AAInterFnReachability, with
3371  /// different \p ToTy values.
3372  template <typename ToTy> struct ReachabilityQueryInfo {
3373    enum class Reachable {
3374      No,
3375      Yes,
3376    };
3377  
3378    /// Start here,
3379    const Instruction *From = nullptr;
3380    /// reach this place,
3381    const ToTy *To = nullptr;
3382    /// without going through any of these instructions,
3383    const AA::InstExclusionSetTy *ExclusionSet = nullptr;
3384    /// and remember if it worked:
3385    Reachable Result = Reachable::No;
3386  
3387    /// Precomputed hash for this RQI.
3388    unsigned Hash = 0;
3389  
3390    unsigned computeHashValue() const {
3391      assert(Hash == 0 && "Computed hash twice!");
3392      using InstSetDMI = DenseMapInfo<const AA::InstExclusionSetTy *>;
3393      using PairDMI = DenseMapInfo<std::pair<const Instruction *, const ToTy *>>;
3394      return const_cast<ReachabilityQueryInfo<ToTy> *>(this)->Hash =
3395                 detail::combineHashValue(PairDMI::getHashValue({From, To}),
3396                                          InstSetDMI::getHashValue(ExclusionSet));
3397    }
3398  
3399    ReachabilityQueryInfo(const Instruction *From, const ToTy *To)
3400        : From(From), To(To) {}
3401  
3402    /// Constructor replacement to ensure unique and stable sets are used for the
3403    /// cache.
3404    ReachabilityQueryInfo(Attributor &A, const Instruction &From, const ToTy &To,
3405                          const AA::InstExclusionSetTy *ES, bool MakeUnique)
3406        : From(&From), To(&To), ExclusionSet(ES) {
3407  
3408      if (!ES || ES->empty()) {
3409        ExclusionSet = nullptr;
3410      } else if (MakeUnique) {
3411        ExclusionSet = A.getInfoCache().getOrCreateUniqueBlockExecutionSet(ES);
3412      }
3413    }
3414  
3415    ReachabilityQueryInfo(const ReachabilityQueryInfo &RQI)
3416        : From(RQI.From), To(RQI.To), ExclusionSet(RQI.ExclusionSet) {}
3417  };
3418  
3419  namespace llvm {
3420  template <typename ToTy> struct DenseMapInfo<ReachabilityQueryInfo<ToTy> *> {
3421    using InstSetDMI = DenseMapInfo<const AA::InstExclusionSetTy *>;
3422    using PairDMI = DenseMapInfo<std::pair<const Instruction *, const ToTy *>>;
3423  
3424    static ReachabilityQueryInfo<ToTy> EmptyKey;
3425    static ReachabilityQueryInfo<ToTy> TombstoneKey;
3426  
3427    static inline ReachabilityQueryInfo<ToTy> *getEmptyKey() { return &EmptyKey; }
3428    static inline ReachabilityQueryInfo<ToTy> *getTombstoneKey() {
3429      return &TombstoneKey;
3430    }
3431    static unsigned getHashValue(const ReachabilityQueryInfo<ToTy> *RQI) {
3432      return RQI->Hash ? RQI->Hash : RQI->computeHashValue();
3433    }
3434    static bool isEqual(const ReachabilityQueryInfo<ToTy> *LHS,
3435                        const ReachabilityQueryInfo<ToTy> *RHS) {
3436      if (!PairDMI::isEqual({LHS->From, LHS->To}, {RHS->From, RHS->To}))
3437        return false;
3438      return InstSetDMI::isEqual(LHS->ExclusionSet, RHS->ExclusionSet);
3439    }
3440  };
3441  
3442  #define DefineKeys(ToTy)                                                       \
3443    template <>                                                                  \
3444    ReachabilityQueryInfo<ToTy>                                                  \
3445        DenseMapInfo<ReachabilityQueryInfo<ToTy> *>::EmptyKey =                  \
3446            ReachabilityQueryInfo<ToTy>(                                         \
3447                DenseMapInfo<const Instruction *>::getEmptyKey(),                \
3448                DenseMapInfo<const ToTy *>::getEmptyKey());                      \
3449    template <>                                                                  \
3450    ReachabilityQueryInfo<ToTy>                                                  \
3451        DenseMapInfo<ReachabilityQueryInfo<ToTy> *>::TombstoneKey =              \
3452            ReachabilityQueryInfo<ToTy>(                                         \
3453                DenseMapInfo<const Instruction *>::getTombstoneKey(),            \
3454                DenseMapInfo<const ToTy *>::getTombstoneKey());
3455  
3456  DefineKeys(Instruction) DefineKeys(Function)
3457  #undef DefineKeys
3458  
3459  } // namespace llvm
3460  
3461  namespace {
3462  
3463  template <typename BaseTy, typename ToTy>
3464  struct CachedReachabilityAA : public BaseTy {
3465    using RQITy = ReachabilityQueryInfo<ToTy>;
3466  
3467    CachedReachabilityAA(const IRPosition &IRP, Attributor &A) : BaseTy(IRP, A) {}
3468  
3469    /// See AbstractAttribute::isQueryAA.
3470    bool isQueryAA() const override { return true; }
3471  
3472    /// See AbstractAttribute::updateImpl(...).
3473    ChangeStatus updateImpl(Attributor &A) override {
3474      ChangeStatus Changed = ChangeStatus::UNCHANGED;
3475      for (unsigned u = 0, e = QueryVector.size(); u < e; ++u) {
3476        RQITy *RQI = QueryVector[u];
3477        if (RQI->Result == RQITy::Reachable::No &&
3478            isReachableImpl(A, *RQI, /*IsTemporaryRQI=*/false))
3479          Changed = ChangeStatus::CHANGED;
3480      }
3481      return Changed;
3482    }
3483  
3484    virtual bool isReachableImpl(Attributor &A, RQITy &RQI,
3485                                 bool IsTemporaryRQI) = 0;
3486  
3487    bool rememberResult(Attributor &A, typename RQITy::Reachable Result,
3488                        RQITy &RQI, bool UsedExclusionSet, bool IsTemporaryRQI) {
3489      RQI.Result = Result;
3490  
3491      // Remove the temporary RQI from the cache.
3492      if (IsTemporaryRQI)
3493        QueryCache.erase(&RQI);
3494  
3495      // Insert a plain RQI (w/o exclusion set) if that makes sense. Two options:
3496      // 1) If it is reachable, it doesn't matter if we have an exclusion set for
3497      // this query. 2) We did not use the exclusion set, potentially because
3498      // there is none.
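    // For illustration: a plain "No" entry is the strongest cache hit. If
    // (From, To) is unreachable even without an exclusion set, every future
    // query for that pair is answered negatively by checkQueryCache, whatever
    // exclusion set it carries.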
3499      if (Result == RQITy::Reachable::Yes || !UsedExclusionSet) {
3500        RQITy PlainRQI(RQI.From, RQI.To);
3501        if (!QueryCache.count(&PlainRQI)) {
3502          RQITy *RQIPtr = new (A.Allocator) RQITy(RQI.From, RQI.To);
3503          RQIPtr->Result = Result;
3504          QueryVector.push_back(RQIPtr);
3505          QueryCache.insert(RQIPtr);
3506        }
3507      }
3508  
3509      // Check if we need to insert a new permanent RQI with the exclusion set.
3510      if (IsTemporaryRQI && Result != RQITy::Reachable::Yes && UsedExclusionSet) {
3511        assert((!RQI.ExclusionSet || !RQI.ExclusionSet->empty()) &&
3512               "Did not expect empty set!");
3513        RQITy *RQIPtr = new (A.Allocator)
3514            RQITy(A, *RQI.From, *RQI.To, RQI.ExclusionSet, true);
3515        assert(RQIPtr->Result == RQITy::Reachable::No && "Already reachable?");
3516        RQIPtr->Result = Result;
3517        assert(!QueryCache.count(RQIPtr));
3518        QueryVector.push_back(RQIPtr);
3519        QueryCache.insert(RQIPtr);
3520      }
3521  
3522      if (Result == RQITy::Reachable::No && IsTemporaryRQI)
3523        A.registerForUpdate(*this);
3524      return Result == RQITy::Reachable::Yes;
3525    }
3526  
3527    const std::string getAsStr(Attributor *A) const override {
3528      // TODO: Return the number of reachable queries.
3529      return "#queries(" + std::to_string(QueryVector.size()) + ")";
3530    }
3531  
3532    bool checkQueryCache(Attributor &A, RQITy &StackRQI,
3533                         typename RQITy::Reachable &Result) {
3534      if (!this->getState().isValidState()) {
3535        Result = RQITy::Reachable::Yes;
3536        return true;
3537      }
3538  
3539      // If we have an exclusion set we might be able to find our answer by
3540      // ignoring it first.
3541      if (StackRQI.ExclusionSet) {
3542        RQITy PlainRQI(StackRQI.From, StackRQI.To);
3543        auto It = QueryCache.find(&PlainRQI);
3544        if (It != QueryCache.end() && (*It)->Result == RQITy::Reachable::No) {
3545          Result = RQITy::Reachable::No;
3546          return true;
3547        }
3548      }
3549  
3550      auto It = QueryCache.find(&StackRQI);
3551      if (It != QueryCache.end()) {
3552        Result = (*It)->Result;
3553        return true;
3554      }
3555  
3556      // Insert a temporary for recursive queries. We will replace it with a
3557      // permanent entry later.
3558      QueryCache.insert(&StackRQI);
3559      return false;
3560    }
3561  
3562  private:
3563    SmallVector<RQITy *> QueryVector;
3564    DenseSet<RQITy *> QueryCache;
3565  };
3566  
3567  struct AAIntraFnReachabilityFunction final
3568      : public CachedReachabilityAA<AAIntraFnReachability, Instruction> {
3569    using Base = CachedReachabilityAA<AAIntraFnReachability, Instruction>;
3570    AAIntraFnReachabilityFunction(const IRPosition &IRP, Attributor &A)
3571        : Base(IRP, A) {
3572      DT = A.getInfoCache().getAnalysisResultForFunction<DominatorTreeAnalysis>(
3573          *IRP.getAssociatedFunction());
3574    }
3575  
3576    bool isAssumedReachable(
3577        Attributor &A, const Instruction &From, const Instruction &To,
3578        const AA::InstExclusionSetTy *ExclusionSet) const override {
3579      auto *NonConstThis = const_cast<AAIntraFnReachabilityFunction *>(this);
3580      if (&From == &To)
3581        return true;
3582  
3583      RQITy StackRQI(A, From, To, ExclusionSet, false);
3584      typename RQITy::Reachable Result;
3585      if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
3586        return NonConstThis->isReachableImpl(A, StackRQI,
3587                                             /*IsTemporaryRQI=*/true);
3588      return Result == RQITy::Reachable::Yes;
3589    }
3590  
3591    ChangeStatus updateImpl(Attributor &A) override {
3592      // We only depend on liveness. DeadEdges is all we care about; check if any
3593      // of them changed.
3594      auto *LivenessAA =
3595          A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL);
3596      if (LivenessAA &&
3597          llvm::all_of(DeadEdges,
3598                       [&](const auto &DeadEdge) {
3599                         return LivenessAA->isEdgeDead(DeadEdge.first,
3600                                                       DeadEdge.second);
3601                       }) &&
3602          llvm::all_of(DeadBlocks, [&](const BasicBlock *BB) {
3603            return LivenessAA->isAssumedDead(BB);
3604          })) {
3605        return ChangeStatus::UNCHANGED;
3606      }
3607      DeadEdges.clear();
3608      DeadBlocks.clear();
3609      return Base::updateImpl(A);
3610    }
3611  
3612    bool isReachableImpl(Attributor &A, RQITy &RQI,
3613                         bool IsTemporaryRQI) override {
3614      const Instruction *Origin = RQI.From;
3615      bool UsedExclusionSet = false;
3616  
3617      auto WillReachInBlock = [&](const Instruction &From, const Instruction &To,
3618                                  const AA::InstExclusionSetTy *ExclusionSet) {
3619        const Instruction *IP = &From;
3620        while (IP && IP != &To) {
3621          if (ExclusionSet && IP != Origin && ExclusionSet->count(IP)) {
3622            UsedExclusionSet = true;
3623            break;
3624          }
3625          IP = IP->getNextNode();
3626        }
3627        return IP == &To;
3628      };
3629  
3630      const BasicBlock *FromBB = RQI.From->getParent();
3631      const BasicBlock *ToBB = RQI.To->getParent();
3632      assert(FromBB->getParent() == ToBB->getParent() &&
3633             "Not an intra-procedural query!");
3634  
3635      // Check intra-block reachability; however, other reaching paths are still
3636      // possible.
3637      if (FromBB == ToBB &&
3638          WillReachInBlock(*RQI.From, *RQI.To, RQI.ExclusionSet))
3639        return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3640                              IsTemporaryRQI);
3641  
3642      // Check if reaching the ToBB block is sufficient or if even that would not
3643      // ensure reaching the target. In the latter case we are done.
3644      if (!WillReachInBlock(ToBB->front(), *RQI.To, RQI.ExclusionSet))
3645        return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3646                              IsTemporaryRQI);
3647  
3648      const Function *Fn = FromBB->getParent();
3649      SmallPtrSet<const BasicBlock *, 16> ExclusionBlocks;
3650      if (RQI.ExclusionSet)
3651        for (auto *I : *RQI.ExclusionSet)
3652          if (I->getFunction() == Fn)
3653            ExclusionBlocks.insert(I->getParent());
3654  
3655      // Check if we make it out of the FromBB block at all.
3656      if (ExclusionBlocks.count(FromBB) &&
3657          !WillReachInBlock(*RQI.From, *FromBB->getTerminator(),
3658                            RQI.ExclusionSet))
3659        return rememberResult(A, RQITy::Reachable::No, RQI, true, IsTemporaryRQI);
3660  
3661      auto *LivenessAA =
3662          A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL);
3663      if (LivenessAA && LivenessAA->isAssumedDead(ToBB)) {
3664        DeadBlocks.insert(ToBB);
3665        return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3666                              IsTemporaryRQI);
3667      }
3668  
3669      SmallPtrSet<const BasicBlock *, 16> Visited;
3670      SmallVector<const BasicBlock *, 16> Worklist;
3671      Worklist.push_back(FromBB);
3672  
3673      DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> LocalDeadEdges;
3674      while (!Worklist.empty()) {
3675        const BasicBlock *BB = Worklist.pop_back_val();
3676        if (!Visited.insert(BB).second)
3677          continue;
3678        for (const BasicBlock *SuccBB : successors(BB)) {
3679          if (LivenessAA && LivenessAA->isEdgeDead(BB, SuccBB)) {
3680            LocalDeadEdges.insert({BB, SuccBB});
3681            continue;
3682          }
3683          // We checked before if we just need to reach the ToBB block.
3684          if (SuccBB == ToBB)
3685            return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3686                                  IsTemporaryRQI);
3687          if (DT && ExclusionBlocks.empty() && DT->dominates(BB, ToBB))
3688            return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
3689                                  IsTemporaryRQI);
3690  
3691          if (ExclusionBlocks.count(SuccBB)) {
3692            UsedExclusionSet = true;
3693            continue;
3694          }
3695          Worklist.push_back(SuccBB);
3696        }
3697      }
3698  
3699      DeadEdges.insert(LocalDeadEdges.begin(), LocalDeadEdges.end());
3700      return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
3701                            IsTemporaryRQI);
3702    }
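        // Illustrative end-to-end sketch for isReachableImpl above (block
        // names are hypothetical): with From in %bb0, To in %bb2, and an
        // exclusion set covering all of %bb1 in a CFG %bb0 -> %bb1 -> %bb2,
        // %bb1 lands in ExclusionBlocks, the worklist never pushes it, %bb2
        // is never reached, and a negative result is remembered with
        // UsedExclusionSet set.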
3703  
3704    /// See AbstractAttribute::trackStatistics()
3705    void trackStatistics() const override {}
3706  
3707  private:
3708    // Set of assumed dead blocks we used in the last query. If any of them
3709    // changes, we update the state.
3710    DenseSet<const BasicBlock *> DeadBlocks;
3711  
3712    // Set of assumed dead edges we used in the last query. If any of them
3713    // changes, we update the state.
3714    DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> DeadEdges;
3715  
3716    /// The dominator tree of the function, used to short-circuit reasoning.
3717    const DominatorTree *DT = nullptr;
3718  };
3719  } // namespace
3720  
3721  /// ------------------------ NoAlias Argument Attribute ------------------------
3722  
3723  bool AANoAlias::isImpliedByIR(Attributor &A, const IRPosition &IRP,
3724                                Attribute::AttrKind ImpliedAttributeKind,
3725                                bool IgnoreSubsumingPositions) {
3726    assert(ImpliedAttributeKind == Attribute::NoAlias &&
3727           "Unexpected attribute kind");
3728    Value *Val = &IRP.getAssociatedValue();
3729    if (IRP.getPositionKind() != IRP_CALL_SITE_ARGUMENT) {
3730      if (isa<AllocaInst>(Val))
3731        return true;
3732    } else {
3733      IgnoreSubsumingPositions = true;
3734    }
3735  
3736    if (isa<UndefValue>(Val))
3737      return true;
3738  
3739    if (isa<ConstantPointerNull>(Val) &&
3740        !NullPointerIsDefined(IRP.getAnchorScope(),
3741                              Val->getType()->getPointerAddressSpace()))
3742      return true;
3743  
3744    if (A.hasAttr(IRP, {Attribute::ByVal, Attribute::NoAlias},
3745                  IgnoreSubsumingPositions, Attribute::NoAlias))
3746      return true;
3747  
3748    return false;
3749  }
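      // Illustrative sketch (hypothetical IR): for
      //   %p = alloca i32
      // the alloca trivially implies noalias at non-call-site-argument
      // positions, and a ConstantPointerNull value implies noalias whenever
      // null is not a defined pointer in its address space.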
3750  
3751  namespace {
3752  struct AANoAliasImpl : AANoAlias {
3753    AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3754      assert(getAssociatedType()->isPointerTy() &&
3755             "Noalias is a pointer attribute");
3756    }
3757  
3758    const std::string getAsStr(Attributor *A) const override {
3759      return getAssumed() ? "noalias" : "may-alias";
3760    }
3761  };
3762  
3763  /// NoAlias attribute for a floating value.
3764  struct AANoAliasFloating final : AANoAliasImpl {
3765    AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3766        : AANoAliasImpl(IRP, A) {}
3767  
3768    /// See AbstractAttribute::updateImpl(...).
3769    ChangeStatus updateImpl(Attributor &A) override {
3770      // TODO: Implement this.
3771      return indicatePessimisticFixpoint();
3772    }
3773  
3774    /// See AbstractAttribute::trackStatistics()
3775    void trackStatistics() const override {
3776      STATS_DECLTRACK_FLOATING_ATTR(noalias)
3777    }
3778  };
3779  
3780  /// NoAlias attribute for an argument.
3781  struct AANoAliasArgument final
3782      : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3783    using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3784    AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3785  
3786    /// See AbstractAttribute::update(...).
3787    ChangeStatus updateImpl(Attributor &A) override {
3788      // We have to make sure no-alias on the argument does not break
3789      // synchronization when this is a callback argument; see also [1] below.
3790      // If synchronization cannot be affected, we delegate to the base
3791      // updateImpl function; otherwise we give up for now.
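          // Hypothetical hazard sketch (broker and callback names invented):
          // if %p is handed to a parallel region through a callback broker,
          //   call void @broker(ptr @cb, ptr %p)
          // several callback instances may access %p concurrently, and
          // claiming noalias for the callback argument could license
          // reorderings that break the synchronization the broker provides.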
3792  
3793      // If the function is no-sync, no-alias cannot break synchronization.
3794      bool IsKnownNoSync;
3795      if (AA::hasAssumedIRAttr<Attribute::NoSync>(
3796              A, this, IRPosition::function_scope(getIRPosition()),
3797              DepClassTy::OPTIONAL, IsKnownNoSync))
3798        return Base::updateImpl(A);
3799  
3800      // If the argument is read-only, no-alias cannot break synchronization.
3801      bool IsKnown;
3802      if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3803        return Base::updateImpl(A);
3804  
3805      // If the argument is never passed through callbacks, no-alias cannot break
3806      // synchronization.
3807      bool UsedAssumedInformation = false;
3808      if (A.checkForAllCallSites(
3809              [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3810              true, UsedAssumedInformation))
3811        return Base::updateImpl(A);
3812  
3813      // TODO: add no-alias but make sure it doesn't break synchronization by
3814      // introducing fake uses. See:
3815      // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3816      //     International Workshop on OpenMP 2018,
3817      //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3818  
3819      return indicatePessimisticFixpoint();
3820    }
3821  
3822    /// See AbstractAttribute::trackStatistics()
3823    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3824  };
3825  
3826  struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3827    AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3828        : AANoAliasImpl(IRP, A) {}
3829  
3830    /// Determine if the underlying value may alias with the call site argument
3831    /// \p OtherArgNo of \p ICS (= the underlying call site).
3832    bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3833                              const AAMemoryBehavior &MemBehaviorAA,
3834                              const CallBase &CB, unsigned OtherArgNo) {
3835      // We do not need to worry about aliasing with the underlying IRP.
3836      if (this->getCalleeArgNo() == (int)OtherArgNo)
3837        return false;
3838  
3839      // If it is not a pointer or pointer vector we do not alias.
3840      const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3841      if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3842        return false;
3843  
3844      auto *CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3845          *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3846  
3847      // If the argument is readnone, there is no read-write aliasing.
3848      if (CBArgMemBehaviorAA && CBArgMemBehaviorAA->isAssumedReadNone()) {
3849        A.recordDependence(*CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3850        return false;
3851      }
3852  
3853      // If the argument is readonly and the underlying value is readonly, there
3854      // is no read-write aliasing.
3855      bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3856      if (CBArgMemBehaviorAA && CBArgMemBehaviorAA->isAssumedReadOnly() &&
3857          IsReadOnly) {
3858        A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3859        A.recordDependence(*CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3860        return false;
3861      }
3862  
3863      // We have to utilize actual alias analysis queries so we need the object.
3864      if (!AAR)
3865        AAR = A.getInfoCache().getAnalysisResultForFunction<AAManager>(
3866            *getAnchorScope());
3867  
3868      // Try to rule it out at the call site.
3869      bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3870      LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3871                           "callsite arguments: "
3872                        << getAssociatedValue() << " " << *ArgOp << " => "
3873                        << (IsAliasing ? "" : "no-") << "alias\n");
3874  
3875      return IsAliasing;
3876    }
3877  
3878    bool isKnownNoAliasDueToNoAliasPreservation(
3879        Attributor &A, AAResults *&AAR, const AAMemoryBehavior &MemBehaviorAA) {
3880      // We can deduce "noalias" if the following conditions hold.
3881      // (i)   Associated value is assumed to be noalias in the definition.
3882      // (ii)  Associated value is assumed to be no-capture in all the uses
3883      //       possibly executed before this callsite.
3884      // (iii) There is no other pointer argument which could alias with the
3885      //       value.
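          // Illustrative sketch (hypothetical IR) covering all three
          // conditions:
          //   %m = call noalias ptr @malloc(i64 4)
          //   call void @use(ptr %m, ptr %other)
          // (i) holds via the noalias return, (ii) holds if %m is not
          // captured before the call, and (iii) requires that %other cannot
          // alias %m (checked by the mayAliasWithArgument loop below).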
3886  
3887      auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3888        const auto *DerefAA = A.getAAFor<AADereferenceable>(
3889            *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3890        return DerefAA ? DerefAA->getAssumedDereferenceableBytes() : 0;
3891      };
3892  
3893      const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3894      const Function *ScopeFn = VIRP.getAnchorScope();
3895      // Check whether the value is captured in the scope using AANoCapture.
3896      // Look at CFG and check only uses possibly executed before this
3897      // callsite.
3898      auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3899        Instruction *UserI = cast<Instruction>(U.getUser());
3900  
3901      // If UserI is the current instruction and there is a single potential use
3902      // of the value in UserI, we allow the use.
3903        // TODO: We should inspect the operands and allow those that cannot alias
3904        //       with the value.
3905        if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3906          return true;
3907  
3908        if (ScopeFn) {
3909          if (auto *CB = dyn_cast<CallBase>(UserI)) {
3910            if (CB->isArgOperand(&U)) {
3911  
3912              unsigned ArgNo = CB->getArgOperandNo(&U);
3913  
3914              bool IsKnownNoCapture;
3915              if (AA::hasAssumedIRAttr<Attribute::NoCapture>(
3916                      A, this, IRPosition::callsite_argument(*CB, ArgNo),
3917                      DepClassTy::OPTIONAL, IsKnownNoCapture))
3918                return true;
3919            }
3920          }
3921  
3922          if (!AA::isPotentiallyReachable(
3923                  A, *UserI, *getCtxI(), *this, /* ExclusionSet */ nullptr,
3924                  [ScopeFn](const Function &Fn) { return &Fn != ScopeFn; }))
3925            return true;
3926        }
3927  
3928        // TODO: We should track the capturing uses in AANoCapture but the problem
3929        //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3930        //       a value in the module slice.
3931        switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3932        case UseCaptureKind::NO_CAPTURE:
3933          return true;
3934        case UseCaptureKind::MAY_CAPTURE:
3935          LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3936                            << "\n");
3937          return false;
3938        case UseCaptureKind::PASSTHROUGH:
3939          Follow = true;
3940          return true;
3941        }
3942        llvm_unreachable("unknown UseCaptureKind");
3943      };
3944  
3945      bool IsKnownNoCapture;
3946      const AANoCapture *NoCaptureAA = nullptr;
3947      bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
3948          A, this, VIRP, DepClassTy::NONE, IsKnownNoCapture, false, &NoCaptureAA);
3949      if (!IsAssumedNoCapture &&
3950          (!NoCaptureAA || !NoCaptureAA->isAssumedNoCaptureMaybeReturned())) {
3951        if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3952          LLVM_DEBUG(
3953              dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3954                     << " cannot be noalias as it is potentially captured\n");
3955          return false;
3956        }
3957      }
3958      if (NoCaptureAA)
3959        A.recordDependence(*NoCaptureAA, *this, DepClassTy::OPTIONAL);
3960  
3961      // Check there is no other pointer argument which could alias with the
3962      // value passed at this call site.
3963      // TODO: AbstractCallSite
3964      const auto &CB = cast<CallBase>(getAnchorValue());
3965      for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3966        if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3967          return false;
3968  
3969      return true;
3970    }
3971  
3972    /// See AbstractAttribute::updateImpl(...).
3973    ChangeStatus updateImpl(Attributor &A) override {
3974      // If the argument is readnone we are done as there are no accesses via the
3975      // argument.
3976      auto *MemBehaviorAA =
3977          A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3978      if (MemBehaviorAA && MemBehaviorAA->isAssumedReadNone()) {
3979        A.recordDependence(*MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3980        return ChangeStatus::UNCHANGED;
3981      }
3982  
3983      bool IsKnownNoAlias;
3984      const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3985      if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
3986              A, this, VIRP, DepClassTy::REQUIRED, IsKnownNoAlias)) {
3987        LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3988                          << " is not no-alias at the definition\n");
3989        return indicatePessimisticFixpoint();
3990      }
3991  
3992      AAResults *AAR = nullptr;
3993      if (MemBehaviorAA &&
3994          isKnownNoAliasDueToNoAliasPreservation(A, AAR, *MemBehaviorAA)) {
3995        LLVM_DEBUG(
3996            dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3997        return ChangeStatus::UNCHANGED;
3998      }
3999  
4000      return indicatePessimisticFixpoint();
4001    }
4002  
4003    /// See AbstractAttribute::trackStatistics()
4004    void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
4005  };
4006  
4007  /// NoAlias attribute for function return value.
4008  struct AANoAliasReturned final : AANoAliasImpl {
4009    AANoAliasReturned(const IRPosition &IRP, Attributor &A)
4010        : AANoAliasImpl(IRP, A) {}
4011  
4012    /// See AbstractAttribute::updateImpl(...).
4013    ChangeStatus updateImpl(Attributor &A) override {
4014  
4015      auto CheckReturnValue = [&](Value &RV) -> bool {
4016        if (Constant *C = dyn_cast<Constant>(&RV))
4017          if (C->isNullValue() || isa<UndefValue>(C))
4018            return true;
4019  
4020        // For now, we can only deduce noalias if we have call sites.
4021        // FIXME: add more support.
4022        if (!isa<CallBase>(&RV))
4023          return false;
4024  
4025        const IRPosition &RVPos = IRPosition::value(RV);
4026        bool IsKnownNoAlias;
4027        if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
4028                A, this, RVPos, DepClassTy::REQUIRED, IsKnownNoAlias))
4029          return false;
4030  
4031        bool IsKnownNoCapture;
4032        const AANoCapture *NoCaptureAA = nullptr;
4033        bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
4034            A, this, RVPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
4035            &NoCaptureAA);
4036        return IsAssumedNoCapture ||
4037               (NoCaptureAA && NoCaptureAA->isAssumedNoCaptureMaybeReturned());
4038      };
4039  
4040      if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
4041        return indicatePessimisticFixpoint();
4042  
4043      return ChangeStatus::UNCHANGED;
4044    }
4045  
4046    /// See AbstractAttribute::trackStatistics()
4047    void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
4048  };
4049  
4050  /// NoAlias attribute deduction for a call site return value.
4051  struct AANoAliasCallSiteReturned final
4052      : AACalleeToCallSite<AANoAlias, AANoAliasImpl> {
4053    AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
4054        : AACalleeToCallSite<AANoAlias, AANoAliasImpl>(IRP, A) {}
4055  
4056    /// See AbstractAttribute::trackStatistics()
4057    void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
4058  };
4059  } // namespace
4060  
4061  /// -------------------AAIsDead Function Attribute-----------------------
4062  
4063  namespace {
4064  struct AAIsDeadValueImpl : public AAIsDead {
4065    AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
4066  
4067    /// See AAIsDead::isAssumedDead().
4068    bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
4069  
4070    /// See AAIsDead::isKnownDead().
4071    bool isKnownDead() const override { return isKnown(IS_DEAD); }
4072  
4073    /// See AAIsDead::isAssumedDead(BasicBlock *).
4074    bool isAssumedDead(const BasicBlock *BB) const override { return false; }
4075  
4076    /// See AAIsDead::isKnownDead(BasicBlock *).
4077    bool isKnownDead(const BasicBlock *BB) const override { return false; }
4078  
4079    /// See AAIsDead::isAssumedDead(Instruction *I).
4080    bool isAssumedDead(const Instruction *I) const override {
4081      return I == getCtxI() && isAssumedDead();
4082    }
4083  
4084    /// See AAIsDead::isKnownDead(Instruction *I).
4085    bool isKnownDead(const Instruction *I) const override {
4086      return isAssumedDead(I) && isKnownDead();
4087    }
4088  
4089    /// See AbstractAttribute::getAsStr().
4090    const std::string getAsStr(Attributor *A) const override {
4091      return isAssumedDead() ? "assumed-dead" : "assumed-live";
4092    }
4093  
4094    /// Check if all uses are assumed dead.
4095    bool areAllUsesAssumedDead(Attributor &A, Value &V) {
4096      // Callers might not check the type; a void value has no uses.
4097      if (V.getType()->isVoidTy() || V.use_empty())
4098        return true;
4099  
4100      // If we replace a value with a constant there are no uses left afterwards.
4101      if (!isa<Constant>(V)) {
4102        if (auto *I = dyn_cast<Instruction>(&V))
4103          if (!A.isRunOn(*I->getFunction()))
4104            return false;
4105        bool UsedAssumedInformation = false;
4106        std::optional<Constant *> C =
4107            A.getAssumedConstant(V, *this, UsedAssumedInformation);
4108        if (!C || *C)
4109          return true;
4110      }
4111  
4112      auto UsePred = [&](const Use &U, bool &Follow) { return false; };
4113      // Explicitly set the dependence class to required because we want a long
4114      // chain of N dependent instructions to be considered live as soon as one is
4115      // without going through N update cycles. This is not required for
4116      // correctness.
4117      return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
4118                               DepClassTy::REQUIRED,
4119                               /* IgnoreDroppableUses */ false);
4120    }
4121  
4122    /// Determine if \p I is assumed to be side-effect free.
4123    bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
4124      if (!I || wouldInstructionBeTriviallyDead(I))
4125        return true;
4126  
4127      auto *CB = dyn_cast<CallBase>(I);
4128      if (!CB || isa<IntrinsicInst>(CB))
4129        return false;
4130  
4131      const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
4132  
4133      bool IsKnownNoUnwind;
4134      if (!AA::hasAssumedIRAttr<Attribute::NoUnwind>(
4135              A, this, CallIRP, DepClassTy::OPTIONAL, IsKnownNoUnwind))
4136        return false;
4137  
4138      bool IsKnown;
4139      return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
4140    }
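        // Minimal sketch of isAssumedSideEffectFree above (hypothetical IR):
        //   %r = call i32 @f(i32 %x)
        // is treated as side-effect free if the call site is assumed nounwind
        // and readonly; a volatile store, in contrast, is neither trivially
        // dead nor a call and is rejected.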
4141  };
4142  
4143  struct AAIsDeadFloating : public AAIsDeadValueImpl {
4144    AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
4145        : AAIsDeadValueImpl(IRP, A) {}
4146  
4147    /// See AbstractAttribute::initialize(...).
4148    void initialize(Attributor &A) override {
4149      AAIsDeadValueImpl::initialize(A);
4150  
4151      if (isa<UndefValue>(getAssociatedValue())) {
4152        indicatePessimisticFixpoint();
4153        return;
4154      }
4155  
4156      Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4157      if (!isAssumedSideEffectFree(A, I)) {
4158        if (!isa_and_nonnull<StoreInst>(I) && !isa_and_nonnull<FenceInst>(I))
4159          indicatePessimisticFixpoint();
4160        else
4161          removeAssumedBits(HAS_NO_EFFECT);
4162      }
4163    }
4164  
4165    bool isDeadFence(Attributor &A, FenceInst &FI) {
4166      const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
4167          IRPosition::function(*FI.getFunction()), *this, DepClassTy::NONE);
4168      if (!ExecDomainAA || !ExecDomainAA->isNoOpFence(FI))
4169        return false;
4170      A.recordDependence(*ExecDomainAA, *this, DepClassTy::OPTIONAL);
4171      return true;
4172    }
4173  
4174    bool isDeadStore(Attributor &A, StoreInst &SI,
4175                     SmallSetVector<Instruction *, 8> *AssumeOnlyInst = nullptr) {
4176      // The LangRef now states a volatile store is not UB/dead; let's skip them.
4177      if (SI.isVolatile())
4178        return false;
4179  
4180      // If we are collecting assumes to be deleted we are in the manifest stage.
4181      // It's problematic to collect the potential copies again now so we use the
4182      // cached ones.
4183      bool UsedAssumedInformation = false;
4184      if (!AssumeOnlyInst) {
4185        PotentialCopies.clear();
4186        if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
4187                                                 UsedAssumedInformation)) {
4188          LLVM_DEBUG(
4189              dbgs()
4190              << "[AAIsDead] Could not determine potential copies of store!\n");
4191          return false;
4192        }
4193      }
4194      LLVM_DEBUG(dbgs() << "[AAIsDead] Store has " << PotentialCopies.size()
4195                        << " potential copies.\n");
4196  
4197      InformationCache &InfoCache = A.getInfoCache();
4198      return llvm::all_of(PotentialCopies, [&](Value *V) {
4199        if (A.isAssumedDead(IRPosition::value(*V), this, nullptr,
4200                            UsedAssumedInformation))
4201          return true;
4202        if (auto *LI = dyn_cast<LoadInst>(V)) {
4203          if (llvm::all_of(LI->uses(), [&](const Use &U) {
4204                auto &UserI = cast<Instruction>(*U.getUser());
4205                if (InfoCache.isOnlyUsedByAssume(UserI)) {
4206                  if (AssumeOnlyInst)
4207                    AssumeOnlyInst->insert(&UserI);
4208                  return true;
4209                }
4210                return A.isAssumedDead(U, this, nullptr, UsedAssumedInformation);
4211              })) {
4212            return true;
4213          }
4214        }
4215        LLVM_DEBUG(dbgs() << "[AAIsDead] Potential copy " << *V
4216                          << " is assumed live!\n");
4217        return false;
4218      });
4219    }
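        // Illustrative sketch for isDeadStore above (hypothetical IR):
        //   store i32 1, ptr %p
        //   %v = load i32, ptr %p
        //   %c = icmp eq i32 %v, 1
        //   call void @llvm.assume(i1 %c)
        // The load is the only potential copy and feeds nothing but the
        // assume, so the store is considered dead; during manifest the assume
        // chain is collected in AssumeOnlyInst and deleted along with it.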
4220  
4221    /// See AbstractAttribute::getAsStr().
4222    const std::string getAsStr(Attributor *A) const override {
4223      Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4224      if (isa_and_nonnull<StoreInst>(I))
4225        if (isValidState())
4226          return "assumed-dead-store";
4227      if (isa_and_nonnull<FenceInst>(I))
4228        if (isValidState())
4229          return "assumed-dead-fence";
4230      return AAIsDeadValueImpl::getAsStr(A);
4231    }
4232  
4233    /// See AbstractAttribute::updateImpl(...).
4234    ChangeStatus updateImpl(Attributor &A) override {
4235      Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
4236      if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
4237        if (!isDeadStore(A, *SI))
4238          return indicatePessimisticFixpoint();
4239      } else if (auto *FI = dyn_cast_or_null<FenceInst>(I)) {
4240        if (!isDeadFence(A, *FI))
4241          return indicatePessimisticFixpoint();
4242      } else {
4243        if (!isAssumedSideEffectFree(A, I))
4244          return indicatePessimisticFixpoint();
4245        if (!areAllUsesAssumedDead(A, getAssociatedValue()))
4246          return indicatePessimisticFixpoint();
4247      }
4248      return ChangeStatus::UNCHANGED;
4249    }
4250  
4251    bool isRemovableStore() const override {
4252      return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
4253    }
4254  
4255    /// See AbstractAttribute::manifest(...).
4256    ChangeStatus manifest(Attributor &A) override {
4257      Value &V = getAssociatedValue();
4258      if (auto *I = dyn_cast<Instruction>(&V)) {
4259        // If we get here we basically know the users are all dead. We check
4260        // whether isAssumedSideEffectFree returns true here again because that
4261        // might not be the case: only the users may be dead while the
4262        // instruction (= the call) is still needed.
4263        if (auto *SI = dyn_cast<StoreInst>(I)) {
4264          SmallSetVector<Instruction *, 8> AssumeOnlyInst;
4265          bool IsDead = isDeadStore(A, *SI, &AssumeOnlyInst);
4266          (void)IsDead;
4267          assert(IsDead && "Store was assumed to be dead!");
4268          A.deleteAfterManifest(*I);
4269          for (size_t i = 0; i < AssumeOnlyInst.size(); ++i) {
4270            Instruction *AOI = AssumeOnlyInst[i];
4271            for (auto *Usr : AOI->users())
4272              AssumeOnlyInst.insert(cast<Instruction>(Usr));
4273            A.deleteAfterManifest(*AOI);
4274          }
4275          return ChangeStatus::CHANGED;
4276        }
4277        if (auto *FI = dyn_cast<FenceInst>(I)) {
4278          assert(isDeadFence(A, *FI));
4279          A.deleteAfterManifest(*FI);
4280          return ChangeStatus::CHANGED;
4281        }
4282        if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
4283          A.deleteAfterManifest(*I);
4284          return ChangeStatus::CHANGED;
4285        }
4286      }
4287      return ChangeStatus::UNCHANGED;
4288    }
4289  
4290    /// See AbstractAttribute::trackStatistics()
4291    void trackStatistics() const override {
4292      STATS_DECLTRACK_FLOATING_ATTR(IsDead)
4293    }
4294  
4295  private:
4296    // The potential copies of a dead store, used for deletion during manifest.
4297    SmallSetVector<Value *, 4> PotentialCopies;
4298  };
4299  
4300  struct AAIsDeadArgument : public AAIsDeadFloating {
4301    AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
4302        : AAIsDeadFloating(IRP, A) {}
4303  
4304    /// See AbstractAttribute::manifest(...).
4305    ChangeStatus manifest(Attributor &A) override {
4306      Argument &Arg = *getAssociatedArgument();
4307      if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
4308        if (A.registerFunctionSignatureRewrite(
4309                Arg, /* ReplacementTypes */ {},
4310                Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
4311                Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
4312          return ChangeStatus::CHANGED;
4313        }
4314      return ChangeStatus::UNCHANGED;
4315    }
4316  
4317    /// See AbstractAttribute::trackStatistics()
4318    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
4319  };
4320  
4321  struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
4322    AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
4323        : AAIsDeadValueImpl(IRP, A) {}
4324  
4325    /// See AbstractAttribute::initialize(...).
4326    void initialize(Attributor &A) override {
4327      AAIsDeadValueImpl::initialize(A);
4328      if (isa<UndefValue>(getAssociatedValue()))
4329        indicatePessimisticFixpoint();
4330    }
4331  
4332    /// See AbstractAttribute::updateImpl(...).
4333    ChangeStatus updateImpl(Attributor &A) override {
4334      // TODO: Once we have call site specific value information we can provide
4335      //       call site specific liveness information and then it makes
4336      //       sense to specialize attributes for call sites arguments instead of
4337      //       redirecting requests to the callee argument.
4338      Argument *Arg = getAssociatedArgument();
4339      if (!Arg)
4340        return indicatePessimisticFixpoint();
4341      const IRPosition &ArgPos = IRPosition::argument(*Arg);
4342      auto *ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
4343      if (!ArgAA)
4344        return indicatePessimisticFixpoint();
4345      return clampStateAndIndicateChange(getState(), ArgAA->getState());
4346    }
4347  
4348    /// See AbstractAttribute::manifest(...).
4349    ChangeStatus manifest(Attributor &A) override {
4350      CallBase &CB = cast<CallBase>(getAnchorValue());
4351      Use &U = CB.getArgOperandUse(getCallSiteArgNo());
4352      assert(!isa<UndefValue>(U.get()) &&
4353             "Expected undef values to be filtered out!");
4354      UndefValue &UV = *UndefValue::get(U->getType());
4355      if (A.changeUseAfterManifest(U, UV))
4356        return ChangeStatus::CHANGED;
4357      return ChangeStatus::UNCHANGED;
4358    }
4359  
4360    /// See AbstractAttribute::trackStatistics()
4361    void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
4362  };
4363  
4364  struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
4365    AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
4366        : AAIsDeadFloating(IRP, A) {}
4367  
4368    /// See AAIsDead::isAssumedDead().
4369    bool isAssumedDead() const override {
4370      return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
4371    }
4372  
4373    /// See AbstractAttribute::initialize(...).
4374    void initialize(Attributor &A) override {
4375      AAIsDeadFloating::initialize(A);
4376      if (isa<UndefValue>(getAssociatedValue())) {
4377        indicatePessimisticFixpoint();
4378        return;
4379      }
4380  
4381      // We track this separately as a secondary state.
4382      IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
4383    }
4384  
4385    /// See AbstractAttribute::updateImpl(...).
4386    ChangeStatus updateImpl(Attributor &A) override {
4387      ChangeStatus Changed = ChangeStatus::UNCHANGED;
4388      if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
4389        IsAssumedSideEffectFree = false;
4390        Changed = ChangeStatus::CHANGED;
4391      }
4392      if (!areAllUsesAssumedDead(A, getAssociatedValue()))
4393        return indicatePessimisticFixpoint();
4394      return Changed;
4395    }
4396  
4397    /// See AbstractAttribute::trackStatistics()
4398    void trackStatistics() const override {
4399      if (IsAssumedSideEffectFree)
4400        STATS_DECLTRACK_CSRET_ATTR(IsDead)
4401      else
4402        STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
4403    }
4404  
4405    /// See AbstractAttribute::getAsStr().
4406    const std::string getAsStr(Attributor *A) const override {
4407      return isAssumedDead()
4408                 ? "assumed-dead"
4409                 : (getAssumed() ? "assumed-dead-users" : "assumed-live");
4410    }
4411  
4412  private:
4413    bool IsAssumedSideEffectFree = true;
4414  };
4415  
4416  struct AAIsDeadReturned : public AAIsDeadValueImpl {
4417    AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
4418        : AAIsDeadValueImpl(IRP, A) {}
4419  
4420    /// See AbstractAttribute::updateImpl(...).
4421    ChangeStatus updateImpl(Attributor &A) override {
4422  
4423      bool UsedAssumedInformation = false;
4424      A.checkForAllInstructions([](Instruction &) { return true; }, *this,
4425                                {Instruction::Ret}, UsedAssumedInformation);
4426  
4427      auto PredForCallSite = [&](AbstractCallSite ACS) {
4428        if (ACS.isCallbackCall() || !ACS.getInstruction())
4429          return false;
4430        return areAllUsesAssumedDead(A, *ACS.getInstruction());
4431      };
4432  
4433      if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4434                                  UsedAssumedInformation))
4435        return indicatePessimisticFixpoint();
4436  
4437      return ChangeStatus::UNCHANGED;
4438    }
4439  
4440    /// See AbstractAttribute::manifest(...).
4441    ChangeStatus manifest(Attributor &A) override {
4442      // TODO: Rewrite the signature to return void?
4443      bool AnyChange = false;
4444      UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
4445      auto RetInstPred = [&](Instruction &I) {
4446        ReturnInst &RI = cast<ReturnInst>(I);
4447        if (!isa<UndefValue>(RI.getReturnValue()))
4448          AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
4449        return true;
4450      };
4451      bool UsedAssumedInformation = false;
4452      A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
4453                                UsedAssumedInformation);
4454      return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
4455    }
4456  
4457    /// See AbstractAttribute::trackStatistics()
4458    void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
4459  };
4460  
4461  struct AAIsDeadFunction : public AAIsDead {
4462    AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
4463  
4464    /// See AbstractAttribute::initialize(...).
4465    void initialize(Attributor &A) override {
4466      Function *F = getAnchorScope();
4467      assert(F && "Did expect an anchor function");
4468      if (!isAssumedDeadInternalFunction(A)) {
4469        ToBeExploredFrom.insert(&F->getEntryBlock().front());
4470        assumeLive(A, F->getEntryBlock());
4471      }
4472    }
4473  
4474    bool isAssumedDeadInternalFunction(Attributor &A) {
4475      if (!getAnchorScope()->hasLocalLinkage())
4476        return false;
4477      bool UsedAssumedInformation = false;
4478      return A.checkForAllCallSites([](AbstractCallSite) { return false; }, *this,
4479                                    true, UsedAssumedInformation);
4480    }
4481  
4482    /// See AbstractAttribute::getAsStr().
4483    const std::string getAsStr(Attributor *A) const override {
4484      return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
4485             std::to_string(getAnchorScope()->size()) + "][#TBEP " +
4486             std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
4487             std::to_string(KnownDeadEnds.size()) + "]";
4488    }
4489  
4490    /// See AbstractAttribute::manifest(...).
4491    ChangeStatus manifest(Attributor &A) override {
4492      assert(getState().isValidState() &&
4493             "Attempted to manifest an invalid state!");
4494  
4495      ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4496      Function &F = *getAnchorScope();
4497  
4498      if (AssumedLiveBlocks.empty()) {
4499        A.deleteAfterManifest(F);
4500        return ChangeStatus::CHANGED;
4501      }
4502  
4503      // Flag to determine if we can change an invoke to a call assuming the
4504      // callee is nounwind. This is not possible if the personality of the
4505      // function allows catching asynchronous exceptions.
4506      bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
4507  
4508      KnownDeadEnds.set_union(ToBeExploredFrom);
4509      for (const Instruction *DeadEndI : KnownDeadEnds) {
4510        auto *CB = dyn_cast<CallBase>(DeadEndI);
4511        if (!CB)
4512          continue;
4513        bool IsKnownNoReturn;
4514        bool MayReturn = !AA::hasAssumedIRAttr<Attribute::NoReturn>(
4515            A, this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL,
4516            IsKnownNoReturn);
4517        if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
4518          continue;
4519  
4520        if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
4521          A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
4522        else
4523          A.changeToUnreachableAfterManifest(
4524              const_cast<Instruction *>(DeadEndI->getNextNode()));
4525        HasChanged = ChangeStatus::CHANGED;
4526      }
4527  
4528      STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
4529      for (BasicBlock &BB : F)
4530        if (!AssumedLiveBlocks.count(&BB)) {
4531          A.deleteAfterManifest(BB);
4532          ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
4533          HasChanged = ChangeStatus::CHANGED;
4534        }
4535  
4536      return HasChanged;
4537    }
4538  
4539    /// See AbstractAttribute::updateImpl(...).
4540    ChangeStatus updateImpl(Attributor &A) override;
4541  
4542    bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
4543      assert(From->getParent() == getAnchorScope() &&
4544             To->getParent() == getAnchorScope() &&
4545             "Used AAIsDead of the wrong function");
4546      return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
4547    }
4548  
4549    /// See AbstractAttribute::trackStatistics()
4550    void trackStatistics() const override {}
4551  
4552    /// Returns true if the function is assumed dead.
4553    bool isAssumedDead() const override { return false; }
4554  
4555    /// See AAIsDead::isKnownDead().
4556    bool isKnownDead() const override { return false; }
4557  
4558    /// See AAIsDead::isAssumedDead(BasicBlock *).
4559    bool isAssumedDead(const BasicBlock *BB) const override {
4560      assert(BB->getParent() == getAnchorScope() &&
4561             "BB must be in the same anchor scope function.");
4562  
4563      if (!getAssumed())
4564        return false;
4565      return !AssumedLiveBlocks.count(BB);
4566    }
4567  
4568    /// See AAIsDead::isKnownDead(BasicBlock *).
4569    bool isKnownDead(const BasicBlock *BB) const override {
4570      return getKnown() && isAssumedDead(BB);
4571    }
4572  
4573    /// See AAIsDead::isAssumed(Instruction *I).
4574    bool isAssumedDead(const Instruction *I) const override {
4575      assert(I->getParent()->getParent() == getAnchorScope() &&
4576             "Instruction must be in the same anchor scope function.");
4577  
4578      if (!getAssumed())
4579        return false;
4580  
4581      // If it is not in AssumedLiveBlocks then it is for sure dead.
4582      // Otherwise, it can still be after a noreturn call in a live block.
4583      if (!AssumedLiveBlocks.count(I->getParent()))
4584        return true;
4585  
4586      // If it is not after a liveness barrier it is live.
4587      const Instruction *PrevI = I->getPrevNode();
4588      while (PrevI) {
4589        if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
4590          return true;
4591        PrevI = PrevI->getPrevNode();
4592      }
4593      return false;
4594    }
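        // Illustrative sketch for isAssumedDead(Instruction) above: in a live
        // block
        //   call void @abort()   ; known dead end
        //   ret void
        // the ret is assumed dead even though its block is in
        // AssumedLiveBlocks, because a liveness barrier precedes it.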
4595  
4596    /// See AAIsDead::isKnownDead(Instruction *I).
4597    bool isKnownDead(const Instruction *I) const override {
4598      return getKnown() && isAssumedDead(I);
4599    }
4600  
4601    /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
4602    /// that internal functions called from \p BB should now be looked at.
4603    bool assumeLive(Attributor &A, const BasicBlock &BB) {
4604      if (!AssumedLiveBlocks.insert(&BB).second)
4605        return false;
4606  
4607      // We assume that all of BB is (probably) live now and if there are calls to
4608      // internal functions we will assume that those are now live as well. This
4609      // is a performance optimization for blocks with calls to a lot of internal
4610      // functions. It can however cause dead functions to be treated as live.
4611      for (const Instruction &I : BB)
4612        if (const auto *CB = dyn_cast<CallBase>(&I))
4613          if (auto *F = dyn_cast_if_present<Function>(CB->getCalledOperand()))
4614            if (F->hasLocalLinkage())
4615              A.markLiveInternalFunction(*F);
4616      return true;
4617    }
4618  
4619    /// Collection of instructions that need to be explored again, e.g., we
4620    /// did assume they do not transfer control to (one of their) successors.
4621    SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
4622  
4623    /// Collection of instructions that are known to not transfer control.
4624    SmallSetVector<const Instruction *, 8> KnownDeadEnds;
4625  
4626    /// Collection of all assumed live edges
4627    DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
4628  
4629    /// Collection of all assumed live BasicBlocks.
4630    DenseSet<const BasicBlock *> AssumedLiveBlocks;
4631  };
4632  
4633  static bool
4634  identifyAliveSuccessors(Attributor &A, const CallBase &CB,
4635                          AbstractAttribute &AA,
4636                          SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4637    const IRPosition &IPos = IRPosition::callsite_function(CB);
4638  
4639    bool IsKnownNoReturn;
4640    if (AA::hasAssumedIRAttr<Attribute::NoReturn>(
4641            A, &AA, IPos, DepClassTy::OPTIONAL, IsKnownNoReturn))
4642      return !IsKnownNoReturn;
4643    if (CB.isTerminator())
4644      AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4645    else
4646      AliveSuccessors.push_back(CB.getNextNode());
4647    return false;
4648  }
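      // Illustrative sketch for the call case above (hypothetical IR): for
      //   call void @exit(i32 0)
      // with an assumed noreturn callee nothing is added, while for an
      // ordinary call the instruction after it (or the first successor of a
      // terminator call) becomes the alive successor.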
4649  
4650  static bool
4651  identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4652                          AbstractAttribute &AA,
4653                          SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4654    bool UsedAssumedInformation =
4655        identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4656  
4657    // First, determine if we can change an invoke to a call assuming the
4658    // callee is nounwind. This is not possible if the personality of the
4659    // function allows catching asynchronous exceptions.
4660    if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4661      AliveSuccessors.push_back(&II.getUnwindDest()->front());
4662    } else {
4663      const IRPosition &IPos = IRPosition::callsite_function(II);
4664  
4665      bool IsKnownNoUnwind;
4666      if (AA::hasAssumedIRAttr<Attribute::NoUnwind>(
4667              A, &AA, IPos, DepClassTy::OPTIONAL, IsKnownNoUnwind)) {
4668        UsedAssumedInformation |= !IsKnownNoUnwind;
4669      } else {
4670        AliveSuccessors.push_back(&II.getUnwindDest()->front());
4671      }
4672    }
4673    return UsedAssumedInformation;
4674  }
4675  
4676  static bool
4677  identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4678                          AbstractAttribute &AA,
4679                          SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4680    bool UsedAssumedInformation = false;
4681    if (BI.getNumSuccessors() == 1) {
4682      AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4683    } else {
4684      std::optional<Constant *> C =
4685          A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4686      if (!C || isa_and_nonnull<UndefValue>(*C)) {
4687        // No value yet, assume both edges are dead.
4688      } else if (isa_and_nonnull<ConstantInt>(*C)) {
4689        const BasicBlock *SuccBB =
4690            BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4691        AliveSuccessors.push_back(&SuccBB->front());
4692      } else {
4693        AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4694        AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4695        UsedAssumedInformation = false;
4696      }
4697    }
4698    return UsedAssumedInformation;
4699  }
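      // Illustrative sketch for the conditional-branch case above: if %c in
      //   br i1 %c, label %t, label %f
      // is assumed to simplify to "i1 true", only the front of %t is marked
      // alive and UsedAssumedInformation is set; while no value is known or
      // assumed at all, both edges stay dead.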
4700  
4701  static bool
4702  identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4703                          AbstractAttribute &AA,
4704                          SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4705    bool UsedAssumedInformation = false;
4706    SmallVector<AA::ValueAndContext> Values;
4707    if (!A.getAssumedSimplifiedValues(IRPosition::value(*SI.getCondition()), &AA,
4708                                      Values, AA::AnyScope,
4709                                      UsedAssumedInformation)) {
4710      // Something went wrong, assume all successors are live.
4711      for (const BasicBlock *SuccBB : successors(SI.getParent()))
4712        AliveSuccessors.push_back(&SuccBB->front());
4713      return false;
4714    }
4715  
4716    if (Values.empty() ||
4717        (Values.size() == 1 &&
4718         isa_and_nonnull<UndefValue>(Values.front().getValue()))) {
4719      // No valid value yet, assume all edges are dead.
4720      return UsedAssumedInformation;
4721    }
4722  
4723    Type &Ty = *SI.getCondition()->getType();
4724    SmallPtrSet<ConstantInt *, 8> Constants;
4725    auto CheckForConstantInt = [&](Value *V) {
4726      if (auto *CI = dyn_cast_if_present<ConstantInt>(AA::getWithType(*V, Ty))) {
4727        Constants.insert(CI);
4728        return true;
4729      }
4730      return false;
4731    };
4732  
4733    if (!all_of(Values, [&](AA::ValueAndContext &VAC) {
4734          return CheckForConstantInt(VAC.getValue());
4735        })) {
4736      for (const BasicBlock *SuccBB : successors(SI.getParent()))
4737        AliveSuccessors.push_back(&SuccBB->front());
4738      return UsedAssumedInformation;
4739    }
4740  
4741    unsigned MatchedCases = 0;
4742    for (const auto &CaseIt : SI.cases()) {
4743      if (Constants.count(CaseIt.getCaseValue())) {
4744        ++MatchedCases;
4745        AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4746      }
4747    }
4748  
4749    // If all potential values have been matched, we will not visit the default
4750    // case.
4751    if (MatchedCases < Constants.size())
4752      AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4753    return UsedAssumedInformation;
4754  }
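      // Illustrative sketch for the switch case above: if the condition is
      // assumed to take a value in {1, 3}, only the successors of the
      // matching cases are marked alive, plus the default destination unless
      // every assumed value is matched by some case.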
4755  
4756  ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4757    ChangeStatus Change = ChangeStatus::UNCHANGED;
4758  
4759    if (AssumedLiveBlocks.empty()) {
4760      if (isAssumedDeadInternalFunction(A))
4761        return ChangeStatus::UNCHANGED;
4762  
4763      Function *F = getAnchorScope();
4764      ToBeExploredFrom.insert(&F->getEntryBlock().front());
4765      assumeLive(A, F->getEntryBlock());
4766      Change = ChangeStatus::CHANGED;
4767    }
4768  
4769    LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4770                      << getAnchorScope()->size() << "] BBs and "
4771                      << ToBeExploredFrom.size() << " exploration points and "
4772                      << KnownDeadEnds.size() << " known dead ends\n");
4773  
4774    // Copy and clear the list of instructions we need to explore from. It is
4775    // refilled with instructions the next update has to look at.
4776    SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4777                                                 ToBeExploredFrom.end());
4778    decltype(ToBeExploredFrom) NewToBeExploredFrom;
4779  
4780    SmallVector<const Instruction *, 8> AliveSuccessors;
4781    while (!Worklist.empty()) {
4782      const Instruction *I = Worklist.pop_back_val();
4783      LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4784  
4785      // Fast forward for uninteresting instructions. We could look for UB here
4786      // though.
4787      while (!I->isTerminator() && !isa<CallBase>(I))
4788        I = I->getNextNode();
4789  
4790      AliveSuccessors.clear();
4791  
4792      bool UsedAssumedInformation = false;
4793      switch (I->getOpcode()) {
4794      // TODO: look for (assumed) UB to backwards propagate "deadness".
4795      default:
4796        assert(I->isTerminator() &&
4797               "Expected non-terminators to be handled already!");
4798        for (const BasicBlock *SuccBB : successors(I->getParent()))
4799          AliveSuccessors.push_back(&SuccBB->front());
4800        break;
4801      case Instruction::Call:
4802        UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4803                                                         *this, AliveSuccessors);
4804        break;
4805      case Instruction::Invoke:
4806        UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4807                                                         *this, AliveSuccessors);
4808        break;
4809      case Instruction::Br:
4810        UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4811                                                         *this, AliveSuccessors);
4812        break;
4813      case Instruction::Switch:
4814        UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4815                                                         *this, AliveSuccessors);
4816        break;
4817      }
4818  
4819      if (UsedAssumedInformation) {
4820        NewToBeExploredFrom.insert(I);
4821      } else if (AliveSuccessors.empty() ||
4822                 (I->isTerminator() &&
4823                  AliveSuccessors.size() < I->getNumSuccessors())) {
4824        if (KnownDeadEnds.insert(I))
4825          Change = ChangeStatus::CHANGED;
4826      }
4827  
4828      LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4829                        << AliveSuccessors.size() << " UsedAssumedInformation: "
4830                        << UsedAssumedInformation << "\n");
4831  
4832      for (const Instruction *AliveSuccessor : AliveSuccessors) {
4833        if (!I->isTerminator()) {
4834          assert(AliveSuccessors.size() == 1 &&
4835                 "Non-terminator expected to have a single successor!");
4836          Worklist.push_back(AliveSuccessor);
4837        } else {
4838          // Record the assumed live edge.
4839          auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4840          if (AssumedLiveEdges.insert(Edge).second)
4841            Change = ChangeStatus::CHANGED;
4842          if (assumeLive(A, *AliveSuccessor->getParent()))
4843            Worklist.push_back(AliveSuccessor);
4844        }
4845      }
4846    }
4847  
4848  // Check if the content of ToBeExploredFrom changed, ignoring the order.
4849    if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4850        llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4851          return !ToBeExploredFrom.count(I);
4852        })) {
4853      Change = ChangeStatus::CHANGED;
4854      ToBeExploredFrom = std::move(NewToBeExploredFrom);
4855    }
4856  
4857    // If we know everything is live there is no need to query for liveness.
4858    // Instead, indicating a pessimistic fixpoint will cause the state to be
4859    // "invalid" and all queries to be answered conservatively without lookups.
4860  // To be in this state we have to (1) have finished the exploration, (2) not
4861  // have ruled unreachable code dead, and (3) not have discovered any
4862  // non-trivial dead end.
4863    if (ToBeExploredFrom.empty() &&
4864        getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4865        llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4866          return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4867        }))
4868      return indicatePessimisticFixpoint();
4869    return Change;
4870  }
4871  
4872  /// Liveness information for a call site.
4873  struct AAIsDeadCallSite final : AAIsDeadFunction {
4874    AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4875        : AAIsDeadFunction(IRP, A) {}
4876  
4877    /// See AbstractAttribute::initialize(...).
4878    void initialize(Attributor &A) override {
4879      // TODO: Once we have call site specific value information we can provide
4880      //       call site specific liveness information and then it makes
4881      //       sense to specialize attributes for call sites instead of
4882      //       redirecting requests to the callee.
4883      llvm_unreachable("Abstract attributes for liveness are not "
4884                       "supported for call sites yet!");
4885    }
4886  
4887    /// See AbstractAttribute::updateImpl(...).
4888    ChangeStatus updateImpl(Attributor &A) override {
4889      return indicatePessimisticFixpoint();
4890    }
4891  
4892    /// See AbstractAttribute::trackStatistics()
4893    void trackStatistics() const override {}
4894  };
4895  } // namespace
4896  
4897  /// -------------------- Dereferenceable Argument Attribute --------------------
4898  
4899  namespace {
4900  struct AADereferenceableImpl : AADereferenceable {
4901    AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4902        : AADereferenceable(IRP, A) {}
4903    using StateType = DerefState;
4904  
4905    /// See AbstractAttribute::initialize(...).
4906    void initialize(Attributor &A) override {
4907      Value &V = *getAssociatedValue().stripPointerCasts();
4908      SmallVector<Attribute, 4> Attrs;
4909      A.getAttrs(getIRPosition(),
4910                 {Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4911                 Attrs, /* IgnoreSubsumingPositions */ false);
4912      for (const Attribute &Attr : Attrs)
4913        takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4914  
4915      // Ensure we initialize the non-null AA (if necessary).
4916      bool IsKnownNonNull;
4917      AA::hasAssumedIRAttr<Attribute::NonNull>(
4918          A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNonNull);
4919  
4920      bool CanBeNull, CanBeFreed;
4921      takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4922          A.getDataLayout(), CanBeNull, CanBeFreed));
4923  
4924      if (Instruction *CtxI = getCtxI())
4925        followUsesInMBEC(*this, A, getState(), *CtxI);
4926    }
4927  
4928    /// See AbstractAttribute::getState()
4929    /// {
4930    StateType &getState() override { return *this; }
4931    const StateType &getState() const override { return *this; }
4932    /// }
4933  
4934    /// Helper function for collecting accessed bytes in must-be-executed-context.
4935    void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4936                                DerefState &State) {
4937      const Value *UseV = U->get();
4938      if (!UseV->getType()->isPointerTy())
4939        return;
4940  
4941      std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
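          // The guard below keeps only precise, non-volatile accesses that go
          // through this use; only for those do we know exactly which bytes the
          // instruction touches and can record them as accessed.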
4942      if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4943        return;
4944  
4945      int64_t Offset;
4946      const Value *Base = GetPointerBaseWithConstantOffset(
4947          Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4948      if (Base && Base == &getAssociatedValue())
4949        State.addAccessedBytes(Offset, Loc->Size.getValue());
4950    }
4951  
4952    /// See followUsesInMBEC
4953    bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4954                         AADereferenceable::StateType &State) {
4955      bool IsNonNull = false;
4956      bool TrackUse = false;
4957      int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4958          A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4959      LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4960                        << " for instruction " << *I << "\n");
4961  
4962      addAccessedBytesForUse(A, U, I, State);
4963      State.takeKnownDerefBytesMaximum(DerefBytes);
4964      return TrackUse;
4965    }
4966  
4967    /// See AbstractAttribute::manifest(...).
4968    ChangeStatus manifest(Attributor &A) override {
4969      ChangeStatus Change = AADereferenceable::manifest(A);
4970      bool IsKnownNonNull;
4971      bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
4972          A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
4973      if (IsAssumedNonNull &&
4974          A.hasAttr(getIRPosition(), Attribute::DereferenceableOrNull)) {
4975        A.removeAttrs(getIRPosition(), {Attribute::DereferenceableOrNull});
4976        return ChangeStatus::CHANGED;
4977      }
4978      return Change;
4979    }
4980  
4981    void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
4982                              SmallVectorImpl<Attribute> &Attrs) const override {
4983      // TODO: Add *_globally support
4984      bool IsKnownNonNull;
4985      bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
4986          A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
4987      if (IsAssumedNonNull)
4988        Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4989            Ctx, getAssumedDereferenceableBytes()));
4990      else
4991        Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4992            Ctx, getAssumedDereferenceableBytes()));
4993    }
4994  
4995    /// See AbstractAttribute::getAsStr().
4996    const std::string getAsStr(Attributor *A) const override {
4997      if (!getAssumedDereferenceableBytes())
4998        return "unknown-dereferenceable";
4999      bool IsKnownNonNull;
5000      bool IsAssumedNonNull = false;
5001      if (A)
5002        IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
5003            *A, this, getIRPosition(), DepClassTy::NONE, IsKnownNonNull);
5004      return std::string("dereferenceable") +
5005             (IsAssumedNonNull ? "" : "_or_null") +
5006             (isAssumedGlobal() ? "_globally" : "") + "<" +
5007             std::to_string(getKnownDereferenceableBytes()) + "-" +
5008             std::to_string(getAssumedDereferenceableBytes()) + ">" +
5009             (!A ? " [non-null is unknown]" : "");
5010    }
5011  };
5012  
5013  /// Dereferenceable attribute for a floating value.
5014  struct AADereferenceableFloating : AADereferenceableImpl {
5015    AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
5016        : AADereferenceableImpl(IRP, A) {}
5017  
5018    /// See AbstractAttribute::updateImpl(...).
5019    ChangeStatus updateImpl(Attributor &A) override {
5020      bool Stripped;
5021      bool UsedAssumedInformation = false;
5022      SmallVector<AA::ValueAndContext> Values;
5023      if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
5024                                        AA::AnyScope, UsedAssumedInformation)) {
5025        Values.push_back({getAssociatedValue(), getCtxI()});
5026        Stripped = false;
5027      } else {
5028        Stripped = Values.size() != 1 ||
5029                   Values.front().getValue() != &getAssociatedValue();
5030      }
5031  
5032      const DataLayout &DL = A.getDataLayout();
5033      DerefState T;
5034  
5035      auto VisitValueCB = [&](const Value &V) -> bool {
5036        unsigned IdxWidth =
5037            DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
5038        APInt Offset(IdxWidth, 0);
5039        const Value *Base = stripAndAccumulateOffsets(
5040            A, *this, &V, DL, Offset, /* GetMinOffset */ false,
5041            /* AllowNonInbounds */ true);
5042  
5043        const auto *AA = A.getAAFor<AADereferenceable>(
5044            *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
5045        int64_t DerefBytes = 0;
5046        if (!AA || (!Stripped && this == AA)) {
5047          // Use IR information if we did not strip anything.
5048          // TODO: track globally.
5049          bool CanBeNull, CanBeFreed;
5050          DerefBytes =
5051              Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
5052          T.GlobalState.indicatePessimisticFixpoint();
5053        } else {
5054          const DerefState &DS = AA->getState();
5055          DerefBytes = DS.DerefBytesState.getAssumed();
5056          T.GlobalState &= DS.GlobalState;
5057        }
5058  
5059        // For now we do not try to "increase" dereferenceability due to
5060        // negative indices, as we would first need code to deal with loops
5061        // and with overflows of the dereferenceable bytes.
5062        int64_t OffsetSExt = Offset.getSExtValue();
5063        if (OffsetSExt < 0)
5064          OffsetSExt = 0;
5065  
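            // Worked example (illustrative): if Base is known dereferenceable(24)
            // and V is "getelementptr inbounds i8, ptr %base, i64 8", then Offset
            // is 8 and V is assumed dereferenceable for 24 - 8 = 16 bytes.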
5066        T.takeAssumedDerefBytesMinimum(
5067            std::max(int64_t(0), DerefBytes - OffsetSExt));
5068  
5069        if (this == AA) {
5070          if (!Stripped) {
5071            // If nothing was stripped IR information is all we got.
5072            T.takeKnownDerefBytesMaximum(
5073                std::max(int64_t(0), DerefBytes - OffsetSExt));
5074            T.indicatePessimisticFixpoint();
5075          } else if (OffsetSExt > 0) {
5076            // If something was stripped but there is circular reasoning, look
5077            // at the offset. If it is positive, each round of the circular
5078            // reasoning would decrease the dereferenceable bytes a bit more,
5079            // slowly driving them down to the known value; indicating a
5080            // pessimistic fixpoint accelerates this.
5081            T.indicatePessimisticFixpoint();
5082          }
5083        }
5084  
5085        return T.isValidState();
5086      };
5087  
5088      for (const auto &VAC : Values)
5089        if (!VisitValueCB(*VAC.getValue()))
5090          return indicatePessimisticFixpoint();
5091  
5092      return clampStateAndIndicateChange(getState(), T);
5093    }
5094  
5095    /// See AbstractAttribute::trackStatistics()
5096    void trackStatistics() const override {
5097      STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
5098    }
5099  };
5100  
5101  /// Dereferenceable attribute for a return value.
5102  struct AADereferenceableReturned final
5103      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
5104    using Base =
5105        AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>;
5106    AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
5107        : Base(IRP, A) {}
5108  
5109    /// See AbstractAttribute::trackStatistics()
5110    void trackStatistics() const override {
5111      STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
5112    }
5113  };
5114  
5115  /// Dereferenceable attribute for an argument.
5116  struct AADereferenceableArgument final
5117      : AAArgumentFromCallSiteArguments<AADereferenceable,
5118                                        AADereferenceableImpl> {
5119    using Base =
5120        AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
5121    AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
5122        : Base(IRP, A) {}
5123  
5124    /// See AbstractAttribute::trackStatistics()
5125    void trackStatistics() const override {
5126      STATS_DECLTRACK_ARG_ATTR(dereferenceable)
5127    }
5128  };
5129  
5130  /// Dereferenceable attribute for a call site argument.
5131  struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
5132    AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
5133        : AADereferenceableFloating(IRP, A) {}
5134  
5135    /// See AbstractAttribute::trackStatistics()
5136    void trackStatistics() const override {
5137      STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
5138    }
5139  };
5140  
5141  /// Dereferenceable attribute deduction for a call site return value.
5142  struct AADereferenceableCallSiteReturned final
5143      : AACalleeToCallSite<AADereferenceable, AADereferenceableImpl> {
5144    using Base = AACalleeToCallSite<AADereferenceable, AADereferenceableImpl>;
5145    AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
5146        : Base(IRP, A) {}
5147  
5148    /// See AbstractAttribute::trackStatistics()
5149    void trackStatistics() const override {
5150      STATS_DECLTRACK_CS_ATTR(dereferenceable);
5151    }
5152  };
5153  } // namespace
5154  
5155  // ------------------------ Align Argument Attribute ------------------------
5156  
5157  namespace {
5158  static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
5159                                      Value &AssociatedValue, const Use *U,
5160                                      const Instruction *I, bool &TrackUse) {
5161    // We need to follow common pointer manipulation uses to the accesses they
5162    // feed into.
5163    if (isa<CastInst>(I)) {
5164      // Follow all but ptr2int casts.
5165      TrackUse = !isa<PtrToIntInst>(I);
5166      return 0;
5167    }
5168    if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
5169      if (GEP->hasAllConstantIndices())
5170        TrackUse = true;
5171      return 0;
5172    }
5173  
5174    MaybeAlign MA;
5175    if (const auto *CB = dyn_cast<CallBase>(I)) {
5176      if (CB->isBundleOperand(U) || CB->isCallee(U))
5177        return 0;
5178  
5179      unsigned ArgNo = CB->getArgOperandNo(U);
5180      IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
5181      // As long as we only use known information there is no need to track
5182      // dependences here.
5183      auto *AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
5184      if (AlignAA)
5185        MA = MaybeAlign(AlignAA->getKnownAlign());
5186    }
5187  
5188    const DataLayout &DL = A.getDataLayout();
5189    const Value *UseV = U->get();
5190    if (auto *SI = dyn_cast<StoreInst>(I)) {
5191      if (SI->getPointerOperand() == UseV)
5192        MA = SI->getAlign();
5193    } else if (auto *LI = dyn_cast<LoadInst>(I)) {
5194      if (LI->getPointerOperand() == UseV)
5195        MA = LI->getAlign();
5196    } else if (auto *AI = dyn_cast<AtomicRMWInst>(I)) {
5197      if (AI->getPointerOperand() == UseV)
5198        MA = AI->getAlign();
5199    } else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
5200      if (AI->getPointerOperand() == UseV)
5201        MA = AI->getAlign();
5202    }
5203  
5204    if (!MA || *MA <= QueryingAA.getKnownAlign())
5205      return 0;
5206  
5207    unsigned Alignment = MA->value();
5208    int64_t Offset;
5209  
5210    if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
5211      if (Base == &AssociatedValue) {
5212        // BasePointerAddr + Offset = Alignment * Q for some integer Q.
5213        // So we can say that the maximum power of two which is a divisor of
5214        // gcd(Offset, Alignment) is an alignment.
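            // Example (illustrative): an access known to be 16-byte aligned at
            // constant offset 20 from the associated value gives
            // gcd(20, 16) = 4, so the associated value is at least 4-byte
            // aligned.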
5215  
5216        uint32_t gcd = std::gcd(uint32_t(abs((int32_t)Offset)), Alignment);
5217        Alignment = llvm::bit_floor(gcd);
5218      }
5219    }
5220  
5221    return Alignment;
5222  }
5223  
5224  struct AAAlignImpl : AAAlign {
5225    AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
5226  
5227    /// See AbstractAttribute::initialize(...).
5228    void initialize(Attributor &A) override {
5229      SmallVector<Attribute, 4> Attrs;
5230      A.getAttrs(getIRPosition(), {Attribute::Alignment}, Attrs);
5231      for (const Attribute &Attr : Attrs)
5232        takeKnownMaximum(Attr.getValueAsInt());
5233  
5234      Value &V = *getAssociatedValue().stripPointerCasts();
5235      takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
5236  
5237      if (Instruction *CtxI = getCtxI())
5238        followUsesInMBEC(*this, A, getState(), *CtxI);
5239    }
5240  
5241    /// See AbstractAttribute::manifest(...).
5242    ChangeStatus manifest(Attributor &A) override {
5243      ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
5244  
5245      // Check for users that allow alignment annotations.
5246      Value &AssociatedValue = getAssociatedValue();
5247      for (const Use &U : AssociatedValue.uses()) {
5248        if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
5249          if (SI->getPointerOperand() == &AssociatedValue)
5250            if (SI->getAlign() < getAssumedAlign()) {
5251              STATS_DECLTRACK(AAAlign, Store,
5252                              "Number of times alignment added to a store");
5253              SI->setAlignment(getAssumedAlign());
5254              LoadStoreChanged = ChangeStatus::CHANGED;
5255            }
5256        } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
5257          if (LI->getPointerOperand() == &AssociatedValue)
5258            if (LI->getAlign() < getAssumedAlign()) {
5259              LI->setAlignment(getAssumedAlign());
5260              STATS_DECLTRACK(AAAlign, Load,
5261                              "Number of times alignment added to a load");
5262              LoadStoreChanged = ChangeStatus::CHANGED;
5263            }
5264        }
5265      }
5266  
5267      ChangeStatus Changed = AAAlign::manifest(A);
5268  
5269      Align InheritAlign =
5270          getAssociatedValue().getPointerAlignment(A.getDataLayout());
5271      if (InheritAlign >= getAssumedAlign())
5272        return LoadStoreChanged;
5273      return Changed | LoadStoreChanged;
5274    }
5275  
5276    // TODO: Provide a helper to determine the implied ABI alignment and check in
5277    //       the existing manifest method and a new one for AAAlignImpl that value
5278    //       to avoid making the alignment explicit if it did not improve.
5279  
5280    /// See AbstractAttribute::getDeducedAttributes
5281    void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
5282                              SmallVectorImpl<Attribute> &Attrs) const override {
5283      if (getAssumedAlign() > 1)
5284        Attrs.emplace_back(
5285            Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
5286    }
5287  
5288    /// See followUsesInMBEC
5289    bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
5290                         AAAlign::StateType &State) {
5291      bool TrackUse = false;
5292  
5293      unsigned int KnownAlign =
5294          getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
5295      State.takeKnownMaximum(KnownAlign);
5296  
5297      return TrackUse;
5298    }
5299  
5300    /// See AbstractAttribute::getAsStr().
5301    const std::string getAsStr(Attributor *A) const override {
5302      return "align<" + std::to_string(getKnownAlign().value()) + "-" +
5303             std::to_string(getAssumedAlign().value()) + ">";
5304    }
5305  };
5306  
5307  /// Align attribute for a floating value.
5308  struct AAAlignFloating : AAAlignImpl {
5309    AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
5310  
5311    /// See AbstractAttribute::updateImpl(...).
5312    ChangeStatus updateImpl(Attributor &A) override {
5313      const DataLayout &DL = A.getDataLayout();
5314  
5315      bool Stripped;
5316      bool UsedAssumedInformation = false;
5317      SmallVector<AA::ValueAndContext> Values;
5318      if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
5319                                        AA::AnyScope, UsedAssumedInformation)) {
5320        Values.push_back({getAssociatedValue(), getCtxI()});
5321        Stripped = false;
5322      } else {
5323        Stripped = Values.size() != 1 ||
5324                   Values.front().getValue() != &getAssociatedValue();
5325      }
5326  
5327      StateType T;
5328      auto VisitValueCB = [&](Value &V) -> bool {
5329        if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
5330          return true;
5331        const auto *AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
5332                                             DepClassTy::REQUIRED);
5333        if (!AA || (!Stripped && this == AA)) {
5334          int64_t Offset;
5335          unsigned Alignment = 1;
5336          if (const Value *Base =
5337                  GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
5338            // TODO: Use AAAlign for the base too.
5339            Align PA = Base->getPointerAlignment(DL);
5340            // BasePointerAddr + Offset = Alignment * Q for some integer Q.
5341            // So we can say that the maximum power of two which is a divisor of
5342            // gcd(Offset, Alignment) is an alignment.
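                // Example (illustrative): a base with pointer alignment 8
                // accessed at constant offset 6 gives gcd(6, 8) = 2, so the
                // value is known to be at least 2-byte aligned.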
5343  
5344            uint32_t gcd =
5345                std::gcd(uint32_t(abs((int32_t)Offset)), uint32_t(PA.value()));
5346            Alignment = llvm::bit_floor(gcd);
5347          } else {
5348            Alignment = V.getPointerAlignment(DL).value();
5349          }
5350          // Use only IR information if we did not strip anything.
5351          T.takeKnownMaximum(Alignment);
5352          T.indicatePessimisticFixpoint();
5353        } else {
5354          // Use abstract attribute information.
5355          const AAAlign::StateType &DS = AA->getState();
5356          T ^= DS;
5357        }
5358        return T.isValidState();
5359      };
5360  
5361      for (const auto &VAC : Values) {
5362        if (!VisitValueCB(*VAC.getValue()))
5363          return indicatePessimisticFixpoint();
5364      }
5365  
5366      // TODO: If we know we visited all incoming values, and thus none are
5367      //       assumed dead, we can take the known information from the state T.
5368      return clampStateAndIndicateChange(getState(), T);
5369    }
5370  
5371    /// See AbstractAttribute::trackStatistics()
5372    void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
5373  };
5374  
5375  /// Align attribute for function return value.
5376  struct AAAlignReturned final
5377      : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
5378    using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
5379    AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
5380  
5381    /// See AbstractAttribute::trackStatistics()
5382    void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
5383  };
5384  
5385  /// Align attribute for function argument.
5386  struct AAAlignArgument final
5387      : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
5388    using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
5389    AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
5390  
5391    /// See AbstractAttribute::manifest(...).
5392    ChangeStatus manifest(Attributor &A) override {
5393      // If the associated argument is involved in a must-tail call we give up
5394      // because we would need to keep the argument alignments of caller and
5395      // callee in-sync. Just does not seem worth the trouble right now.
5396      if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
5397        return ChangeStatus::UNCHANGED;
5398      return Base::manifest(A);
5399    }
5400  
5401    /// See AbstractAttribute::trackStatistics()
5402    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
5403  };
5404  
5405  struct AAAlignCallSiteArgument final : AAAlignFloating {
5406    AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
5407        : AAAlignFloating(IRP, A) {}
5408  
5409    /// See AbstractAttribute::manifest(...).
5410    ChangeStatus manifest(Attributor &A) override {
5411      // If the associated argument is involved in a must-tail call we give up
5412      // because we would need to keep the argument alignments of caller and
5413      // callee in-sync. Just does not seem worth the trouble right now.
5414      if (Argument *Arg = getAssociatedArgument())
5415        if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
5416          return ChangeStatus::UNCHANGED;
5417      ChangeStatus Changed = AAAlignImpl::manifest(A);
5418      Align InheritAlign =
5419          getAssociatedValue().getPointerAlignment(A.getDataLayout());
5420      if (InheritAlign >= getAssumedAlign())
5421        Changed = ChangeStatus::UNCHANGED;
5422      return Changed;
5423    }
5424  
5425    /// See AbstractAttribute::updateImpl(Attributor &A).
5426    ChangeStatus updateImpl(Attributor &A) override {
5427      ChangeStatus Changed = AAAlignFloating::updateImpl(A);
5428      if (Argument *Arg = getAssociatedArgument()) {
5429        // We only take known information from the argument
5430        // so we do not need to track a dependence.
5431        const auto *ArgAlignAA = A.getAAFor<AAAlign>(
5432            *this, IRPosition::argument(*Arg), DepClassTy::NONE);
5433        if (ArgAlignAA)
5434          takeKnownMaximum(ArgAlignAA->getKnownAlign().value());
5435      }
5436      return Changed;
5437    }
5438  
5439    /// See AbstractAttribute::trackStatistics()
5440    void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
5441  };
5442  
5443  /// Align attribute deduction for a call site return value.
5444  struct AAAlignCallSiteReturned final
5445      : AACalleeToCallSite<AAAlign, AAAlignImpl> {
5446    using Base = AACalleeToCallSite<AAAlign, AAAlignImpl>;
5447    AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
5448        : Base(IRP, A) {}
5449  
5450    /// See AbstractAttribute::trackStatistics()
5451    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
5452  };
5453  } // namespace
5454  
5455  /// ------------------ Function No-Return Attribute ----------------------------
5456  namespace {
5457  struct AANoReturnImpl : public AANoReturn {
5458    AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
5459  
5460    /// See AbstractAttribute::initialize(...).
5461    void initialize(Attributor &A) override {
5462      bool IsKnown;
5463      assert(!AA::hasAssumedIRAttr<Attribute::NoReturn>(
5464          A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
5465      (void)IsKnown;
5466    }
5467  
5468    /// See AbstractAttribute::getAsStr().
5469    const std::string getAsStr(Attributor *A) const override {
5470      return getAssumed() ? "noreturn" : "may-return";
5471    }
5472  
5473    /// See AbstractAttribute::updateImpl(Attributor &A).
5474    ChangeStatus updateImpl(Attributor &A) override {
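          // A function is "noreturn" exactly when no (assumed live) return
          // instruction remains; the always-false callback makes the check below
          // fail precisely when such an instruction is found.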
5475      auto CheckForNoReturn = [](Instruction &) { return false; };
5476      bool UsedAssumedInformation = false;
5477      if (!A.checkForAllInstructions(CheckForNoReturn, *this,
5478                                     {(unsigned)Instruction::Ret},
5479                                     UsedAssumedInformation))
5480        return indicatePessimisticFixpoint();
5481      return ChangeStatus::UNCHANGED;
5482    }
5483  };
5484  
5485  struct AANoReturnFunction final : AANoReturnImpl {
5486    AANoReturnFunction(const IRPosition &IRP, Attributor &A)
5487        : AANoReturnImpl(IRP, A) {}
5488  
5489    /// See AbstractAttribute::trackStatistics()
5490    void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
5491  };
5492  
5493  /// NoReturn attribute deduction for a call site.
5494  struct AANoReturnCallSite final
5495      : AACalleeToCallSite<AANoReturn, AANoReturnImpl> {
5496    AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
5497        : AACalleeToCallSite<AANoReturn, AANoReturnImpl>(IRP, A) {}
5498  
5499    /// See AbstractAttribute::trackStatistics()
5500    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
5501  };
5502  } // namespace
5503  
5504  /// ----------------------- Instance Info ---------------------------------
5505  
5506  namespace {
5507  /// A class to hold the state for instance-info attributes.
5508  struct AAInstanceInfoImpl : public AAInstanceInfo {
5509    AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
5510        : AAInstanceInfo(IRP, A) {}
5511  
5512    /// See AbstractAttribute::initialize(...).
5513    void initialize(Attributor &A) override {
5514      Value &V = getAssociatedValue();
5515      if (auto *C = dyn_cast<Constant>(&V)) {
5516        if (C->isThreadDependent())
5517          indicatePessimisticFixpoint();
5518        else
5519          indicateOptimisticFixpoint();
5520        return;
5521      }
5522      if (auto *CB = dyn_cast<CallBase>(&V))
5523        if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
5524            !CB->mayReadFromMemory()) {
5525          indicateOptimisticFixpoint();
5526          return;
5527        }
5528      if (auto *I = dyn_cast<Instruction>(&V)) {
5529        const auto *CI =
5530            A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
5531                *I->getFunction());
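            // A value defined inside a cycle may have multiple instances alive
            // at the same time (one per iteration), so it cannot be treated as
            // unique for the analysis.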
5532        if (mayBeInCycle(CI, I, /* HeaderOnly */ false)) {
5533          indicatePessimisticFixpoint();
5534          return;
5535        }
5536      }
5537    }
5538  
5539    /// See AbstractAttribute::updateImpl(...).
5540    ChangeStatus updateImpl(Attributor &A) override {
5541      ChangeStatus Changed = ChangeStatus::UNCHANGED;
5542  
5543      Value &V = getAssociatedValue();
5544      const Function *Scope = nullptr;
5545      if (auto *I = dyn_cast<Instruction>(&V))
5546        Scope = I->getFunction();
5547      if (auto *A = dyn_cast<Argument>(&V)) {
5548        Scope = A->getParent();
5549        if (!Scope->hasLocalLinkage())
5550          return Changed;
5551      }
5552      if (!Scope)
5553        return indicateOptimisticFixpoint();
5554  
5555      bool IsKnownNoRecurse;
5556      if (AA::hasAssumedIRAttr<Attribute::NoRecurse>(
5557              A, this, IRPosition::function(*Scope), DepClassTy::OPTIONAL,
5558              IsKnownNoRecurse))
5559        return Changed;
5560  
5561      auto UsePred = [&](const Use &U, bool &Follow) {
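            // Users that merely forward the value (GEPs, casts, PHIs, selects)
            // are transparent; keep following their uses transitively.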
5562        const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
5563        if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
5564            isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5565          Follow = true;
5566          return true;
5567        }
5568        if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
5569            (isa<StoreInst>(UserI) &&
5570             cast<StoreInst>(UserI)->getValueOperand() != U.get()))
5571          return true;
5572        if (auto *CB = dyn_cast<CallBase>(UserI)) {
5573          // This check does not guarantee uniqueness, but for now it ensures
5574          // that we cannot end up with two versions of \p U while thinking it
5575          // was one.
5575          auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
5576          if (!Callee || !Callee->hasLocalLinkage())
5577            return true;
5578          if (!CB->isArgOperand(&U))
5579            return false;
5580          const auto *ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
5581              *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
5582              DepClassTy::OPTIONAL);
5583          if (!ArgInstanceInfoAA ||
5584              !ArgInstanceInfoAA->isAssumedUniqueForAnalysis())
5585            return false;
5586          // If this call base might reach the scope again we might forward the
5587          // argument back here. This is very conservative.
5588          if (AA::isPotentiallyReachable(
5589                  A, *CB, *Scope, *this, /* ExclusionSet */ nullptr,
5590                  [Scope](const Function &Fn) { return &Fn != Scope; }))
5591            return false;
5592          return true;
5593        }
5594        return false;
5595      };
5596  
5597      auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
5598        if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
5599          auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
5600          if ((isa<AllocaInst>(Ptr) || isNoAliasCall(Ptr)) &&
5601              AA::isDynamicallyUnique(A, *this, *Ptr))
5602            return true;
5603        }
5604        return false;
5605      };
5606  
5607      if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
5608                             DepClassTy::OPTIONAL,
5609                             /* IgnoreDroppableUses */ true, EquivalentUseCB))
5610        return indicatePessimisticFixpoint();
5611  
5612      return Changed;
5613    }
5614  
5615    /// See AbstractState::getAsStr().
5616    const std::string getAsStr(Attributor *A) const override {
5617      return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
5618    }
5619  
5620    /// See AbstractAttribute::trackStatistics()
5621    void trackStatistics() const override {}
5622  };
5623  
5624  /// InstanceInfo attribute for floating values.
5625  struct AAInstanceInfoFloating : AAInstanceInfoImpl {
5626    AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
5627        : AAInstanceInfoImpl(IRP, A) {}
5628  };
5629  
5630  /// InstanceInfo attribute for function arguments.
5631  struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
5632    AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
5633        : AAInstanceInfoFloating(IRP, A) {}
5634  };
5635  
5636  /// InstanceInfo attribute for call site arguments.
5637  struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
5638    AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
5639        : AAInstanceInfoImpl(IRP, A) {}
5640  
5641    /// See AbstractAttribute::updateImpl(...).
5642    ChangeStatus updateImpl(Attributor &A) override {
5643      // TODO: Once we have call site specific value information we can provide
5644      //       call site specific liveness information and then it makes
5645      //       sense to specialize attributes for call site arguments instead of
5646      //       redirecting requests to the callee argument.
5647      Argument *Arg = getAssociatedArgument();
5648      if (!Arg)
5649        return indicatePessimisticFixpoint();
5650      const IRPosition &ArgPos = IRPosition::argument(*Arg);
5651      auto *ArgAA =
5652          A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
5653      if (!ArgAA)
5654        return indicatePessimisticFixpoint();
5655      return clampStateAndIndicateChange(getState(), ArgAA->getState());
5656    }
5657  };
5658  
5659  /// InstanceInfo attribute for function return value.
5660  struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
5661    AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
5662        : AAInstanceInfoImpl(IRP, A) {
5663      llvm_unreachable("InstanceInfo is not applicable to function returns!");
5664    }
5665  
5666    /// See AbstractAttribute::initialize(...).
5667    void initialize(Attributor &A) override {
5668      llvm_unreachable("InstanceInfo is not applicable to function returns!");
5669    }
5670  
5671    /// See AbstractAttribute::updateImpl(...).
5672    ChangeStatus updateImpl(Attributor &A) override {
5673      llvm_unreachable("InstanceInfo is not applicable to function returns!");
5674    }
5675  };
5676  
5677  /// InstanceInfo attribute deduction for a call site return value.
5678  struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
5679    AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
5680        : AAInstanceInfoFloating(IRP, A) {}
5681  };
5682  } // namespace
5683  
5684  /// ----------------------- Variable Capturing ---------------------------------
5685  bool AANoCapture::isImpliedByIR(Attributor &A, const IRPosition &IRP,
5686                                  Attribute::AttrKind ImpliedAttributeKind,
5687                                  bool IgnoreSubsumingPositions) {
5688    assert(ImpliedAttributeKind == Attribute::NoCapture &&
5689           "Unexpected attribute kind");
5690    Value &V = IRP.getAssociatedValue();
5691    if (!IRP.isArgumentPosition())
5692      return V.use_empty();
5693  
5694    // You cannot "capture" null in the default address space.
5695    //
5696    // FIXME: This should use NullPointerIsDefined to account for the function
5697    // attribute.
5698    if (isa<UndefValue>(V) || (isa<ConstantPointerNull>(V) &&
5699                               V.getType()->getPointerAddressSpace() == 0)) {
5700      return true;
5701    }
5702  
5703    if (A.hasAttr(IRP, {Attribute::NoCapture},
5704                  /* IgnoreSubsumingPositions */ true, Attribute::NoCapture))
5705      return true;
5706  
5707    if (IRP.getPositionKind() == IRP_CALL_SITE_ARGUMENT)
5708      if (Argument *Arg = IRP.getAssociatedArgument())
5709        if (A.hasAttr(IRPosition::argument(*Arg),
5710                      {Attribute::NoCapture, Attribute::ByVal},
5711                      /* IgnoreSubsumingPositions */ true)) {
5712          A.manifestAttrs(IRP,
5713                          Attribute::get(V.getContext(), Attribute::NoCapture));
5714          return true;
5715        }
5716  
5717    if (const Function *F = IRP.getAssociatedFunction()) {
5718      // Check what state the associated function can actually capture.
5719      AANoCapture::StateType State;
5720      determineFunctionCaptureCapabilities(IRP, *F, State);
5721      if (State.isKnown(NO_CAPTURE)) {
5722        A.manifestAttrs(IRP,
5723                        Attribute::get(V.getContext(), Attribute::NoCapture));
5724        return true;
5725      }
5726    }
5727  
5728    return false;
5729  }
5730  
5731  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5732  /// depending on the ability of the function associated with \p IRP to capture
5733  /// state in memory and through "returning/throwing", respectively.
5734  void AANoCapture::determineFunctionCaptureCapabilities(const IRPosition &IRP,
5735                                                         const Function &F,
5736                                                         BitIntegerState &State) {
5737    // TODO: Once we have memory behavior attributes we should use them here.
5738  
5739    // If we know we cannot communicate or write to memory, we do not care about
5740    // ptr2int anymore.
5741    bool ReadOnly = F.onlyReadsMemory();
5742    bool NoThrow = F.doesNotThrow();
5743    bool IsVoidReturn = F.getReturnType()->isVoidTy();
5744    if (ReadOnly && NoThrow && IsVoidReturn) {
5745      State.addKnownBits(NO_CAPTURE);
5746      return;
5747    }
5748  
5749    // A function cannot capture state in memory if it only reads memory; it
5750    // can, however, return/throw state, and that state might be influenced by
5751    // the pointer value, e.g., loading from a returned pointer might reveal a
5752    // bit.
5752    if (ReadOnly)
5753      State.addKnownBits(NOT_CAPTURED_IN_MEM);
5754  
5755    // A function cannot communicate state back if it does not throw
5756    // exceptions and does not return values.
5757    if (NoThrow && IsVoidReturn)
5758      State.addKnownBits(NOT_CAPTURED_IN_RET);
5759  
5760    // Check existing "returned" attributes.
5761    int ArgNo = IRP.getCalleeArgNo();
5762    if (!NoThrow || ArgNo < 0 ||
5763        !F.getAttributes().hasAttrSomewhere(Attribute::Returned))
5764      return;
5765  
5766    for (unsigned U = 0, E = F.arg_size(); U < E; ++U)
5767      if (F.hasParamAttribute(U, Attribute::Returned)) {
5768        if (U == unsigned(ArgNo))
5769          State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5770        else if (ReadOnly)
5771          State.addKnownBits(NO_CAPTURE);
5772        else
5773          State.addKnownBits(NOT_CAPTURED_IN_RET);
5774        break;
5775      }
5776  }
5777  
5778  namespace {
5779  /// A class to hold the state for no-capture attributes.
5780  struct AANoCaptureImpl : public AANoCapture {
5781    AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5782  
5783    /// See AbstractAttribute::initialize(...).
5784    void initialize(Attributor &A) override {
5785      bool IsKnown;
5786      assert(!AA::hasAssumedIRAttr<Attribute::NoCapture>(
5787          A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
5788      (void)IsKnown;
5789    }
5790  
5791    /// See AbstractAttribute::updateImpl(...).
5792    ChangeStatus updateImpl(Attributor &A) override;
5793  
5794    /// See AbstractAttribute::getDeducedAttributes(...).
5795    void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
5796                              SmallVectorImpl<Attribute> &Attrs) const override {
5797      if (!isAssumedNoCaptureMaybeReturned())
5798        return;
5799  
5800      if (isArgumentPosition()) {
5801        if (isAssumedNoCapture())
5802          Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5803        else if (ManifestInternal)
5804          Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5805      }
5806    }
5807  
5808    /// See AbstractState::getAsStr().
5809    const std::string getAsStr(Attributor *A) const override {
5810      if (isKnownNoCapture())
5811        return "known not-captured";
5812      if (isAssumedNoCapture())
5813        return "assumed not-captured";
5814      if (isKnownNoCaptureMaybeReturned())
5815        return "known not-captured-maybe-returned";
5816      if (isAssumedNoCaptureMaybeReturned())
5817        return "assumed not-captured-maybe-returned";
5818      return "assumed-captured";
5819    }
5820  
5821    /// Check the use \p U and update \p State accordingly. Return true if we
5822    /// should continue to update the state.
5823    bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5824                  bool &Follow) {
5825      Instruction *UInst = cast<Instruction>(U.getUser());
5826      LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5827                        << *UInst << "\n");
5828  
5829      // Deal with ptr2int by following uses.
5830      if (isa<PtrToIntInst>(UInst)) {
5831        LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5832        return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5833                            /* Return */ true);
5834      }
5835  
5836      // For stores we already checked if we can follow them; if they make it
5837      // here we give up.
5838      if (isa<StoreInst>(UInst))
5839        return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5840                            /* Return */ true);
5841  
5842      // Explicitly catch return instructions.
5843      if (isa<ReturnInst>(UInst)) {
5844        if (UInst->getFunction() == getAnchorScope())
5845          return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5846                              /* Return */ true);
5847        return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5848                            /* Return */ true);
5849      }
5850  
5851      // For now we only use special logic for call sites. However, the tracker
5852      // itself knows about a lot of other non-capturing cases already.
5853      auto *CB = dyn_cast<CallBase>(UInst);
5854      if (!CB || !CB->isArgOperand(&U))
5855        return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5856                            /* Return */ true);
5857  
5858      unsigned ArgNo = CB->getArgOperandNo(&U);
5859      const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5860      // If we have an abstract no-capture attribute for the argument we can
5861      // use it to justify a no-capture attribute here. This allows recursion!
5862      bool IsKnownNoCapture;
5863      const AANoCapture *ArgNoCaptureAA = nullptr;
5864      bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
5865          A, this, CSArgPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
5866          &ArgNoCaptureAA);
5867      if (IsAssumedNoCapture)
5868        return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5869                            /* Return */ false);
5870      if (ArgNoCaptureAA && ArgNoCaptureAA->isAssumedNoCaptureMaybeReturned()) {
5871        Follow = true;
5872        return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5873                            /* Return */ false);
5874      }
5875  
5876      // Lastly, we could not find a reason no-capture can be assumed, so we don't.
5877      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5878                          /* Return */ true);
5879    }
5880  
5881    /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5882    /// \p CapturedInRet, then return true if we should continue updating the
5883    /// state.
5884    static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5885                             bool CapturedInInt, bool CapturedInRet) {
5886      LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5887                        << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5888      if (CapturedInMem)
5889        State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5890      if (CapturedInInt)
5891        State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5892      if (CapturedInRet)
5893        State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5894      return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5895    }
5896  };
5897  
5898  ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5899    const IRPosition &IRP = getIRPosition();
5900    Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5901                                    : &IRP.getAssociatedValue();
5902    if (!V)
5903      return indicatePessimisticFixpoint();
5904  
5905    const Function *F =
5906        isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5907  
5908    // TODO: Is the checkForAllUses below useful for constants?
5909    if (!F)
5910      return indicatePessimisticFixpoint();
5911  
5912    AANoCapture::StateType T;
5913    const IRPosition &FnPos = IRPosition::function(*F);
5914  
5915    // Readonly means we cannot capture through memory.
5916    bool IsKnown;
5917    if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5918      T.addKnownBits(NOT_CAPTURED_IN_MEM);
5919      if (IsKnown)
5920        addKnownBits(NOT_CAPTURED_IN_MEM);
5921    }
5922  
5923    // Make sure all returned values are different from the underlying value.
5924    // TODO: we could do this in a more sophisticated way inside
5925    //       AAReturnedValues, e.g., track all values that escape through returns
5926    //       directly somehow.
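        // Illustrative IR: in "define ptr @f(ptr %p) { ret ptr %p }" the
        // argument %p escapes through the return, so it can at best be
        // "no-capture-maybe-returned", not fully no-capture.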
5927    auto CheckReturnedArgs = [&](bool &UsedAssumedInformation) {
5928      SmallVector<AA::ValueAndContext> Values;
5929      if (!A.getAssumedSimplifiedValues(IRPosition::returned(*F), this, Values,
5930                                        AA::ValueScope::Intraprocedural,
5931                                        UsedAssumedInformation))
5932        return false;
5933      bool SeenConstant = false;
5934      for (const AA::ValueAndContext &VAC : Values) {
5935        if (isa<Constant>(VAC.getValue())) {
5936          if (SeenConstant)
5937            return false;
5938          SeenConstant = true;
5939        } else if (!isa<Argument>(VAC.getValue()) ||
5940                   VAC.getValue() == getAssociatedArgument())
5941          return false;
5942      }
5943      return true;
5944    };
5945  
5946    bool IsKnownNoUnwind;
5947    if (AA::hasAssumedIRAttr<Attribute::NoUnwind>(
5948            A, this, FnPos, DepClassTy::OPTIONAL, IsKnownNoUnwind)) {
5949      bool IsVoidTy = F->getReturnType()->isVoidTy();
5950      bool UsedAssumedInformation = false;
5951      if (IsVoidTy || CheckReturnedArgs(UsedAssumedInformation)) {
5952        T.addKnownBits(NOT_CAPTURED_IN_RET);
5953        if (T.isKnown(NOT_CAPTURED_IN_MEM))
5954          return ChangeStatus::UNCHANGED;
5955        if (IsKnownNoUnwind && (IsVoidTy || !UsedAssumedInformation)) {
5956          addKnownBits(NOT_CAPTURED_IN_RET);
5957          if (isKnown(NOT_CAPTURED_IN_MEM))
5958            return indicateOptimisticFixpoint();
5959        }
5960      }
5961    }
5962  
5963    auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5964      const auto *DerefAA = A.getAAFor<AADereferenceable>(
5965          *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5966      return DerefAA && DerefAA->getAssumedDereferenceableBytes();
5967    };
5968  
5969    auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
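          // Classify each use: NO_CAPTURE uses need no further work, MAY_CAPTURE
          // uses get the detailed checkUse logic, and PASSTHROUGH uses (e.g.,
          // casts) are followed to their transitive users.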
5970      switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5971      case UseCaptureKind::NO_CAPTURE:
5972        return true;
5973      case UseCaptureKind::MAY_CAPTURE:
5974        return checkUse(A, T, U, Follow);
5975      case UseCaptureKind::PASSTHROUGH:
5976        Follow = true;
5977        return true;
5978      }
5979      llvm_unreachable("Unexpected use capture kind!");
5980    };
5981  
5982    if (!A.checkForAllUses(UseCheck, *this, *V))
5983      return indicatePessimisticFixpoint();
5984  
5985    AANoCapture::StateType &S = getState();
5986    auto Assumed = S.getAssumed();
5987    S.intersectAssumedBits(T.getAssumed());
5988    if (!isAssumedNoCaptureMaybeReturned())
5989      return indicatePessimisticFixpoint();
5990    return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5991                                     : ChangeStatus::CHANGED;
5992  }
5993  
5994  /// NoCapture attribute for function arguments.
5995  struct AANoCaptureArgument final : AANoCaptureImpl {
5996    AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5997        : AANoCaptureImpl(IRP, A) {}
5998  
5999    /// See AbstractAttribute::trackStatistics()
6000    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
6001  };
6002  
6003  /// NoCapture attribute for call site arguments.
6004  struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
6005    AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
6006        : AANoCaptureImpl(IRP, A) {}
6007  
6008    /// See AbstractAttribute::updateImpl(...).
6009    ChangeStatus updateImpl(Attributor &A) override {
6010      // TODO: Once we have call site specific value information we can provide
6011      //       call site specific liveness information and then it makes
6012      //       sense to specialize attributes for call site arguments instead of
6013      //       redirecting requests to the callee argument.
6014      Argument *Arg = getAssociatedArgument();
6015      if (!Arg)
6016        return indicatePessimisticFixpoint();
6017      const IRPosition &ArgPos = IRPosition::argument(*Arg);
6018      bool IsKnownNoCapture;
6019      const AANoCapture *ArgAA = nullptr;
6020      if (AA::hasAssumedIRAttr<Attribute::NoCapture>(
6021              A, this, ArgPos, DepClassTy::REQUIRED, IsKnownNoCapture, false,
6022              &ArgAA))
6023        return ChangeStatus::UNCHANGED;
6024      if (!ArgAA || !ArgAA->isAssumedNoCaptureMaybeReturned())
6025        return indicatePessimisticFixpoint();
6026      return clampStateAndIndicateChange(getState(), ArgAA->getState());
6027    }
6028  
6029    /// See AbstractAttribute::trackStatistics()
6030    void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
6031  };
6032  
6033  /// NoCapture attribute for floating values.
6034  struct AANoCaptureFloating final : AANoCaptureImpl {
6035    AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
6036        : AANoCaptureImpl(IRP, A) {}
6037  
6038    /// See AbstractAttribute::trackStatistics()
6039    void trackStatistics() const override {
6040      STATS_DECLTRACK_FLOATING_ATTR(nocapture)
6041    }
6042  };
6043  
6044  /// NoCapture attribute for function return value.
6045  struct AANoCaptureReturned final : AANoCaptureImpl {
6046    AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
6047        : AANoCaptureImpl(IRP, A) {
6048      llvm_unreachable("NoCapture is not applicable to function returns!");
6049    }
6050  
6051    /// See AbstractAttribute::initialize(...).
6052    void initialize(Attributor &A) override {
6053      llvm_unreachable("NoCapture is not applicable to function returns!");
6054    }
6055  
6056    /// See AbstractAttribute::updateImpl(...).
6057    ChangeStatus updateImpl(Attributor &A) override {
6058      llvm_unreachable("NoCapture is not applicable to function returns!");
6059    }
6060  
6061    /// See AbstractAttribute::trackStatistics()
6062    void trackStatistics() const override {}
6063  };
6064  
6065  /// NoCapture attribute deduction for a call site return value.
6066  struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
6067    AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
6068        : AANoCaptureImpl(IRP, A) {}
6069  
6070    /// See AbstractAttribute::initialize(...).
6071    void initialize(Attributor &A) override {
6072      const Function *F = getAnchorScope();
6073      // Check what state the associated function can actually capture.
6074      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
6075    }
6076  
6077    /// See AbstractAttribute::trackStatistics()
6078    void trackStatistics() const override {
6079      STATS_DECLTRACK_CSRET_ATTR(nocapture)
6080    }
6081  };
6082  } // namespace
6083  
6084  /// ------------------ Value Simplify Attribute ----------------------------
6085  
6086  bool ValueSimplifyStateType::unionAssumed(std::optional<Value *> Other) {
6087    // FIXME: Add typecast support.
6088    SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
6089        SimplifiedAssociatedValue, Other, Ty);
6090    if (SimplifiedAssociatedValue == std::optional<Value *>(nullptr))
6091      return false;
6092  
6093    LLVM_DEBUG({
6094      if (SimplifiedAssociatedValue)
6095        dbgs() << "[ValueSimplify] is assumed to be "
6096               << **SimplifiedAssociatedValue << "\n";
6097      else
6098        dbgs() << "[ValueSimplify] is assumed to be <none>\n";
6099    });
6100    return true;
6101  }
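// A minimal sketch of the lattice join performed above (illustrative, not
// part of the original source): std::nullopt is the optimistic top, a
// concrete Value* is a known simplification, and nullptr is the pessimistic
// bottom.
//
//   join(std::nullopt, X)  == X
//   join(X, X)             == X
//   join(X, Y) for X != Y  == nullptr
//
// unionAssumed returns false exactly when the join collapses to nullptr.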
6102  
6103  namespace {
6104  struct AAValueSimplifyImpl : AAValueSimplify {
6105    AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
6106        : AAValueSimplify(IRP, A) {}
6107  
6108    /// See AbstractAttribute::initialize(...).
6109    void initialize(Attributor &A) override {
6110      if (getAssociatedValue().getType()->isVoidTy())
6111        indicatePessimisticFixpoint();
6112      if (A.hasSimplificationCallback(getIRPosition()))
6113        indicatePessimisticFixpoint();
6114    }
6115  
6116    /// See AbstractAttribute::getAsStr().
6117    const std::string getAsStr(Attributor *A) const override {
6118      LLVM_DEBUG({
6119        dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
6120        if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
6121          dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
6122      });
6123      return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
6124                            : "not-simple";
6125    }
6126  
6127    /// See AbstractAttribute::trackStatistics()
6128    void trackStatistics() const override {}
6129  
6130    /// See AAValueSimplify::getAssumedSimplifiedValue()
6131    std::optional<Value *>
6132    getAssumedSimplifiedValue(Attributor &A) const override {
6133      return SimplifiedAssociatedValue;
6134    }
6135  
6136    /// Ensure the return value is \p V with type \p Ty, if not possible return
6137    /// nullptr. If \p Check is true we will only verify such an operation would
6138    /// succeed and return a non-nullptr value if that is the case. No IR is
6139    /// generated or modified.
6140    static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
6141                             bool Check) {
6142      if (auto *TypedV = AA::getWithType(V, Ty))
6143        return TypedV;
6144      if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
6145        return Check ? &V
6146                     : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6147                           &V, &Ty, "", CtxI->getIterator());
6148      return nullptr;
6149    }
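  // Usage note (illustrative): a value whose type losslessly bitcasts to
  // \p Ty, e.g. compatible pointer types handled by
  // CreatePointerBitCastOrAddrSpaceCast, is accepted and, unless Check is
  // set, the cast is materialized at \p CtxI; an i32 where an i64 is
  // required cannot be bitcast losslessly, so ensureType returns nullptr.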
6150  
6151    /// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
6152    /// If \p Check is true we will only verify such an operation would succeed and
6153    /// return a non-nullptr value if that is the case. No IR is generated or
6154    /// modified.
6155    static Value *reproduceInst(Attributor &A,
6156                                const AbstractAttribute &QueryingAA,
6157                                Instruction &I, Type &Ty, Instruction *CtxI,
6158                                bool Check, ValueToValueMapTy &VMap) {
6159      assert(CtxI && "Cannot reproduce an instruction without context!");
6160      if (Check && (I.mayReadFromMemory() ||
6161                    !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
6162                                                  /* TLI */ nullptr)))
6163        return nullptr;
6164      for (Value *Op : I.operands()) {
6165        Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
6166        if (!NewOp) {
6167          assert(Check && "Manifest of new value unexpectedly failed!");
6168          return nullptr;
6169        }
6170        if (!Check)
6171          VMap[Op] = NewOp;
6172      }
6173      if (Check)
6174        return &I;
6175  
6176      Instruction *CloneI = I.clone();
6177      // TODO: Try to salvage debug information here.
6178      CloneI->setDebugLoc(DebugLoc());
6179      VMap[&I] = CloneI;
6180      CloneI->insertBefore(CtxI);
6181      RemapInstruction(CloneI, VMap);
6182      return CloneI;
6183    }
6184  
6185    /// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
6186    /// If \p Check is true we will only verify such an operation would succeed and
6187    /// return a non-nullptr value if that is the case. No IR is generated or
6188    /// modified.
6189    static Value *reproduceValue(Attributor &A,
6190                                 const AbstractAttribute &QueryingAA, Value &V,
6191                                 Type &Ty, Instruction *CtxI, bool Check,
6192                                 ValueToValueMapTy &VMap) {
6193      if (const auto &NewV = VMap.lookup(&V))
6194        return NewV;
6195      bool UsedAssumedInformation = false;
6196      std::optional<Value *> SimpleV = A.getAssumedSimplified(
6197          V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
6198      if (!SimpleV.has_value())
6199        return PoisonValue::get(&Ty);
6200      Value *EffectiveV = &V;
6201      if (*SimpleV)
6202        EffectiveV = *SimpleV;
6203      if (auto *C = dyn_cast<Constant>(EffectiveV))
6204        return C;
6205      if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
6206                                        A.getInfoCache()))
6207        return ensureType(A, *EffectiveV, Ty, CtxI, Check);
6208      if (auto *I = dyn_cast<Instruction>(EffectiveV))
6209        if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
6210          return ensureType(A, *NewV, Ty, CtxI, Check);
6211      return nullptr;
6212    }
6213  
6214    /// Return a value we can use as replacement for the associated one, or
6215    /// nullptr if we don't have one that makes sense.
6216    Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
6217      Value *NewV = SimplifiedAssociatedValue
6218                        ? *SimplifiedAssociatedValue
6219                        : UndefValue::get(getAssociatedType());
6220      if (NewV && NewV != &getAssociatedValue()) {
6221        ValueToValueMapTy VMap;
6222      // First verify we can reproduce the value with the required type at the
6223        // context location before we actually start modifying the IR.
6224        if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
6225                           /* CheckOnly */ true, VMap))
6226          return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
6227                                /* CheckOnly */ false, VMap);
6228      }
6229      return nullptr;
6230    }
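  // Note the two-phase pattern above: the first reproduceValue call runs
  // with Check=true and must leave the IR untouched; only if that dry run
  // succeeds do we rerun with Check=false to actually clone and remap
  // instructions. Sketch of the implied invariant (illustrative only):
  //
  //   if (reproduceValue(..., /* Check */ true, VMap))
  //     assert(reproduceValue(..., /* Check */ false, VMap) &&
  //            "Manifest must succeed after a successful dry run");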
6231  
6232    /// Helper function for querying AAValueSimplify and updating candidate.
6233    /// \param IRP The value position we are trying to unify with SimplifiedValue
6234    bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
6235                        const IRPosition &IRP, bool Simplify = true) {
6236      bool UsedAssumedInformation = false;
6237      std::optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
6238      if (Simplify)
6239        QueryingValueSimplified = A.getAssumedSimplified(
6240            IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
6241      return unionAssumed(QueryingValueSimplified);
6242    }
6243  
6244    /// Returns true if a candidate was found, false otherwise.
6245    template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
6246      if (!getAssociatedValue().getType()->isIntegerTy())
6247        return false;
6248  
6249      // This will also pass the call base context.
6250      const auto *AA =
6251          A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
6252      if (!AA)
6253        return false;
6254  
6255      std::optional<Constant *> COpt = AA->getAssumedConstant(A);
6256  
6257      if (!COpt) {
6258        SimplifiedAssociatedValue = std::nullopt;
6259        A.recordDependence(*AA, *this, DepClassTy::OPTIONAL);
6260        return true;
6261      }
6262      if (auto *C = *COpt) {
6263        SimplifiedAssociatedValue = C;
6264        A.recordDependence(*AA, *this, DepClassTy::OPTIONAL);
6265        return true;
6266      }
6267      return false;
6268    }
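  // For instance (illustrative), if AAValueConstantRange narrows an i32 to
  // the single value 42, getAssumedConstant returns that ConstantInt and it
  // becomes the simplified value; a still-undecided constant (nullopt) is
  // recorded optimistically, and a known non-constant falls through to
  // return false.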
6269  
6270    bool askSimplifiedValueForOtherAAs(Attributor &A) {
6271      if (askSimplifiedValueFor<AAValueConstantRange>(A))
6272        return true;
6273      if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
6274        return true;
6275      return false;
6276    }
6277  
6278    /// See AbstractAttribute::manifest(...).
6279    ChangeStatus manifest(Attributor &A) override {
6280      ChangeStatus Changed = ChangeStatus::UNCHANGED;
6281      for (auto &U : getAssociatedValue().uses()) {
6282        // Check if we need to adjust the insertion point to make sure the IR is
6283        // valid.
6284        Instruction *IP = dyn_cast<Instruction>(U.getUser());
6285        if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
6286          IP = PHI->getIncomingBlock(U)->getTerminator();
6287        if (auto *NewV = manifestReplacementValue(A, IP)) {
6288          LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
6289                            << " -> " << *NewV << " :: " << *this << "\n");
6290          if (A.changeUseAfterManifest(U, *NewV))
6291            Changed = ChangeStatus::CHANGED;
6292        }
6293      }
6294  
6295      return Changed | AAValueSimplify::manifest(A);
6296    }
6297  
6298    /// See AbstractState::indicatePessimisticFixpoint(...).
6299    ChangeStatus indicatePessimisticFixpoint() override {
6300      SimplifiedAssociatedValue = &getAssociatedValue();
6301      return AAValueSimplify::indicatePessimisticFixpoint();
6302    }
6303  };
6304  
6305  struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
6306    AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
6307        : AAValueSimplifyImpl(IRP, A) {}
6308  
6309    void initialize(Attributor &A) override {
6310      AAValueSimplifyImpl::initialize(A);
6311      if (A.hasAttr(getIRPosition(),
6312                    {Attribute::InAlloca, Attribute::Preallocated,
6313                     Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
6314                    /* IgnoreSubsumingPositions */ true))
6315        indicatePessimisticFixpoint();
6316    }
6317  
6318    /// See AbstractAttribute::updateImpl(...).
6319    ChangeStatus updateImpl(Attributor &A) override {
6320      // Byval is only replaceable if it is readonly; otherwise we would write into
6321      // the replaced value and not the copy that byval creates implicitly.
6322      Argument *Arg = getAssociatedArgument();
6323      if (Arg->hasByValAttr()) {
6324        // TODO: We probably need to verify synchronization is not an issue, e.g.,
6325        //       there is no race by not copying a constant byval.
6326        bool IsKnown;
6327        if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
6328          return indicatePessimisticFixpoint();
6329      }
6330  
6331      auto Before = SimplifiedAssociatedValue;
6332  
6333      auto PredForCallSite = [&](AbstractCallSite ACS) {
6334        const IRPosition &ACSArgPos =
6335            IRPosition::callsite_argument(ACS, getCallSiteArgNo());
6336        // Check if a corresponding argument was found or if it is one not
6337        // associated (which can happen for callback calls).
6338        if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6339          return false;
6340  
6341        // Simplify the argument operand explicitly and check if the result is
6342        // valid in the current scope. This avoids referring to simplified values
6343        // in other functions, e.g., we don't want to say an argument in a
6344        // static function is actually an argument in a different function.
6345        bool UsedAssumedInformation = false;
6346        std::optional<Constant *> SimpleArgOp =
6347            A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
6348        if (!SimpleArgOp)
6349          return true;
6350        if (!*SimpleArgOp)
6351          return false;
6352        if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
6353          return false;
6354        return unionAssumed(*SimpleArgOp);
6355      };
6356  
6357      // Generate an answer specific to a call site context.
6358      bool Success;
6359      bool UsedAssumedInformation = false;
6360      if (hasCallBaseContext() &&
6361          getCallBaseContext()->getCalledOperand() == Arg->getParent())
6362        Success = PredForCallSite(
6363            AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
6364      else
6365        Success = A.checkForAllCallSites(PredForCallSite, *this, true,
6366                                         UsedAssumedInformation);
6367  
6368      if (!Success)
6369        if (!askSimplifiedValueForOtherAAs(A))
6370          return indicatePessimisticFixpoint();
6371  
6372      // If a candidate was found in this update, return CHANGED.
6373      return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6374                                                 : ChangeStatus::CHANGED;
6375    }
6376  
6377    /// See AbstractAttribute::trackStatistics()
6378    void trackStatistics() const override {
6379      STATS_DECLTRACK_ARG_ATTR(value_simplify)
6380    }
6381  };
6382  
6383  struct AAValueSimplifyReturned : AAValueSimplifyImpl {
6384    AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
6385        : AAValueSimplifyImpl(IRP, A) {}
6386  
6387    /// See AAValueSimplify::getAssumedSimplifiedValue()
6388    std::optional<Value *>
6389    getAssumedSimplifiedValue(Attributor &A) const override {
6390      if (!isValidState())
6391        return nullptr;
6392      return SimplifiedAssociatedValue;
6393    }
6394  
6395    /// See AbstractAttribute::updateImpl(...).
6396    ChangeStatus updateImpl(Attributor &A) override {
6397      auto Before = SimplifiedAssociatedValue;
6398  
6399      auto ReturnInstCB = [&](Instruction &I) {
6400        auto &RI = cast<ReturnInst>(I);
6401        return checkAndUpdate(
6402            A, *this,
6403            IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
6404      };
6405  
6406      bool UsedAssumedInformation = false;
6407      if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
6408                                     UsedAssumedInformation))
6409        if (!askSimplifiedValueForOtherAAs(A))
6410          return indicatePessimisticFixpoint();
6411  
6412      // If a candidate was found in this update, return CHANGED.
6413      return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6414                                                 : ChangeStatus::CHANGED;
6415    }
6416  
6417    ChangeStatus manifest(Attributor &A) override {
6418      // We queried AAValueSimplify for the returned values so they will be
6419      // replaced if a simplified form was found. Nothing to do here.
6420      return ChangeStatus::UNCHANGED;
6421    }
6422  
6423    /// See AbstractAttribute::trackStatistics()
6424    void trackStatistics() const override {
6425      STATS_DECLTRACK_FNRET_ATTR(value_simplify)
6426    }
6427  };
6428  
6429  struct AAValueSimplifyFloating : AAValueSimplifyImpl {
6430    AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
6431        : AAValueSimplifyImpl(IRP, A) {}
6432  
6433    /// See AbstractAttribute::initialize(...).
6434    void initialize(Attributor &A) override {
6435      AAValueSimplifyImpl::initialize(A);
6436      Value &V = getAnchorValue();
6437  
6438      // TODO: Add other cases.
6439      if (isa<Constant>(V))
6440        indicatePessimisticFixpoint();
6441    }
6442  
6443    /// See AbstractAttribute::updateImpl(...).
6444    ChangeStatus updateImpl(Attributor &A) override {
6445      auto Before = SimplifiedAssociatedValue;
6446      if (!askSimplifiedValueForOtherAAs(A))
6447        return indicatePessimisticFixpoint();
6448  
6449      // If a candidate was found in this update, return CHANGED.
6450      return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6451                                                 : ChangeStatus::CHANGED;
6452    }
6453  
6454    /// See AbstractAttribute::trackStatistics()
6455    void trackStatistics() const override {
6456      STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
6457    }
6458  };
6459  
6460  struct AAValueSimplifyFunction : AAValueSimplifyImpl {
6461    AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
6462        : AAValueSimplifyImpl(IRP, A) {}
6463  
6464    /// See AbstractAttribute::initialize(...).
6465    void initialize(Attributor &A) override {
6466      SimplifiedAssociatedValue = nullptr;
6467      indicateOptimisticFixpoint();
6468    }
6469    /// See AbstractAttribute::updateImpl(...).
6470    ChangeStatus updateImpl(Attributor &A) override {
6471      llvm_unreachable(
6472          "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
6473    }
6474    /// See AbstractAttribute::trackStatistics()
6475    void trackStatistics() const override {
6476      STATS_DECLTRACK_FN_ATTR(value_simplify)
6477    }
6478  };
6479  
6480  struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
6481    AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
6482        : AAValueSimplifyFunction(IRP, A) {}
6483    /// See AbstractAttribute::trackStatistics()
6484    void trackStatistics() const override {
6485      STATS_DECLTRACK_CS_ATTR(value_simplify)
6486    }
6487  };
6488  
6489  struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
6490    AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
6491        : AAValueSimplifyImpl(IRP, A) {}
6492  
6493    void initialize(Attributor &A) override {
6494      AAValueSimplifyImpl::initialize(A);
6495      Function *Fn = getAssociatedFunction();
6496      assert(Fn && "Did expect an associated function");
6497      for (Argument &Arg : Fn->args()) {
6498        if (Arg.hasReturnedAttr()) {
6499          auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
6500                                                   Arg.getArgNo());
6501          if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
6502              checkAndUpdate(A, *this, IRP))
6503            indicateOptimisticFixpoint();
6504          else
6505            indicatePessimisticFixpoint();
6506          return;
6507        }
6508      }
6509    }
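  // Illustrative example: given `declare ptr @passthrough(ptr returned %p)`,
  // a call `%r = call ptr @passthrough(ptr %q)` lets this AA simplify the
  // call site return value to %q by forwarding to the `returned` call site
  // argument handled above.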
6510  
6511    /// See AbstractAttribute::updateImpl(...).
6512    ChangeStatus updateImpl(Attributor &A) override {
6513      return indicatePessimisticFixpoint();
6514    }
6515  
6516    void trackStatistics() const override {
6517      STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6518    }
6519  };
6520  
6521  struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6522    AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6523        : AAValueSimplifyFloating(IRP, A) {}
6524  
6525    /// See AbstractAttribute::manifest(...).
6526    ChangeStatus manifest(Attributor &A) override {
6527      ChangeStatus Changed = ChangeStatus::UNCHANGED;
6528      // TODO: We should avoid simplification duplication to begin with.
6529      auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
6530          IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
6531      if (FloatAA && FloatAA->getState().isValidState())
6532        return Changed;
6533  
6534      if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6535        Use &U = cast<CallBase>(&getAnchorValue())
6536                     ->getArgOperandUse(getCallSiteArgNo());
6537        if (A.changeUseAfterManifest(U, *NewV))
6538          Changed = ChangeStatus::CHANGED;
6539      }
6540  
6541      return Changed | AAValueSimplify::manifest(A);
6542    }
6543  
6544    void trackStatistics() const override {
6545      STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6546    }
6547  };
6548  } // namespace
6549  
6550  /// ----------------------- Heap-To-Stack Conversion ---------------------------
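// Illustrative IR sketch of the transformation implemented below, assuming a
// known constant size within -max-heap-to-stack-size and a unique matching
// free on all paths (names hypothetical):
//
//   %p = call ptr @malloc(i64 32)      ; becomes:  %p = alloca i8, i64 32
//   ...                                ;           ...
//   call void @free(ptr %p)            ;           <free call deleted>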
6551  namespace {
6552  struct AAHeapToStackFunction final : public AAHeapToStack {
6553  
6554    struct AllocationInfo {
6555      /// The call that allocates the memory.
6556      CallBase *const CB;
6557  
6558      /// The library function id for the allocation.
6559      LibFunc LibraryFunctionId = NotLibFunc;
6560  
6561      /// The status wrt. a rewrite.
6562      enum {
6563        STACK_DUE_TO_USE,
6564        STACK_DUE_TO_FREE,
6565        INVALID,
6566      } Status = STACK_DUE_TO_USE;
6567  
6568      /// Flag to indicate if we encountered a use that might free this allocation
6569      /// but which is not in the deallocation infos.
6570      bool HasPotentiallyFreeingUnknownUses = false;
6571  
6572      /// Flag to indicate that we should place the new alloca in the function
6573      /// entry block rather than where the call site (CB) is.
6574      bool MoveAllocaIntoEntry = true;
6575  
6576      /// The set of free calls that use this allocation.
6577      SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6578    };
6579  
6580    struct DeallocationInfo {
6581      /// The call that deallocates the memory.
6582      CallBase *const CB;
6583      /// The value freed by the call.
6584      Value *FreedOp;
6585  
6586      /// Flag to indicate if we don't know all objects this deallocation might
6587      /// free.
6588      bool MightFreeUnknownObjects = false;
6589  
6590      /// The set of allocation calls that are potentially freed.
6591      SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6592    };
6593  
6594    AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6595        : AAHeapToStack(IRP, A) {}
6596  
6597    ~AAHeapToStackFunction() {
6598      // Ensure we call the destructor so we release any memory allocated in the
6599      // sets.
6600      for (auto &It : AllocationInfos)
6601        It.second->~AllocationInfo();
6602      for (auto &It : DeallocationInfos)
6603        It.second->~DeallocationInfo();
6604    }
6605  
6606    void initialize(Attributor &A) override {
6607      AAHeapToStack::initialize(A);
6608  
6609      const Function *F = getAnchorScope();
6610      const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6611  
6612      auto AllocationIdentifierCB = [&](Instruction &I) {
6613        CallBase *CB = dyn_cast<CallBase>(&I);
6614        if (!CB)
6615          return true;
6616        if (Value *FreedOp = getFreedOperand(CB, TLI)) {
6617          DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB, FreedOp};
6618          return true;
6619        }
6620        // To do heap to stack, we need to know that the allocation itself is
6621        // removable once uses are rewritten, and that we can initialize the
6622        // alloca to the same pattern as the original allocation result.
6623        if (isRemovableAlloc(CB, TLI)) {
6624          auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6625          if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6626            AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6627            AllocationInfos[CB] = AI;
6628            if (TLI)
6629              TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6630          }
6631        }
6632        return true;
6633      };
6634  
6635      bool UsedAssumedInformation = false;
6636      bool Success = A.checkForAllCallLikeInstructions(
6637          AllocationIdentifierCB, *this, UsedAssumedInformation,
6638          /* CheckBBLivenessOnly */ false,
6639          /* CheckPotentiallyDead */ true);
6640      (void)Success;
6641      assert(Success && "Did not expect the call base visit callback to fail!");
6642  
6643      Attributor::SimplifictionCallbackTy SCB =
6644          [](const IRPosition &, const AbstractAttribute *,
6645             bool &) -> std::optional<Value *> { return nullptr; };
6646      for (const auto &It : AllocationInfos)
6647        A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6648                                         SCB);
6649      for (const auto &It : DeallocationInfos)
6650        A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6651                                         SCB);
6652    }
6653  
6654    const std::string getAsStr(Attributor *A) const override {
6655      unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6656      for (const auto &It : AllocationInfos) {
6657        if (It.second->Status == AllocationInfo::INVALID)
6658          ++NumInvalidMallocs;
6659        else
6660          ++NumH2SMallocs;
6661      }
6662      return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6663             std::to_string(NumInvalidMallocs);
6664    }
6665  
6666    /// See AbstractAttribute::trackStatistics().
6667    void trackStatistics() const override {
6668      STATS_DECL(
6669          MallocCalls, Function,
6670          "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6671      for (const auto &It : AllocationInfos)
6672        if (It.second->Status != AllocationInfo::INVALID)
6673          ++BUILD_STAT_NAME(MallocCalls, Function);
6674    }
6675  
6676    bool isAssumedHeapToStack(const CallBase &CB) const override {
6677      if (isValidState())
6678        if (AllocationInfo *AI =
6679                AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6680          return AI->Status != AllocationInfo::INVALID;
6681      return false;
6682    }
6683  
6684    bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6685      if (!isValidState())
6686        return false;
6687  
6688      for (const auto &It : AllocationInfos) {
6689        AllocationInfo &AI = *It.second;
6690        if (AI.Status == AllocationInfo::INVALID)
6691          continue;
6692  
6693        if (AI.PotentialFreeCalls.count(&CB))
6694          return true;
6695      }
6696  
6697      return false;
6698    }
6699  
6700    ChangeStatus manifest(Attributor &A) override {
6701      assert(getState().isValidState() &&
6702             "Attempted to manifest an invalid state!");
6703  
6704      ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6705      Function *F = getAnchorScope();
6706      const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6707  
6708      for (auto &It : AllocationInfos) {
6709        AllocationInfo &AI = *It.second;
6710        if (AI.Status == AllocationInfo::INVALID)
6711          continue;
6712  
6713        for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6714          LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6715          A.deleteAfterManifest(*FreeCall);
6716          HasChanged = ChangeStatus::CHANGED;
6717        }
6718  
6719        LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6720                          << "\n");
6721  
6722        auto Remark = [&](OptimizationRemark OR) {
6723          LibFunc IsAllocShared;
6724          if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6725            if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6726              return OR << "Moving globalized variable to the stack.";
6727          return OR << "Moving memory allocation from the heap to the stack.";
6728        };
6729        if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6730          A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6731        else
6732          A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6733  
6734        const DataLayout &DL = A.getInfoCache().getDL();
6735        Value *Size;
6736        std::optional<APInt> SizeAPI = getSize(A, *this, AI);
6737        if (SizeAPI) {
6738          Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6739        } else {
6740          LLVMContext &Ctx = AI.CB->getContext();
6741          ObjectSizeOpts Opts;
6742          ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6743          SizeOffsetValue SizeOffsetPair = Eval.compute(AI.CB);
6744          assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6745                 cast<ConstantInt>(SizeOffsetPair.Offset)->isZero());
6746          Size = SizeOffsetPair.Size;
6747        }
6748  
6749        BasicBlock::iterator IP = AI.MoveAllocaIntoEntry
6750                                      ? F->getEntryBlock().begin()
6751                                      : AI.CB->getIterator();
6752  
6753        Align Alignment(1);
6754        if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6755          Alignment = std::max(Alignment, *RetAlign);
6756        if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6757          std::optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6758          assert(AlignmentAPI && AlignmentAPI->getZExtValue() > 0 &&
6759                 "Expected an alignment during manifest!");
6760          Alignment =
6761              std::max(Alignment, assumeAligned(AlignmentAPI->getZExtValue()));
6762        }
6763  
6764        // TODO: Hoist the alloca towards the function entry.
6765        unsigned AS = DL.getAllocaAddrSpace();
6766        Instruction *Alloca =
6767            new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
6768                           AI.CB->getName() + ".h2s", IP);
6769  
6770        if (Alloca->getType() != AI.CB->getType())
6771          Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6772              Alloca, AI.CB->getType(), "malloc_cast", AI.CB->getIterator());
6773  
6774        auto *I8Ty = Type::getInt8Ty(F->getContext());
6775        auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6776        assert(InitVal &&
6777               "Must be able to materialize initial memory state of allocation");
6778  
6779        A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6780  
6781        if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6782          auto *NBB = II->getNormalDest();
6783          BranchInst::Create(NBB, AI.CB->getParent());
6784          A.deleteAfterManifest(*AI.CB);
6785        } else {
6786          A.deleteAfterManifest(*AI.CB);
6787        }
6788  
6789        // Initialize the alloca with the same value as used by the allocation
6790        // function.  We can skip undef as the initial value of an alloc is
6791        // undef, and the memset would simply end up being DSEd.
6792        if (!isa<UndefValue>(InitVal)) {
6793          IRBuilder<> Builder(Alloca->getNextNode());
6794          // TODO: Use alignment above if align!=1
6795          Builder.CreateMemSet(Alloca, InitVal, Size, std::nullopt);
6796        }
6797        HasChanged = ChangeStatus::CHANGED;
6798      }
6799  
6800      return HasChanged;
6801    }
6802  
6803    std::optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6804                                  Value &V) {
6805      bool UsedAssumedInformation = false;
6806      std::optional<Constant *> SimpleV =
6807          A.getAssumedConstant(V, AA, UsedAssumedInformation);
6808      if (!SimpleV)
6809        return APInt(64, 0);
6810      if (auto *CI = dyn_cast_or_null<ConstantInt>(*SimpleV))
6811        return CI->getValue();
6812      return std::nullopt;
6813    }
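  // For example (illustrative): if \p V folds to `i64 16` this returns
  // APInt(64, 16); if the Attributor has no assumed value at all (nullopt,
  // e.g. the value is assumed dead) it returns a zero APInt; and if \p V is
  // a known non-constant it returns std::nullopt so callers can bail out.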
6814  
6815    std::optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6816                                 AllocationInfo &AI) {
6817      auto Mapper = [&](const Value *V) -> const Value * {
6818        bool UsedAssumedInformation = false;
6819        if (std::optional<Constant *> SimpleV =
6820                A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6821          if (*SimpleV)
6822            return *SimpleV;
6823        return V;
6824      };
6825  
6826      const Function *F = getAnchorScope();
6827      const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6828      return getAllocSize(AI.CB, TLI, Mapper);
6829    }
6830  
6831    /// Collection of all malloc-like calls in a function with associated
6832    /// information.
6833    MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6834  
6835    /// Collection of all free-like calls in a function with associated
6836    /// information.
6837    MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6838  
6839    ChangeStatus updateImpl(Attributor &A) override;
6840  };
6841  
6842  ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6843    ChangeStatus Changed = ChangeStatus::UNCHANGED;
6844    const Function *F = getAnchorScope();
6845    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6846  
6847    const auto *LivenessAA =
6848        A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6849  
6850    MustBeExecutedContextExplorer *Explorer =
6851        A.getInfoCache().getMustBeExecutedContextExplorer();
6852  
6853    bool StackIsAccessibleByOtherThreads =
6854        A.getInfoCache().stackIsAccessibleByOtherThreads();
6855  
6856    LoopInfo *LI =
6857        A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F);
6858    std::optional<bool> MayContainIrreducibleControl;
6859    auto IsInLoop = [&](BasicBlock &BB) {
6860      if (&F->getEntryBlock() == &BB)
6861        return false;
6862      if (!MayContainIrreducibleControl.has_value())
6863        MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI);
6864      if (*MayContainIrreducibleControl)
6865        return true;
6866      if (!LI)
6867        return true;
6868      return LI->getLoopFor(&BB) != nullptr;
6869    };
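  // Consequence sketch (see the status update loop at the end of this
  // function): an allocation of statically unknown size, or one inside a
  // loop that did not come from __kmpc_alloc_shared, keeps
  // MoveAllocaIntoEntry false so its alloca stays at the call site instead
  // of being hoisted into the entry block.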
6870  
6871    // Flag to ensure we update our deallocation information at most once per
6872    // updateImpl call and only if we use the free check reasoning.
6873    bool HasUpdatedFrees = false;
6874  
6875    auto UpdateFrees = [&]() {
6876      HasUpdatedFrees = true;
6877  
6878      for (auto &It : DeallocationInfos) {
6879        DeallocationInfo &DI = *It.second;
6880        // For now we cannot use deallocations that have unknown inputs, skip
6881        // them.
6882        if (DI.MightFreeUnknownObjects)
6883          continue;
6884  
6885        // No need to analyze dead calls, ignore them instead.
6886        bool UsedAssumedInformation = false;
6887        if (A.isAssumedDead(*DI.CB, this, LivenessAA, UsedAssumedInformation,
6888                            /* CheckBBLivenessOnly */ true))
6889          continue;
6890  
6891        // Use the non-optimistic version to get the freed object.
6892        Value *Obj = getUnderlyingObject(DI.FreedOp);
6893        if (!Obj) {
6894          LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n");
6895          DI.MightFreeUnknownObjects = true;
6896          continue;
6897        }
6898  
6899        // Free of null and undef can be ignored as no-ops (or UB in the latter
6900        // case).
6901        if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6902          continue;
6903  
6904        CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6905        if (!ObjCB) {
6906          LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj
6907                            << "\n");
6908          DI.MightFreeUnknownObjects = true;
6909          continue;
6910        }
6911  
6912        AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6913        if (!AI) {
6914          LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6915                            << "\n");
6916          DI.MightFreeUnknownObjects = true;
6917          continue;
6918        }
6919  
6920        DI.PotentialAllocationCalls.insert(ObjCB);
6921      }
6922    };
6923  
6924    auto FreeCheck = [&](AllocationInfo &AI) {
6925      // If the stack is not accessible by other threads, the "must-free" logic
6926      // doesn't apply as the pointer could be shared and needs to be placed in
6927      // "shareable" memory.
6928      if (!StackIsAccessibleByOtherThreads) {
6929        bool IsKnownNoSycn;
6930        bool IsKnownNoSync;
6931        if (!AA::hasAssumedIRAttr<Attribute::NoSync>(
6932                A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnownNoSync)) {
6933              dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6934                        "other threads and function is not nosync:\n");
6935          return false;
6936        }
6937      }
6938      if (!HasUpdatedFrees)
6939        UpdateFrees();
6940  
6941      // TODO: Allow multi-exit functions that have different free calls.
6942      if (AI.PotentialFreeCalls.size() != 1) {
6943        LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6944                          << AI.PotentialFreeCalls.size() << "\n");
6945        return false;
6946      }
6947      CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6948      DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6949      if (!DI) {
6950        LLVM_DEBUG(
6951            dbgs() << "[H2S] unique free call was not known as deallocation call "
6952                   << *UniqueFree << "\n");
6953        return false;
6954      }
6955      if (DI->MightFreeUnknownObjects) {
6956        LLVM_DEBUG(
6957            dbgs() << "[H2S] unique free call might free unknown allocations\n");
6958        return false;
6959      }
6960      if (DI->PotentialAllocationCalls.empty())
6961        return true;
6962      if (DI->PotentialAllocationCalls.size() > 1) {
6963        LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6964                          << DI->PotentialAllocationCalls.size()
6965                          << " different allocations\n");
6966        return false;
6967      }
6968      if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6969        LLVM_DEBUG(
6970            dbgs()
6971            << "[H2S] unique free call not known to free this allocation but "
6972            << **DI->PotentialAllocationCalls.begin() << "\n");
6973        return false;
6974      }
6975  
6976      // __kmpc_alloc_shared and __kmpc_free_shared are by construction matched.
6977      if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared) {
6978        Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6979        if (!Explorer || !Explorer->findInContextOf(UniqueFree, CtxI)) {
6980          LLVM_DEBUG(dbgs() << "[H2S] unique free call might not be executed "
6981                               "with the allocation "
6982                            << *UniqueFree << "\n");
6983          return false;
6984        }
6985      }
6986      return true;
6987    };
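  // Summarized (illustrative, not normative): FreeCheck accepts an
  // allocation only if (a) the stack is accessible by other threads or the
  // function is nosync, (b) exactly one known deallocation call frees it,
  // (c) that call cannot free anything else, and (d) the free is
  // must-executed from the allocation, unless the pair is a matched
  // __kmpc_alloc_shared allocation.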
6988  
6989    auto UsesCheck = [&](AllocationInfo &AI) {
6990      bool ValidUsesOnly = true;
6991  
6992      auto Pred = [&](const Use &U, bool &Follow) -> bool {
6993        Instruction *UserI = cast<Instruction>(U.getUser());
6994        if (isa<LoadInst>(UserI))
6995          return true;
6996        if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6997          if (SI->getValueOperand() == U.get()) {
6998            LLVM_DEBUG(dbgs()
6999                       << "[H2S] escaping store to memory: " << *UserI << "\n");
7000            ValidUsesOnly = false;
7001          } else {
7002            // A store into the malloc'ed memory is fine.
7003          }
7004          return true;
7005        }
7006        if (auto *CB = dyn_cast<CallBase>(UserI)) {
7007          if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
7008            return true;
7009          if (DeallocationInfos.count(CB)) {
7010            AI.PotentialFreeCalls.insert(CB);
7011            return true;
7012          }
7013  
7014          unsigned ArgNo = CB->getArgOperandNo(&U);
7015          auto CBIRP = IRPosition::callsite_argument(*CB, ArgNo);
7016  
7017          bool IsKnownNoCapture;
7018          bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
7019              A, this, CBIRP, DepClassTy::OPTIONAL, IsKnownNoCapture);
7020  
7021          // If a call site argument use is nofree, we are fine.
7022          bool IsKnownNoFree;
7023          bool IsAssumedNoFree = AA::hasAssumedIRAttr<Attribute::NoFree>(
7024              A, this, CBIRP, DepClassTy::OPTIONAL, IsKnownNoFree);
7025  
7026          if (!IsAssumedNoCapture ||
7027              (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
7028               !IsAssumedNoFree)) {
7029            AI.HasPotentiallyFreeingUnknownUses |= !IsAssumedNoFree;
7030  
7031            // Emit a missed remark if this is missed OpenMP globalization.
7032            auto Remark = [&](OptimizationRemarkMissed ORM) {
7033              return ORM
7034                     << "Could not move globalized variable to the stack. "
7035                        "Variable is potentially captured in call. Mark "
7036                        "parameter as `__attribute__((noescape))` to override.";
7037            };
7038  
7039            if (ValidUsesOnly &&
7040                AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
7041              A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
7042  
7043            LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
7044            ValidUsesOnly = false;
7045          }
7046          return true;
7047        }
7048  
7049        if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
7050            isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
7051          Follow = true;
7052          return true;
7053        }
7054        // Unknown user for which we cannot track uses further (in a way that
7055        // makes sense).
7056        LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
7057        ValidUsesOnly = false;
7058        return true;
7059      };
7060      if (!A.checkForAllUses(Pred, *this, *AI.CB, /* CheckBBLivenessOnly */ false,
7061                             DepClassTy::OPTIONAL, /* IgnoreDroppableUses */ true,
7062                             [&](const Use &OldU, const Use &NewU) {
7063                               auto *SI = dyn_cast<StoreInst>(OldU.getUser());
7064                               return !SI || StackIsAccessibleByOtherThreads ||
7065                                      AA::isAssumedThreadLocalObject(
7066                                          A, *SI->getPointerOperand(), *this);
7067                             }))
7068        return false;
7069      return ValidUsesOnly;
7070    };
7071  
7072    // The actual update starts here. We look at all allocations and depending on
7073    // their status perform the appropriate check(s).
7074    for (auto &It : AllocationInfos) {
7075      AllocationInfo &AI = *It.second;
7076      if (AI.Status == AllocationInfo::INVALID)
7077        continue;
7078  
7079      if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
7080        std::optional<APInt> APAlign = getAPInt(A, *this, *Align);
7081        if (!APAlign) {
7082          // Can't generate an alloca which respects the required alignment
7083          // on the allocation.
7084          LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
7085                            << "\n");
7086          AI.Status = AllocationInfo::INVALID;
7087          Changed = ChangeStatus::CHANGED;
7088          continue;
7089        }
7090        if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
7091            !APAlign->isPowerOf2()) {
7092          LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
7093                            << "\n");
7094          AI.Status = AllocationInfo::INVALID;
7095          Changed = ChangeStatus::CHANGED;
7096          continue;
7097        }
7098      }
7099  
7100      std::optional<APInt> Size = getSize(A, *this, AI);
7101      if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
7102          MaxHeapToStackSize != -1) {
7103        if (!Size || Size->ugt(MaxHeapToStackSize)) {
7104          LLVM_DEBUG({
7105            if (!Size)
7106              dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
7107            else
7108              dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
7109                     << MaxHeapToStackSize << "\n";
7110          });
7111  
7112          AI.Status = AllocationInfo::INVALID;
7113          Changed = ChangeStatus::CHANGED;
7114          continue;
7115        }
7116      }
7117  
7118      switch (AI.Status) {
7119      case AllocationInfo::STACK_DUE_TO_USE:
7120        if (UsesCheck(AI))
7121          break;
7122        AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
7123        [[fallthrough]];
7124      case AllocationInfo::STACK_DUE_TO_FREE:
7125        if (FreeCheck(AI))
7126          break;
7127        AI.Status = AllocationInfo::INVALID;
7128        Changed = ChangeStatus::CHANGED;
7129        break;
7130      case AllocationInfo::INVALID:
7131        llvm_unreachable("Invalid allocations should never reach this point!");
7132      };
7133  
7134      // Check if we still think we can move it into the entry block. If the
7135      // alloca comes from a converted __kmpc_alloc_shared then we can usually
7136      // ignore the potential complications associated with loops.
7137      bool IsGlobalizedLocal =
7138          AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared;
7139      if (AI.MoveAllocaIntoEntry &&
7140          (!Size.has_value() ||
7141           (!IsGlobalizedLocal && IsInLoop(*AI.CB->getParent()))))
7142        AI.MoveAllocaIntoEntry = false;
7143    }
7144  
7145    return Changed;
7146  }
7147  } // namespace
7148  
7149  /// ----------------------- Privatizable Pointers ------------------------------
7150  namespace {
7151  struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
7152    AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
7153        : AAPrivatizablePtr(IRP, A), PrivatizableType(std::nullopt) {}
7154  
7155    ChangeStatus indicatePessimisticFixpoint() override {
7156      AAPrivatizablePtr::indicatePessimisticFixpoint();
7157      PrivatizableType = nullptr;
7158      return ChangeStatus::CHANGED;
7159    }
7160  
7161    /// Identify the type we can choose for a private copy of the underlying
7162    /// argument. std::nullopt means it is not clear yet, nullptr means there is
7163    /// none.
7164    virtual std::optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
7165  
7166    /// Return a privatizable type that encloses both T0 and T1.
7167    /// TODO: This is merely a stub for now as we should manage a mapping as well.
7168    std::optional<Type *> combineTypes(std::optional<Type *> T0,
7169                                       std::optional<Type *> T1) {
7170      if (!T0)
7171        return T1;
7172      if (!T1)
7173        return T0;
7174      if (T0 == T1)
7175        return T0;
7176      return nullptr;
7177    }
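  // For example (illustrative): combineTypes(std::nullopt, i32) == i32,
  // combineTypes(i32, i32) == i32, and combineTypes(i32, float) == nullptr,
  // since no enclosing type is synthesized yet (see the TODO above).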
7178  
7179    std::optional<Type *> getPrivatizableType() const override {
7180      return PrivatizableType;
7181    }
7182  
7183    const std::string getAsStr(Attributor *A) const override {
7184      return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
7185    }
7186  
7187  protected:
7188    std::optional<Type *> PrivatizableType;
7189  };
7190  
7191  // TODO: Do this for call site arguments (probably also other values) as well.
7192  
7193  struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
7194    AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
7195        : AAPrivatizablePtrImpl(IRP, A) {}
7196  
7197    /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7198    std::optional<Type *> identifyPrivatizableType(Attributor &A) override {
7199      // If this is a byval argument and we know all the call sites (so we can
7200      // rewrite them), there is no need to check them explicitly.
7201      bool UsedAssumedInformation = false;
7202      SmallVector<Attribute, 1> Attrs;
7203      A.getAttrs(getIRPosition(), {Attribute::ByVal}, Attrs,
7204                 /* IgnoreSubsumingPositions */ true);
7205      if (!Attrs.empty() &&
7206          A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
7207                                 true, UsedAssumedInformation))
7208        return Attrs[0].getValueAsType();
7209  
7210      std::optional<Type *> Ty;
7211      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
7212  
7213      // Make sure the associated call site argument has the same type at all call
7214      // sites and it is an allocation we know is safe to privatize; for now that
7215      // means we only allow alloca instructions.
7216      // TODO: We can additionally analyze the accesses in the callee to create
7217      //       the type from that information instead. That is a little more
7218      //       involved and will be done in a follow up patch.
7219      auto CallSiteCheck = [&](AbstractCallSite ACS) {
7220        IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
7221        // Check if a corresponding argument was found or if it is one not
7222        // associated (which can happen for callback calls).
7223        if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
7224          return false;
7225  
7226        // Check that all call sites agree on a type.
7227        auto *PrivCSArgAA =
7228            A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
7229        if (!PrivCSArgAA)
7230          return false;
7231        std::optional<Type *> CSTy = PrivCSArgAA->getPrivatizableType();
7232  
7233        LLVM_DEBUG({
7234          dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
7235          if (CSTy && *CSTy)
7236            (*CSTy)->print(dbgs());
7237          else if (CSTy)
7238            dbgs() << "<nullptr>";
7239          else
7240            dbgs() << "<none>";
7241        });
7242  
7243        Ty = combineTypes(Ty, CSTy);
7244  
7245        LLVM_DEBUG({
7246          dbgs() << " : New Type: ";
7247          if (Ty && *Ty)
7248            (*Ty)->print(dbgs());
7249          else if (Ty)
7250            dbgs() << "<nullptr>";
7251          else
7252            dbgs() << "<none>";
7253          dbgs() << "\n";
7254        });
7255  
7256        return !Ty || *Ty;
7257      };
7258  
7259      if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
7260                                  UsedAssumedInformation))
7261        return nullptr;
7262      return Ty;
7263    }
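
  // Illustrative example (hypothetical IR, not from the original source): for
  //   define internal void @f(ptr %p)
  // where every call site looks like
  //   %a = alloca i32
  //   call void @f(ptr %a)
  // each call site argument reports i32 as its privatizable type, so i32 is
  // returned; if any call site passed an alloca of a different type,
  // combineTypes would collapse the result to nullptr instead.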

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType)
      return ChangeStatus::UNCHANGED;
    if (!*PrivatizableType)
      return indicatePessimisticFixpoint();

    // The dependence is optional so we don't give up once we give up on the
    // alignment.
    A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
                        DepClassTy::OPTIONAL);

    // Avoid arguments with padding for now.
    if (!A.hasAttr(getIRPosition(), Attribute::ByVal) &&
        !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
      return indicatePessimisticFixpoint();
    }

    // Collect the types that will replace the privatizable type in the function
    // signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(*PrivatizableType, ReplacementTypes);

    // Verify callee and caller agree on how the promoted argument would be
    // passed.
    Function &Fn = *getIRPosition().getAnchorScope();
    const auto *TTI =
        A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
    if (!TTI) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
                        << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      return TTI->areTypesABICompatible(
          CB->getCaller(),
          dyn_cast_if_present<Function>(CB->getCalledOperand()),
          ReplacementTypes);
    };
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
                                UsedAssumedInformation)) {
      LLVM_DEBUG(
          dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
                 << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    // Register a rewrite of the argument.
    Argument *Arg = getAssociatedArgument();
    if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
      return indicatePessimisticFixpoint();
    }

    unsigned ArgNo = Arg->getArgNo();

    // Helper to check if for the given call site the associated argument is
    // passed to a callback where the privatization would be different.
    auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
      SmallVector<const Use *, 4> CallbackUses;
      AbstractCallSite::getCallbackUses(CB, CallbackUses);
      for (const Use *U : CallbackUses) {
        AbstractCallSite CBACS(U);
        assert(CBACS && CBACS.isCallbackCall());
        for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
          int CBArgNo = CBACS.getCallArgOperandNo(CBArg);

          LLVM_DEBUG({
            dbgs()
                << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its "
                   "parent ("
                << Arg->getParent()->getName()
                << ")\n[AAPrivatizablePtr] because it is an argument in a "
                   "callback ("
                << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                << ")\n[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperand(CBArg) << " vs "
                << CB.getArgOperand(ArgNo) << "\n"
                << "[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
          });

          if (CBArgNo != int(ArgNo))
            continue;
          const auto *CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
              *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
          if (CBArgPrivAA && CBArgPrivAA->isValidState()) {
            auto CBArgPrivTy = CBArgPrivAA->getPrivatizableType();
            if (!CBArgPrivTy)
              continue;
            if (*CBArgPrivTy == PrivatizableType)
              continue;
          }

          LLVM_DEBUG({
            dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
                   << " cannot be privatized in the context of its parent ("
                   << Arg->getParent()->getName()
                   << ")\n[AAPrivatizablePtr] because it is an argument in a "
                      "callback ("
                   << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                   << ").\n[AAPrivatizablePtr] for which the argument "
                      "privatization is not compatible.\n";
          });
          return false;
        }
      }
      return true;
    };

    // Helper to check if for the given call site the associated argument is
    // passed to a direct call where the privatization would be different.
    auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
      CallBase *DC = cast<CallBase>(ACS.getInstruction());
      int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
      assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
             "Expected a direct call operand for callback call operand");

      Function *DCCallee =
          dyn_cast_if_present<Function>(DC->getCalledOperand());
      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its "
                  "parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << DCArgNo << "@" << DCCallee->getName() << ").\n";
      });

      if (unsigned(DCArgNo) < DCCallee->arg_size()) {
        const auto *DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
            *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
            DepClassTy::REQUIRED);
        if (DCArgPrivAA && DCArgPrivAA->isValidState()) {
          auto DCArgPrivTy = DCArgPrivAA->getPrivatizableType();
          if (!DCArgPrivTy)
            return true;
          if (*DCArgPrivTy == PrivatizableType)
            return true;
        }
      }

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " cannot be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << ACS.getInstruction()->getCalledOperand()->getName()
               << ").\n[AAPrivatizablePtr] for which the argument "
                  "privatization is not compatible.\n";
      });
      return false;
    };

    // Helper to check if the associated argument is used at the given abstract
    // call site in a way that is incompatible with the privatization assumed
    // here.
    auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isDirectCall())
        return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
      if (ACS.isCallbackCall())
        return IsCompatiblePrivArgOfDirectCS(ACS);
      return false;
    };

    if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
                                UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// Given a type to privatize \p PrivType, collect the constituents (which are
  /// used) in \p ReplacementTypes.
  static void
  identifyReplacementTypes(Type *PrivType,
                           SmallVectorImpl<Type *> &ReplacementTypes) {
    // TODO: For now we expand the privatization type to the fullest which can
    //       lead to dead arguments that need to be removed later.
    assert(PrivType && "Expected privatizable type!");

    // Traverse the type, extract constituent types on the outermost level.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
        ReplacementTypes.push_back(PrivStructType->getElementType(u));
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      ReplacementTypes.append(PrivArrayType->getNumElements(),
                              PrivArrayType->getElementType());
    } else {
      ReplacementTypes.push_back(PrivType);
    }
  }
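
  // Illustrative expansion (not from the original source), one level deep:
  //   { i32, i64, ptr } -> i32, i64, ptr              (element types)
  //   [4 x float]       -> float, float, float, float (element repeated)
  //   double            -> double                     (scalar kept as-is)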

  /// Initialize \p Base according to the type \p PrivType at position \p IP.
  /// The values needed are taken from the arguments of \p F starting at
  /// position \p ArgNo.
  static void createInitialization(Type *PrivType, Value &Base, Function &F,
                                   unsigned ArgNo, BasicBlock::iterator IP) {
    assert(PrivType && "Expected privatizable type!");

    IRBuilder<NoFolder> IRB(IP->getParent(), IP);
    const DataLayout &DL = F.getDataLayout();

    // Traverse the type, build GEPs and stores.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
        Value *Ptr =
            constructPointer(&Base, PrivStructLayout->getElementOffset(u), IRB);
        new StoreInst(F.getArg(ArgNo + u), Ptr, IP);
      }
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      Type *PointeeTy = PrivArrayType->getElementType();
      uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
      for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
        Value *Ptr = constructPointer(&Base, u * PointeeTySize, IRB);
        new StoreInst(F.getArg(ArgNo + u), Ptr, IP);
      }
    } else {
      new StoreInst(F.getArg(ArgNo), &Base, IP);
    }
  }
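
  // Illustrative result (hypothetical IR, not from the original source): for
  // PrivType = { i32, i64 } with new arguments %x and %y, and Base being the
  // alloca created in the repair callback below, the callee entry would start
  // roughly with
  //   %p.priv = alloca { i32, i64 }
  //   store i32 %x, ptr %p.priv
  //   %g = getelementptr i8, ptr %p.priv, i64 8   ; offset from StructLayout
  //   store i64 %y, ptr %g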

  /// Extract values from \p Base according to the type \p PrivType at the
  /// call position \p ACS. The values are appended to \p ReplacementValues.
  void createReplacementValues(Align Alignment, Type *PrivType,
                               AbstractCallSite ACS, Value *Base,
                               SmallVectorImpl<Value *> &ReplacementValues) {
    assert(Base && "Expected base value!");
    assert(PrivType && "Expected privatizable type!");
    Instruction *IP = ACS.getInstruction();

    IRBuilder<NoFolder> IRB(IP);
    const DataLayout &DL = IP->getDataLayout();

    // Traverse the type, build GEPs and loads.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
        Type *PointeeTy = PrivStructType->getElementType(u);
        Value *Ptr =
            constructPointer(Base, PrivStructLayout->getElementOffset(u), IRB);
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP->getIterator());
        L->setAlignment(Alignment);
        ReplacementValues.push_back(L);
      }
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      Type *PointeeTy = PrivArrayType->getElementType();
      uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
      for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
        Value *Ptr = constructPointer(Base, u * PointeeTySize, IRB);
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP->getIterator());
        L->setAlignment(Alignment);
        ReplacementValues.push_back(L);
      }
    } else {
      LoadInst *L = new LoadInst(PrivType, Base, "", IP->getIterator());
      L->setAlignment(Alignment);
      ReplacementValues.push_back(L);
    }
  }
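
  // Illustrative result (hypothetical IR, not from the original source): the
  // mirror image of createInitialization. For PrivType = { i32, i64 } and a
  // call site passing %a, the call is preceded by
  //   %v0 = load i32, ptr %a
  //   %g  = getelementptr i8, ptr %a, i64 8
  //   %v1 = load i64, ptr %g
  // and %v0/%v1 become the new call operands.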

  /// See AbstractAttribute::manifest(...)
  ChangeStatus manifest(Attributor &A) override {
    if (!PrivatizableType)
      return ChangeStatus::UNCHANGED;
    assert(*PrivatizableType && "Expected privatizable type!");

    // Collect all tail calls in the function as we cannot allow new allocas to
    // escape into tail recursion.
    // TODO: Be smarter about new allocas escaping into tail calls.
    SmallVector<CallInst *, 16> TailCalls;
    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(
            [&](Instruction &I) {
              CallInst &CI = cast<CallInst>(I);
              if (CI.isTailCall())
                TailCalls.push_back(&CI);
              return true;
            },
            *this, {Instruction::Call}, UsedAssumedInformation))
      return ChangeStatus::UNCHANGED;

    Argument *Arg = getAssociatedArgument();
    // Query the AAAlign attribute of the associated argument to determine the
    // best alignment for the loads.
    const auto *AlignAA =
        A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);

    // Callback to repair the associated function. A new alloca is placed at the
    // beginning and initialized with the values passed through arguments. The
    // new alloca replaces the use of the old pointer argument.
    Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
        [=](const Attributor::ArgumentReplacementInfo &ARI,
            Function &ReplacementFn, Function::arg_iterator ArgIt) {
          BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
          BasicBlock::iterator IP = EntryBB.getFirstInsertionPt();
          const DataLayout &DL = IP->getDataLayout();
          unsigned AS = DL.getAllocaAddrSpace();
          Instruction *AI = new AllocaInst(*PrivatizableType, AS,
                                           Arg->getName() + ".priv", IP);
          createInitialization(*PrivatizableType, *AI, ReplacementFn,
                               ArgIt->getArgNo(), IP);

          if (AI->getType() != Arg->getType())
            AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
                AI, Arg->getType(), "", IP);
          Arg->replaceAllUsesWith(AI);

          for (CallInst *CI : TailCalls)
            CI->setTailCall(false);
        };

    // Callback to repair a call site of the associated function. The elements
    // of the privatizable type are loaded prior to the call and passed to the
    // new function version.
    Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
        [=](const Attributor::ArgumentReplacementInfo &ARI,
            AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
          // When no alignment is specified for the load instruction,
          // natural alignment is assumed.
          createReplacementValues(
              AlignAA ? AlignAA->getAssumedAlign() : Align(0),
              *PrivatizableType, ACS,
              ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
              NewArgOperands);
        };

    // Collect the types that will replace the privatizable type in the function
    // signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(*PrivatizableType, ReplacementTypes);

    // Register a rewrite of the argument.
    if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
                                           std::move(FnRepairCB),
                                           std::move(ACSRepairCB)))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }
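
  // End-to-end sketch (hypothetical IR, not from the original source):
  //   before:  define internal void @f(ptr %p)   ; %p only read in @f
  //            call void @f(ptr %a)              ; %a = alloca i32
  //   after:   define internal void @f(i32 %p.0) {
  //              %p.priv = alloca i32
  //              store i32 %p.0, ptr %p.priv
  //              ... former uses of %p now use %p.priv ...
  //            }
  //            %v = load i32, ptr %a
  //            call void @f(i32 %v)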

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
  AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
                     "updateImpl will not be called");
  }

  /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
  std::optional<Type *> identifyPrivatizableType(Attributor &A) override {
    Value *Obj = getUnderlyingObject(&getAssociatedValue());
    if (!Obj) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
      return nullptr;
    }

    if (auto *AI = dyn_cast<AllocaInst>(Obj))
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        if (CI->isOne())
          return AI->getAllocatedType();
    if (auto *Arg = dyn_cast<Argument>(Obj)) {
      auto *PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
          *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
      if (PrivArgAA && PrivArgAA->isAssumedPrivatizablePtr())
        return PrivArgAA->getPrivatizableType();
    }

    LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
                         "alloca nor privatizable argument: "
                      << *Obj << "!\n");
    return nullptr;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrCallSiteArgument final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (A.hasAttr(getIRPosition(), Attribute::ByVal))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType)
      return ChangeStatus::UNCHANGED;
    if (!*PrivatizableType)
      return indicatePessimisticFixpoint();

    const IRPosition &IRP = getIRPosition();
    bool IsKnownNoCapture;
    bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
        A, this, IRP, DepClassTy::REQUIRED, IsKnownNoCapture);
    if (!IsAssumedNoCapture) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
      return indicatePessimisticFixpoint();
    }

    bool IsKnownNoAlias;
    if (!AA::hasAssumedIRAttr<Attribute::NoAlias>(
            A, this, IRP, DepClassTy::REQUIRED, IsKnownNoAlias)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
      return indicatePessimisticFixpoint();
    }

    bool IsKnown;
    if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
      return indicatePessimisticFixpoint();
    }

    return ChangeStatus::UNCHANGED;
  }
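
  // Illustrative summary (not from the original source): a call site argument
  // stays privatizable only if the pointer is assumed not captured, not
  // aliasing other accessible memory, and only read. That roughly corresponds
  // to a call such as
  //   %a = alloca i32
  //   call void @f(ptr nocapture noalias readonly %a)
  // where all three checks above succeed and the alloca's type i32 survives
  // as the privatizable type for this position.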

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrCallSiteReturned final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
  }
};
} // namespace

/// -------------------- Memory Behavior Attributes ----------------------------
/// Includes read-none, read-only, and write-only.
/// ----------------------------------------------------------------------------
namespace {
struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
  AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehavior(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(A, getIRPosition(), getState());
    AAMemoryBehavior::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    SmallVector<Attribute, 2> Attrs;
    A.getAttrs(IRP, AttrKinds, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      switch (Attr.getKindAsEnum()) {
      case Attribute::ReadNone:
        State.addKnownBits(NO_ACCESSES);
        break;
      case Attribute::ReadOnly:
        State.addKnownBits(NO_WRITES);
        break;
      case Attribute::WriteOnly:
        State.addKnownBits(NO_READS);
        break;
      default:
        llvm_unreachable("Unexpected attribute!");
      }
    }

    if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
      if (!I->mayReadFromMemory())
        State.addKnownBits(NO_READS);
      if (!I->mayWriteToMemory())
        State.addKnownBits(NO_WRITES);
    }
  }
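
  // Illustrative mapping (not from the original source): the bit state uses a
  // "no-access" encoding, so existing IR attributes become known bits as
  //   readnone  -> NO_ACCESSES (== NO_READS | NO_WRITES)
  //   readonly  -> NO_WRITES
  //   writeonly -> NO_READS
  // and an anchor instruction that cannot read or write contributes the
  // matching bits as well.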

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    assert(Attrs.size() == 0);
    if (isAssumedReadNone())
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
    else if (isAssumedReadOnly())
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
    else if (isAssumedWriteOnly())
      Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    if (A.hasAttr(IRP, Attribute::ReadNone,
                  /* IgnoreSubsumingPositions */ true))
      return ChangeStatus::UNCHANGED;

    // Check if we would improve the existing attributes first.
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(A, IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
          return A.hasAttr(IRP, Attr.getKindAsEnum(),
                           /* IgnoreSubsumingPositions */ true);
        }))
      return ChangeStatus::UNCHANGED;

    // Clear existing attributes.
    A.removeAttrs(IRP, AttrKinds);
    // Clear conflicting writable attribute.
    if (isAssumedReadOnly())
      A.removeAttrs(IRP, Attribute::Writable);

    // Use the generic manifest method.
    return IRAttribute::manifest(A);
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr(Attributor *A) const override {
    if (isAssumedReadNone())
      return "readnone";
    if (isAssumedReadOnly())
      return "readonly";
    if (isAssumedWriteOnly())
      return "writeonly";
    return "may-read/write";
  }

  /// The set of IR attributes AAMemoryBehavior deals with.
  static const Attribute::AttrKind AttrKinds[3];
};

const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};

/// Memory behavior attribute for a floating value.
struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
  AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FLOATING_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FLOATING_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FLOATING_ATTR(writeonly)
  }

private:
  /// Return true if users of \p UserI might access the underlying
  /// variable/location described by \p U and should therefore be analyzed.
  bool followUsersOfUseIn(Attributor &A, const Use &U,
                          const Instruction *UserI);

  /// Update the state according to the effect of use \p U in \p UserI.
  void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
};

/// Memory behavior attribute for function argument.
struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
  AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    const IRPosition &IRP = getIRPosition();
    // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
    // can query it when we use has/getAttr. That would allow us to reuse the
    // initialize of the base class here.
    bool HasByVal = A.hasAttr(IRP, {Attribute::ByVal},
                              /* IgnoreSubsumingPositions */ true);
    getKnownStateFromValue(A, IRP, getState(),
                           /* IgnoreSubsumingPositions */ HasByVal);
  }

  ChangeStatus manifest(Attributor &A) override {
    // TODO: Pointer arguments are not supported on vectors of pointers yet.
    if (!getAssociatedValue().getType()->isPointerTy())
      return ChangeStatus::UNCHANGED;

    // TODO: From readattrs.ll: "inalloca parameters are always
    //                           considered written"
    if (A.hasAttr(getIRPosition(),
                  {Attribute::InAlloca, Attribute::Preallocated})) {
      removeKnownBits(NO_WRITES);
      removeAssumedBits(NO_WRITES);
    }
    A.removeAttrs(getIRPosition(), AttrKinds);
    return AAMemoryBehaviorFloating::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_ARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_ARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_ARG_ATTR(writeonly)
  }
};

struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
  AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorArgument(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If we don't have an associated attribute this is either a variadic call
    // or an indirect call, either way, nothing to do here.
    Argument *Arg = getAssociatedArgument();
    if (!Arg) {
      indicatePessimisticFixpoint();
      return;
    }
    if (Arg->hasByValAttr()) {
      addKnownBits(NO_WRITES);
      removeKnownBits(NO_READS);
      removeAssumedBits(NO_READS);
    }
    AAMemoryBehaviorArgument::initialize(A);
    if (getAssociatedFunction()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto *ArgAA =
        A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
    if (!ArgAA)
      return indicatePessimisticFixpoint();
    return clampStateAndIndicateChange(getState(), ArgAA->getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CSARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CSARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CSARG_ATTR(writeonly)
  }
};

/// Memory behavior attribute for a call site return position.
struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // We do not annotate returned values.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// An AA to represent the memory behavior function attributes.
struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
  AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // TODO: It would be better to merge this with AAMemoryLocation, so that
    // we could determine read/write per location. This would also have the
    // benefit of only one place trying to manifest the memory attribute.
    Function &F = cast<Function>(getAnchorValue());
    MemoryEffects ME = MemoryEffects::unknown();
    if (isAssumedReadNone())
      ME = MemoryEffects::none();
    else if (isAssumedReadOnly())
      ME = MemoryEffects::readOnly();
    else if (isAssumedWriteOnly())
      ME = MemoryEffects::writeOnly();

    A.removeAttrs(getIRPosition(), AttrKinds);
    // Clear conflicting writable attribute.
    if (ME.onlyReadsMemory())
      for (Argument &Arg : F.args())
        A.removeAttrs(IRPosition::argument(Arg), Attribute::Writable);
    return A.manifestAttrs(getIRPosition(),
                           Attribute::getWithMemoryEffects(F.getContext(), ME));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FN_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FN_ATTR(writeonly)
  }
};

/// AAMemoryBehavior attribute for call sites.
struct AAMemoryBehaviorCallSite final
    : AACalleeToCallSite<AAMemoryBehavior, AAMemoryBehaviorImpl> {
  AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
      : AACalleeToCallSite<AAMemoryBehavior, AAMemoryBehaviorImpl>(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // TODO: Deduplicate this with AAMemoryBehaviorFunction.
    CallBase &CB = cast<CallBase>(getAnchorValue());
    MemoryEffects ME = MemoryEffects::unknown();
    if (isAssumedReadNone())
      ME = MemoryEffects::none();
    else if (isAssumedReadOnly())
      ME = MemoryEffects::readOnly();
    else if (isAssumedWriteOnly())
      ME = MemoryEffects::writeOnly();

    A.removeAttrs(getIRPosition(), AttrKinds);
    // Clear conflicting writable attribute.
    if (ME.onlyReadsMemory())
      for (Use &U : CB.args())
        A.removeAttrs(IRPosition::callsite_argument(CB, U.getOperandNo()),
                      Attribute::Writable);
    return A.manifestAttrs(
        getIRPosition(), Attribute::getWithMemoryEffects(CB.getContext(), ME));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CS_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CS_ATTR(writeonly)
  }
};

ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {

  // The current assumed state used to determine a change.
  auto AssumedState = getAssumed();

  auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
    // the local state. No further analysis is required as the other memory
    // state is as optimistic as it gets.
    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      const auto *MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
      if (MemBehaviorAA) {
        intersectAssumedBits(MemBehaviorAA->getAssumed());
        return !isAtFixpoint();
      }
    }

    // Remove access kind modifiers if necessary.
    if (I.mayReadFromMemory())
      removeAssumedBits(NO_READS);
    if (I.mayWriteToMemory())
      removeAssumedBits(NO_WRITES);
    return !isAtFixpoint();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                          UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}
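
// Illustrative walk (not from the original source): starting from the
// optimistic state {NO_READS, NO_WRITES}, a body such as
//   %v = load i32, ptr %p   ; drops NO_READS
//   call void @g()          ; intersects with @g's assumed call-site state
// ends as "readonly" iff no instruction (or callee) may write; once both bits
// are dropped the state is the pessimistic fixpoint "may-read/write".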

ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we avoid
  // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto *FnMemAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
    if (FnMemAA) {
      FnMemAssumedState = FnMemAA->getAssumed();
      S.addKnownBits(FnMemAA->getKnown());
      if ((S.getAssumed() & FnMemAA->getAssumed()) == S.getAssumed())
        return ChangeStatus::UNCHANGED;
    }
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
  bool IsKnownNoCapture;
  const AANoCapture *ArgNoCaptureAA = nullptr;
  bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
      A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture, false,
      &ArgNoCaptureAA);

  if (!IsAssumedNoCapture &&
      (!ArgNoCaptureAA || !ArgNoCaptureAA->isAssumedNoCaptureMaybeReturned())) {
    S.intersectAssumedBits(FnMemAssumedState);
    return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                          : ChangeStatus::UNCHANGED;
  }

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
    Instruction *UserI = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
                      << " \n");

    // Droppable users, e.g., llvm::assume, do not actually perform any action.
    if (UserI->isDroppable())
      return true;

    // Check if the users of UserI should also be visited.
    Follow = followUsersOfUseIn(A, U, UserI);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);

    return !isAtFixpoint();
  };

  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
                                                  const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, no need to
  // follow the users of the load.
  if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI))
    return false;

  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
  const auto *CB = dyn_cast<CallBase>(UserI);
  if (!CB || !CB->isArgOperand(&U))
    return true;

  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
  if (U.get()->getType()->isPointerTy()) {
    unsigned ArgNo = CB->getArgOperandNo(&U);
    bool IsKnownNoCapture;
    return !AA::hasAssumedIRAttr<Attribute::NoCapture>(
        A, this, IRPosition::callsite_argument(*CB, ArgNo),
        DepClassTy::OPTIONAL, IsKnownNoCapture);
  }

  return true;
}
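
// Illustrative decision table (not from the original source):
//   %v = load i32, ptr %p  -> false (the loaded value is a new, unrelated SSA
//                                    value; its users cannot touch *%p)
//   ret ptr %p             -> false (handled as "capture through return")
//   call void @g(ptr %p)   -> false if %p is assumed nocapture at the call
//                             site, true otherwise (the call may return or
//                             stash the pointer, so call users are visited)
//   anything else          -> true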

void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
                                            const Instruction *UserI) {
  assert(UserI->mayReadOrWriteMemory());

  switch (UserI->getOpcode()) {
  default:
    // TODO: Handle all atomics and other side-effect operations we know of.
    break;
  case Instruction::Load:
    // Loads cause the NO_READS property to disappear.
    removeAssumedBits(NO_READS);
    return;

  case Instruction::Store:
    // Stores cause the NO_WRITES property to disappear if the use is the
    // pointer operand. Note that while capturing was taken care of elsewhere,
    // we still need to handle stores of the value itself, which are not looked
    // through.
    if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
      removeAssumedBits(NO_WRITES);
    else
      indicatePessimisticFixpoint();
    return;

  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke: {
    // For call sites we look at the argument memory behavior attribute (this
    // could be recursive!) in order to restrict our own state.
    const auto *CB = cast<CallBase>(UserI);

    // Give up on operand bundles.
    if (CB->isBundleOperand(&U)) {
      indicatePessimisticFixpoint();
      return;
    }

    // Calling a function does read the function pointer, and maybe write it if
    // the function is self-modifying.
    if (CB->isCallee(&U)) {
      removeAssumedBits(NO_READS);
      break;
    }

    // Adjust the possible access behavior based on the information on the
    // argument.
    IRPosition Pos;
    if (U.get()->getType()->isPointerTy())
      Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
    else
      Pos = IRPosition::callsite_function(*CB);
    const auto *MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
    if (!MemBehaviorAA)
      break;
    // "assumed" has at most the same bits as the MemBehaviorAA assumed
    // and at least "known".
    intersectAssumedBits(MemBehaviorAA->getAssumed());
    return;
  }
  }

  // Generally, look at the "may-properties" and adjust the assumed state if we
  // did not trigger special handling before.
  if (UserI->mayReadFromMemory())
    removeAssumedBits(NO_READS);
  if (UserI->mayWriteToMemory())
    removeAssumedBits(NO_WRITES);
}
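
// Illustrative case split (not from the original source) for the store case:
//   store i32 0, ptr %p    ; %p is the pointer operand -> drop NO_WRITES
//   store ptr %p, ptr %q   ; %p is the value operand   -> pessimistic
//                            fixpoint, the pointer escapes into memory we
//                            cannot track.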
} // namespace

/// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly,
/// inaccessiblememorargmemonly
/// ----------------------------------------------------------------------------

std::string AAMemoryLocation::getMemoryLocationsAsStr(
    AAMemoryLocation::MemoryLocationsKind MLK) {
  if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
    return "all memory";
  if (MLK == AAMemoryLocation::NO_LOCATIONS)
    return "no memory";
  std::string S = "memory:";
  if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
    S += "stack,";
  if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
    S += "constant,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
    S += "internal global,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
    S += "external global,";
  if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
    S += "argument,";
  if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
    S += "inaccessible,";
  if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
    S += "malloced,";
  if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
    S += "unknown,";
  S.pop_back();
  return S;
}
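
// Illustrative outputs (not from the original source): a location appears in
// the string when its "NO_*" bit is cleared, i.e. it may be accessed:
//   no NO_* bits set (nothing excluded)        -> "all memory"
//   MLK == NO_LOCATIONS (everything excluded)  -> "no memory"
//   all NO_* bits set except NO_ARGUMENT_MEM   -> "memory:argument"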
8309  
8310  namespace {
8311  struct AAMemoryLocationImpl : public AAMemoryLocation {
8312  
AAMemoryLocationImpl__anonc528723c7611::AAMemoryLocationImpl8313    AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
8314        : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
8315      AccessKind2Accesses.fill(nullptr);
8316    }
8317  
~AAMemoryLocationImpl__anonc528723c7611::AAMemoryLocationImpl8318    ~AAMemoryLocationImpl() {
8319      // The AccessSets are allocated via a BumpPtrAllocator, we call
8320      // the destructor manually.
8321      for (AccessSet *AS : AccessKind2Accesses)
8322        if (AS)
8323          AS->~AccessSet();
8324    }
8325  
8326    /// See AbstractAttribute::initialize(...).
initialize__anonc528723c7611::AAMemoryLocationImpl8327    void initialize(Attributor &A) override {
8328      intersectAssumedBits(BEST_STATE);
8329      getKnownStateFromValue(A, getIRPosition(), getState());
8330      AAMemoryLocation::initialize(A);
8331    }
8332  
8333    /// Return the memory behavior information encoded in the IR for \p IRP.
getKnownStateFromValue__anonc528723c7611::AAMemoryLocationImpl8334    static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
8335                                       BitIntegerState &State,
8336                                       bool IgnoreSubsumingPositions = false) {
8337      // For internal functions we ignore `argmemonly` and
8338      // `inaccessiblememorargmemonly` as we might break it via interprocedural
8339      // constant propagation. It is unclear if this is the best way but it is
8340      // unlikely this will cause real performance problems. If we are deriving
8341      // attributes for the anchor function we even remove the attribute in
8342      // addition to ignoring it.
8343      // TODO: A better way to handle this would be to add ~NO_GLOBAL_MEM /
8344      // MemoryEffects::Other as a possible location.
8345      bool UseArgMemOnly = true;
8346      Function *AnchorFn = IRP.getAnchorScope();
8347      if (AnchorFn && A.isRunOn(*AnchorFn))
8348        UseArgMemOnly = !AnchorFn->hasLocalLinkage();
8349  
8350      SmallVector<Attribute, 2> Attrs;
8351      A.getAttrs(IRP, {Attribute::Memory}, Attrs, IgnoreSubsumingPositions);
8352      for (const Attribute &Attr : Attrs) {
8353        // TODO: We can map MemoryEffects to Attributor locations more precisely.
8354        MemoryEffects ME = Attr.getMemoryEffects();
8355        if (ME.doesNotAccessMemory()) {
8356          State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
8357          continue;
8358        }
8359        if (ME.onlyAccessesInaccessibleMem()) {
8360          State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
8361          continue;
8362        }
8363        if (ME.onlyAccessesArgPointees()) {
8364          if (UseArgMemOnly)
8365            State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
8366          else {
8367            // Remove location information, only keep read/write info.
8368            ME = MemoryEffects(ME.getModRef());
8369            A.manifestAttrs(IRP,
8370                            Attribute::getWithMemoryEffects(
8371                                IRP.getAnchorValue().getContext(), ME),
8372                            /*ForceReplace*/ true);
8373          }
8374          continue;
8375        }
8376        if (ME.onlyAccessesInaccessibleOrArgMem()) {
8377          if (UseArgMemOnly)
8378            State.addKnownBits(inverseLocation(
8379                NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
8380          else {
8381            // Remove location information, only keep read/write info.
8382            ME = MemoryEffects(ME.getModRef());
8383            A.manifestAttrs(IRP,
8384                            Attribute::getWithMemoryEffects(
8385                                IRP.getAnchorValue().getContext(), ME),
8386                            /*ForceReplace*/ true);
8387          }
8388          continue;
8389        }
8390      }
8391    }
8392  
8393    /// See AbstractAttribute::getDeducedAttributes(...).
getDeducedAttributes__anonc528723c7611::AAMemoryLocationImpl8394    void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
8395                              SmallVectorImpl<Attribute> &Attrs) const override {
8396      // TODO: We can map Attributor locations to MemoryEffects more precisely.
8397      assert(Attrs.size() == 0);
8398      if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
8399        if (isAssumedReadNone())
8400          Attrs.push_back(
8401              Attribute::getWithMemoryEffects(Ctx, MemoryEffects::none()));
8402        else if (isAssumedInaccessibleMemOnly())
8403          Attrs.push_back(Attribute::getWithMemoryEffects(
8404              Ctx, MemoryEffects::inaccessibleMemOnly()));
8405        else if (isAssumedArgMemOnly())
8406          Attrs.push_back(
8407              Attribute::getWithMemoryEffects(Ctx, MemoryEffects::argMemOnly()));
8408        else if (isAssumedInaccessibleOrArgMemOnly())
8409          Attrs.push_back(Attribute::getWithMemoryEffects(
8410              Ctx, MemoryEffects::inaccessibleOrArgMemOnly()));
8411      }
8412      assert(Attrs.size() <= 1);
8413    }
8414  
8415    /// See AbstractAttribute::manifest(...).
manifest__anonc528723c7611::AAMemoryLocationImpl8416    ChangeStatus manifest(Attributor &A) override {
8417      // TODO: If AAMemoryLocation and AAMemoryBehavior are merged, we could
8418      // provide per-location modref information here.
8419      const IRPosition &IRP = getIRPosition();
8420  
8421      SmallVector<Attribute, 1> DeducedAttrs;
8422      getDeducedAttributes(A, IRP.getAnchorValue().getContext(), DeducedAttrs);
8423      if (DeducedAttrs.size() != 1)
8424        return ChangeStatus::UNCHANGED;
8425      MemoryEffects ME = DeducedAttrs[0].getMemoryEffects();
8426  
8427      return A.manifestAttrs(IRP, Attribute::getWithMemoryEffects(
8428                                      IRP.getAnchorValue().getContext(), ME));
8429    }
8430  
8431    /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
8432    bool checkForAllAccessesToMemoryKind(
8433        function_ref<bool(const Instruction *, const Value *, AccessKind,
8434                          MemoryLocationsKind)>
8435            Pred,
8436        MemoryLocationsKind RequestedMLK) const override {
8437      if (!isValidState())
8438        return false;
8439  
8440      MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
8441      if (AssumedMLK == NO_LOCATIONS)
8442        return true;
8443  
8444      unsigned Idx = 0;
8445      for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
8446           CurMLK *= 2, ++Idx) {
8447        if (CurMLK & RequestedMLK)
8448          continue;
8449  
8450        if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
8451          for (const AccessInfo &AI : *Accesses)
8452            if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
8453              return false;
8454      }
8455  
8456      return true;
8457    }
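
  // Usage sketch (hypothetical caller, assuming an `AA` reference to this
  // attribute): visit all recorded accesses to global memory and require
  // that none of them writes.
  //
  //   bool OnlyReadsGlobals = AA.checkForAllAccessesToMemoryKind(
  //       [](const Instruction *, const Value *, AccessKind AK,
  //          MemoryLocationsKind) { return !(AK & WRITE); },
  //       inverseLocation(NO_GLOBAL_MEM, false, false));
  //
  // Kinds whose bit is set in RequestedMLK are skipped by the loop above,
  // hence the inverseLocation call, mirroring the global-memory handling in
  // categorizeAccessedLocations.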
8458  
8459    ChangeStatus indicatePessimisticFixpoint() override {
8460      // If we give up and indicate a pessimistic fixpoint this instruction will
8461      // become an access for all potential access kinds:
8462      // TODO: Add pointers for argmemonly and globals to improve the results of
8463      //       checkForAllAccessesToMemoryKind.
8464      bool Changed = false;
8465      MemoryLocationsKind KnownMLK = getKnown();
8466      Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
8467      for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
8468        if (!(CurMLK & KnownMLK))
8469          updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
8470                                    getAccessKindFromInst(I));
8471      return AAMemoryLocation::indicatePessimisticFixpoint();
8472    }
8473  
8474  protected:
8475    /// Helper struct to tie together an instruction that has a read or write
8476    /// effect with the pointer it accesses (if any).
8477    struct AccessInfo {
8478  
8479      /// The instruction that caused the access.
8480      const Instruction *I;
8481  
8482      /// The base pointer that is accessed, or null if unknown.
8483      const Value *Ptr;
8484  
8485      /// The kind of access (read/write/read+write).
8486      AccessKind Kind;
8487  
8488      bool operator==(const AccessInfo &RHS) const {
8489        return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
8490      }
8491      bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
8492        if (LHS.I != RHS.I)
8493          return LHS.I < RHS.I;
8494        if (LHS.Ptr != RHS.Ptr)
8495          return LHS.Ptr < RHS.Ptr;
8496        if (LHS.Kind != RHS.Kind)
8497          return LHS.Kind < RHS.Kind;
8498        return false;
8499      }
8500    };
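
  // Note: operator() supplies the strict weak ordering used once the
  // SmallSet below outgrows its inline storage and falls back to a
  // std::set, while operator== serves the small, linear-scan representation.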
8501  
8502    /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
8503    /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
8504    using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
8505    std::array<AccessSet *, llvm::CTLog2<VALID_STATE>()> AccessKind2Accesses;
8506  
8507    /// Categorize the pointer arguments of CB that might access memory in
8508    /// AccessedLoc and update the state and access map accordingly.
8509    void
8510    categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
8511                                       AAMemoryLocation::StateType &AccessedLocs,
8512                                       bool &Changed);
8513  
8514    /// Return the kind(s) of location that may be accessed by \p I.
8515    AAMemoryLocation::MemoryLocationsKind
8516    categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
8517  
8518    /// Return the access kind as determined by \p I.
8519    AccessKind getAccessKindFromInst(const Instruction *I) {
8520      AccessKind AK = READ_WRITE;
8521      if (I) {
8522        AK = I->mayReadFromMemory() ? READ : NONE;
8523        AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
8524      }
8525      return AK;
8526    }
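
  // Example (illustrative): a load yields READ, a store yields WRITE, and an
  // instruction that both reads and writes memory, e.g., an atomicrmw or a
  // memcpy-like call, yields READ_WRITE. A null instruction conservatively
  // stays READ_WRITE.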
8527  
8528    /// Update the state \p State and the AccessKind2Accesses given that \p I is
8529    /// an access of kind \p AK to a \p MLK memory location with the access
8530    /// pointer \p Ptr.
8531    void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
8532                                   MemoryLocationsKind MLK, const Instruction *I,
8533                                   const Value *Ptr, bool &Changed,
8534                                   AccessKind AK = READ_WRITE) {
8535  
8536      assert(isPowerOf2_32(MLK) && "Expected a single location set!");
8537      auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
8538      if (!Accesses)
8539        Accesses = new (Allocator) AccessSet();
8540      Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
8541      if (MLK == NO_UNKOWN_MEM)
8542        MLK = NO_LOCATIONS;
8543      State.removeAssumedBits(MLK);
8544    }
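
  // Example (illustrative): NO_LOCAL_MEM occupies a single bit, say 1 << 3,
  // so llvm::Log2_32 maps it to slot 3 of AccessKind2Accesses; composite
  // masks such as NO_GLOBAL_MEM, which has multiple bits set, are rejected
  // by the isPowerOf2_32 assertion above.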
8545  
8546    /// Determine the underlying location kinds for \p Ptr, e.g., globals or
8547    /// arguments, and update the state and access map accordingly.
8548    void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
8549                            AAMemoryLocation::StateType &State, bool &Changed,
8550                            unsigned AccessAS = 0);
8551  
8552    /// Used to allocate access sets.
8553    BumpPtrAllocator &Allocator;
8554  };
8555  
8556  void AAMemoryLocationImpl::categorizePtrValue(
8557      Attributor &A, const Instruction &I, const Value &Ptr,
8558      AAMemoryLocation::StateType &State, bool &Changed, unsigned AccessAS) {
8559    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
8560                      << Ptr << " ["
8561                      << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
8562  
8563    auto Pred = [&](Value &Obj) {
8564      unsigned ObjectAS = Obj.getType()->getPointerAddressSpace();
8565      // TODO: recognize the TBAA used for constant accesses.
8566      MemoryLocationsKind MLK = NO_LOCATIONS;
8567  
8568      // Filter accesses to constant (GPU) memory if we have an AS at the access
8569      // site or the object is known to actually have the associated AS.
8570      if ((AccessAS == (unsigned)AA::GPUAddressSpace::Constant ||
8571           (ObjectAS == (unsigned)AA::GPUAddressSpace::Constant &&
8572            isIdentifiedObject(&Obj))) &&
8573          AA::isGPU(*I.getModule()))
8574        return true;
8575  
8576      if (isa<UndefValue>(&Obj))
8577        return true;
8578      if (isa<Argument>(&Obj)) {
8579      // TODO: For now we do not treat byval arguments as local copies performed
8580      // on the call edge, though we should. To make that happen we need to
8581      // teach various passes, e.g., DSE, about the copy effect of a byval. That
8582      // would also allow us to mark functions only accessing byval arguments as
8583      // readnone again; arguably, their accesses have no effect outside of the
8584      // function, just like accesses to allocas.
8585        MLK = NO_ARGUMENT_MEM;
8586      } else if (auto *GV = dyn_cast<GlobalValue>(&Obj)) {
8587      // Reading constant memory is not treated as a read "effect" by the
8588      // function attr pass, so we won't either. Constants defined by TBAA are
8589      // similar. (We know we do not write it because it is constant.)
8590        if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8591          if (GVar->isConstant())
8592            return true;
8593  
8594        if (GV->hasLocalLinkage())
8595          MLK = NO_GLOBAL_INTERNAL_MEM;
8596        else
8597          MLK = NO_GLOBAL_EXTERNAL_MEM;
8598      } else if (isa<ConstantPointerNull>(&Obj) &&
8599                 (!NullPointerIsDefined(getAssociatedFunction(), AccessAS) ||
8600                  !NullPointerIsDefined(getAssociatedFunction(), ObjectAS))) {
8601        return true;
8602      } else if (isa<AllocaInst>(&Obj)) {
8603        MLK = NO_LOCAL_MEM;
8604      } else if (const auto *CB = dyn_cast<CallBase>(&Obj)) {
8605        bool IsKnownNoAlias;
8606        if (AA::hasAssumedIRAttr<Attribute::NoAlias>(
8607                A, this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL,
8608                IsKnownNoAlias))
8609          MLK = NO_MALLOCED_MEM;
8610        else
8611          MLK = NO_UNKOWN_MEM;
8612      } else {
8613        MLK = NO_UNKOWN_MEM;
8614      }
8615  
8616      assert(MLK != NO_LOCATIONS && "No location specified!");
8617      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8618                        << Obj << " -> " << getMemoryLocationsAsStr(MLK) << "\n");
8619      updateStateAndAccessesMap(State, MLK, &I, &Obj, Changed,
8620                                getAccessKindFromInst(&I));
8621  
8622      return true;
8623    };
8624  
8625    const auto *AA = A.getAAFor<AAUnderlyingObjects>(
8626        *this, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
8627    if (!AA || !AA->forallUnderlyingObjects(Pred, AA::Intraprocedural)) {
8628      LLVM_DEBUG(
8629          dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
8630      updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
8631                                getAccessKindFromInst(&I));
8632      return;
8633    }
8634  
8635    LLVM_DEBUG(
8636        dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8637               << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8638  }
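
// Example (illustrative): in the IR below the underlying object of %p is an
// alloca, so the store is recorded as a WRITE access to NO_LOCAL_MEM and that
// bit is removed from the assumed "not accessed" set:
//
//   %a = alloca i32
//   %p = getelementptr i32, ptr %a, i32 0
//   store i32 0, ptr %p
//
// A pointer whose underlying objects cannot be identified instead falls back
// to NO_UNKOWN_MEM, i.e., all locations are conservatively assumed accessed.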
8639  
8640  void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8641      Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8642      bool &Changed) {
8643    for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8644  
8645      // Skip non-pointer arguments.
8646      const Value *ArgOp = CB.getArgOperand(ArgNo);
8647      if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8648        continue;
8649  
8650      // Skip readnone arguments.
8651      const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8652      const auto *ArgOpMemLocationAA =
8653          A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8654  
8655      if (ArgOpMemLocationAA && ArgOpMemLocationAA->isAssumedReadNone())
8656        continue;
8657  
8658      // Categorize potentially accessed pointer arguments as if there was an
8659      // access instruction with them as pointer.
8660      categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8661    }
8662  }
8663  
8664  AAMemoryLocation::MemoryLocationsKind
8665  AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8666                                                    bool &Changed) {
8667    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8668                      << I << "\n");
8669  
8670    AAMemoryLocation::StateType AccessedLocs;
8671    AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8672  
8673    if (auto *CB = dyn_cast<CallBase>(&I)) {
8674  
8675      // First check if we assume any memory access is visible.
8676      const auto *CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8677          *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8678      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8679                        << " [" << CBMemLocationAA << "]\n");
8680      if (!CBMemLocationAA) {
8681        updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr,
8682                                  Changed, getAccessKindFromInst(&I));
8683        return NO_UNKOWN_MEM;
8684      }
8685  
8686      if (CBMemLocationAA->isAssumedReadNone())
8687        return NO_LOCATIONS;
8688  
8689      if (CBMemLocationAA->isAssumedInaccessibleMemOnly()) {
8690        updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8691                                  Changed, getAccessKindFromInst(&I));
8692        return AccessedLocs.getAssumed();
8693      }
8694  
8695      uint32_t CBAssumedNotAccessedLocs =
8696          CBMemLocationAA->getAssumedNotAccessedLocation();
8697  
8698      // Set the argmemonly and global bits as we handle them separately below.
8699      uint32_t CBAssumedNotAccessedLocsNoArgMem =
8700          CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8701  
8702      for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8703        if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8704          continue;
8705        updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8706                                  getAccessKindFromInst(&I));
8707      }
8708  
8709      // Now handle global memory if it might be accessed. This is slightly tricky
8710      // as NO_GLOBAL_MEM has multiple bits set.
8711      bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8712      if (HasGlobalAccesses) {
8713        auto AccessPred = [&](const Instruction *, const Value *Ptr,
8714                              AccessKind Kind, MemoryLocationsKind MLK) {
8715          updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8716                                    getAccessKindFromInst(&I));
8717          return true;
8718        };
8719        if (!CBMemLocationAA->checkForAllAccessesToMemoryKind(
8720                AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8721          return AccessedLocs.getWorstState();
8722      }
8723  
8724      LLVM_DEBUG(
8725          dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8726                 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8727  
8728      // Now handle argument memory if it might be accessed.
8729      bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8730      if (HasArgAccesses)
8731        categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8732  
8733      LLVM_DEBUG(
8734          dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8735                 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8736  
8737      return AccessedLocs.getAssumed();
8738    }
8739  
8740    if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8741      LLVM_DEBUG(
8742          dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8743                 << I << " [" << *Ptr << "]\n");
8744      categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed,
8745                         Ptr->getType()->getPointerAddressSpace());
8746      return AccessedLocs.getAssumed();
8747    }
8748  
8749    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8750                      << I << "\n");
8751    updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8752                              getAccessKindFromInst(&I));
8753    return AccessedLocs.getAssumed();
8754  }
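
// Example (illustrative): for a call site `call void @g(ptr %p)` where @g is
// assumed to only access argument memory, the loop above records nothing
// beyond argument memory, and categorizeArgumentPointerLocations then
// categorizes %p as if the call itself accessed it, e.g., as NO_LOCAL_MEM
// when %p is an alloca of the caller.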
8755  
8756  /// An AA to represent the memory behavior function attributes.
8757  struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8758    AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8759        : AAMemoryLocationImpl(IRP, A) {}
8760  
8761    /// See AbstractAttribute::updateImpl(Attributor &A).
8762    ChangeStatus updateImpl(Attributor &A) override {
8763  
8764      const auto *MemBehaviorAA =
8765          A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8766      if (MemBehaviorAA && MemBehaviorAA->isAssumedReadNone()) {
8767        if (MemBehaviorAA->isKnownReadNone())
8768          return indicateOptimisticFixpoint();
8769        assert(isAssumedReadNone() &&
8770               "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8771        A.recordDependence(*MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8772        return ChangeStatus::UNCHANGED;
8773      }
8774  
8775      // The current assumed state used to determine a change.
8776      auto AssumedState = getAssumed();
8777      bool Changed = false;
8778  
8779      auto CheckRWInst = [&](Instruction &I) {
8780        MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8781        LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8782                          << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8783        removeAssumedBits(inverseLocation(MLK, false, false));
8784      // Stop once only the valid bit is set in the *not assumed location* set,
8785      // thus once we don't actually exclude any memory locations in the state.
8786        return getAssumedNotAccessedLocation() != VALID_STATE;
8787      };
8788  
8789      bool UsedAssumedInformation = false;
8790      if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8791                                              UsedAssumedInformation))
8792        return indicatePessimisticFixpoint();
8793  
8794      Changed |= AssumedState != getAssumed();
8795      return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8796    }
8797  
8798    /// See AbstractAttribute::trackStatistics()
8799    void trackStatistics() const override {
8800      if (isAssumedReadNone())
8801        STATS_DECLTRACK_FN_ATTR(readnone)
8802      else if (isAssumedArgMemOnly())
8803        STATS_DECLTRACK_FN_ATTR(argmemonly)
8804      else if (isAssumedInaccessibleMemOnly())
8805        STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8806      else if (isAssumedInaccessibleOrArgMemOnly())
8807        STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8808    }
8809  };
8810  
8811  /// AAMemoryLocation attribute for call sites.
8812  struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8813    AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8814        : AAMemoryLocationImpl(IRP, A) {}
8815  
8816    /// See AbstractAttribute::updateImpl(...).
8817    ChangeStatus updateImpl(Attributor &A) override {
8818      // TODO: Once we have call site specific value information we can provide
8819      //       call site specific liveness information and then it makes
8820      //       sense to specialize attributes for call site arguments instead of
8821      //       redirecting requests to the callee argument.
8822      Function *F = getAssociatedFunction();
8823      const IRPosition &FnPos = IRPosition::function(*F);
8824      auto *FnAA =
8825          A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8826      if (!FnAA)
8827        return indicatePessimisticFixpoint();
8828      bool Changed = false;
8829      auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8830                            AccessKind Kind, MemoryLocationsKind MLK) {
8831        updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8832                                  getAccessKindFromInst(I));
8833        return true;
8834      };
8835      if (!FnAA->checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8836        return indicatePessimisticFixpoint();
8837      return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8838    }
8839  
8840    /// See AbstractAttribute::trackStatistics()
8841    void trackStatistics() const override {
8842      if (isAssumedReadNone())
8843        STATS_DECLTRACK_CS_ATTR(readnone)
8844    }
8845  };
8846  } // namespace
8847  
8848  /// ------------------ denormal-fp-math Attribute -------------------------
8849  
8850  namespace {
8851  struct AADenormalFPMathImpl : public AADenormalFPMath {
8852    AADenormalFPMathImpl(const IRPosition &IRP, Attributor &A)
8853        : AADenormalFPMath(IRP, A) {}
8854  
8855    const std::string getAsStr(Attributor *A) const override {
8856      std::string Str("AADenormalFPMath[");
8857      raw_string_ostream OS(Str);
8858  
8859      DenormalState Known = getKnown();
8860      if (Known.Mode.isValid())
8861        OS << "denormal-fp-math=" << Known.Mode;
8862      else
8863        OS << "invalid";
8864  
8865      if (Known.ModeF32.isValid())
8866        OS << " denormal-fp-math-f32=" << Known.ModeF32;
8867      OS << ']';
8868      return Str;
8869    }
8870  };
8871  
8872  struct AADenormalFPMathFunction final : AADenormalFPMathImpl {
8873    AADenormalFPMathFunction(const IRPosition &IRP, Attributor &A)
8874        : AADenormalFPMathImpl(IRP, A) {}
8875  
8876    void initialize(Attributor &A) override {
8877      const Function *F = getAnchorScope();
8878      DenormalMode Mode = F->getDenormalModeRaw();
8879      DenormalMode ModeF32 = F->getDenormalModeF32Raw();
8880  
8881      // TODO: Handling this here prevents handling the case where a callee has a
8882      // fixed denormal-fp-math with dynamic denormal-fp-math-f32, but called from
8883      // a function with a fully fixed mode.
8884      if (ModeF32 == DenormalMode::getInvalid())
8885        ModeF32 = Mode;
8886      Known = DenormalState{Mode, ModeF32};
8887      if (isModeFixed())
8888        indicateFixpoint();
8889    }
8890  
8891    ChangeStatus updateImpl(Attributor &A) override {
8892      ChangeStatus Change = ChangeStatus::UNCHANGED;
8893  
8894      auto CheckCallSite = [=, &Change, &A](AbstractCallSite CS) {
8895        Function *Caller = CS.getInstruction()->getFunction();
8896        LLVM_DEBUG(dbgs() << "[AADenormalFPMath] Call " << Caller->getName()
8897                          << "->" << getAssociatedFunction()->getName() << '\n');
8898  
8899        const auto *CallerInfo = A.getAAFor<AADenormalFPMath>(
8900            *this, IRPosition::function(*Caller), DepClassTy::REQUIRED);
8901        if (!CallerInfo)
8902          return false;
8903  
8904        Change = Change | clampStateAndIndicateChange(this->getState(),
8905                                                      CallerInfo->getState());
8906        return true;
8907      };
8908  
8909      bool AllCallSitesKnown = true;
8910      if (!A.checkForAllCallSites(CheckCallSite, *this, true, AllCallSitesKnown))
8911        return indicatePessimisticFixpoint();
8912  
8913      if (Change == ChangeStatus::CHANGED && isModeFixed())
8914        indicateFixpoint();
8915      return Change;
8916    }
8917  
8918    ChangeStatus manifest(Attributor &A) override {
8919      LLVMContext &Ctx = getAssociatedFunction()->getContext();
8920  
8921      SmallVector<Attribute, 2> AttrToAdd;
8922      SmallVector<StringRef, 2> AttrToRemove;
8923      if (Known.Mode == DenormalMode::getDefault()) {
8924        AttrToRemove.push_back("denormal-fp-math");
8925      } else {
8926        AttrToAdd.push_back(
8927            Attribute::get(Ctx, "denormal-fp-math", Known.Mode.str()));
8928      }
8929  
8930      if (Known.ModeF32 != Known.Mode) {
8931        AttrToAdd.push_back(
8932            Attribute::get(Ctx, "denormal-fp-math-f32", Known.ModeF32.str()));
8933      } else {
8934        AttrToRemove.push_back("denormal-fp-math-f32");
8935      }
8936  
8937      auto &IRP = getIRPosition();
8938  
8939      // TODO: There should be a combined add and remove API.
8940      return A.removeAttrs(IRP, AttrToRemove) |
8941             A.manifestAttrs(IRP, AttrToAdd, /*ForceReplace=*/true);
8942    }
8943  
8944    void trackStatistics() const override {
8945      STATS_DECLTRACK_FN_ATTR(denormal_fp_math)
8946    }
8947  };
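
// Example (illustrative): if every caller of an internal function carries
// "denormal-fp-math"="preserve-sign,preserve-sign", the clamping in
// updateImpl above lets the callee adopt that mode and manifest() emits the
// same string attribute; the attribute is dropped instead when the mode
// equals DenormalMode::getDefault(), which needs no annotation.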
8948  } // namespace
8949  
8950  /// ------------------ Value Constant Range Attribute -------------------------
8951  
8952  namespace {
8953  struct AAValueConstantRangeImpl : AAValueConstantRange {
8954    using StateType = IntegerRangeState;
8955    AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8956        : AAValueConstantRange(IRP, A) {}
8957  
8958    /// See AbstractAttribute::initialize(..).
8959    void initialize(Attributor &A) override {
8960      if (A.hasSimplificationCallback(getIRPosition())) {
8961        indicatePessimisticFixpoint();
8962        return;
8963      }
8964  
8965      // Intersect a range given by SCEV.
8966      intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8967  
8968      // Intersect a range given by LVI.
8969      intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8970    }
8971  
8972    /// See AbstractAttribute::getAsStr().
8973    const std::string getAsStr(Attributor *A) const override {
8974      std::string Str;
8975      llvm::raw_string_ostream OS(Str);
8976      OS << "range(" << getBitWidth() << ")<";
8977      getKnown().print(OS);
8978      OS << " / ";
8979      getAssumed().print(OS);
8980      OS << ">";
8981      return Str;
8982    }
8983  
8984    /// Helper function to get a SCEV expr for the associated value at program
8985    /// point \p I.
8986    const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8987      if (!getAnchorScope())
8988        return nullptr;
8989  
8990      ScalarEvolution *SE =
8991          A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8992              *getAnchorScope());
8993  
8994      LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8995          *getAnchorScope());
8996  
8997      if (!SE || !LI)
8998        return nullptr;
8999  
9000      const SCEV *S = SE->getSCEV(&getAssociatedValue());
9001      if (!I)
9002        return S;
9003  
9004      return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
9005    }
9006  
9007    /// Helper function to get a range from SCEV for the associated value at
9008    /// program point \p I.
9009    ConstantRange getConstantRangeFromSCEV(Attributor &A,
9010                                           const Instruction *I = nullptr) const {
9011      if (!getAnchorScope())
9012        return getWorstState(getBitWidth());
9013  
9014      ScalarEvolution *SE =
9015          A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
9016              *getAnchorScope());
9017  
9018      const SCEV *S = getSCEV(A, I);
9019      if (!SE || !S)
9020        return getWorstState(getBitWidth());
9021  
9022      return SE->getUnsignedRange(S);
9023    }
9024  
9025    /// Helper function to get a range from LVI for the associated value at
9026    /// program point \p I.
9027    ConstantRange
9028    getConstantRangeFromLVI(Attributor &A,
9029                            const Instruction *CtxI = nullptr) const {
9030      if (!getAnchorScope())
9031        return getWorstState(getBitWidth());
9032  
9033      LazyValueInfo *LVI =
9034          A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
9035              *getAnchorScope());
9036  
9037      if (!LVI || !CtxI)
9038        return getWorstState(getBitWidth());
9039      return LVI->getConstantRange(&getAssociatedValue(),
9040                                   const_cast<Instruction *>(CtxI),
9041                                   /*UndefAllowed*/ false);
9042    }
9043  
9044    /// Return true if \p CtxI is valid for querying outside analyses.
9045    /// This basically makes sure we do not ask intra-procedural analysis
9046    /// about a context in the wrong function or a context that violates
9047    /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
9048    /// if the original context of this AA is OK or should be considered invalid.
9049    bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
9050                                                 const Instruction *CtxI,
9051                                                 bool AllowAACtxI) const {
9052      if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
9053        return false;
9054  
9055      // Our context might be in a different function; no intra-procedural
9056      // analysis (neither ScalarEvolution nor LazyValueInfo) can handle that.
9057      if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
9058        return false;
9059  
9060      // If the context is not dominated by the value there are paths to the
9061      // context that do not define the value. This cannot be handled by
9062      // LazyValueInfo so we need to bail.
9063      if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
9064        InformationCache &InfoCache = A.getInfoCache();
9065        const DominatorTree *DT =
9066            InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
9067                *I->getFunction());
9068        return DT && DT->dominates(I, CtxI);
9069      }
9070  
9071      return true;
9072    }
9073  
9074    /// See AAValueConstantRange::getKnownConstantRange(..).
9075    ConstantRange
9076    getKnownConstantRange(Attributor &A,
9077                          const Instruction *CtxI = nullptr) const override {
9078      if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
9079                                                   /* AllowAACtxI */ false))
9080        return getKnown();
9081  
9082      ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
9083      ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
9084      return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
9085    }
9086  
9087    /// See AAValueConstantRange::getAssumedConstantRange(..).
9088    ConstantRange
9089    getAssumedConstantRange(Attributor &A,
9090                            const Instruction *CtxI = nullptr) const override {
9091      // TODO: Make SCEV use Attributor assumptions.
9092      //       We may be able to bound a variable range via assumptions in
9093      //       Attributor, e.g., if x is assumed to be in [1, 3] and y is known
9094      //       to evolve to x^2 + x, then we can say that y is in [2, 12].
9095      if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
9096                                                   /* AllowAACtxI */ false))
9097        return getAssumed();
9098  
9099      ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
9100      ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
9101      return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
9102    }
9103  
9104    /// Helper function to create MDNode for range metadata.
9105    static MDNode *
9106    getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
9107                              const ConstantRange &AssumedConstantRange) {
9108      Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
9109                                    Ty, AssumedConstantRange.getLower())),
9110                                ConstantAsMetadata::get(ConstantInt::get(
9111                                    Ty, AssumedConstantRange.getUpper()))};
9112      return MDNode::get(Ctx, LowAndHigh);
9113    }
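
  // Example (illustrative): for an i32 range [0, 10) this produces metadata
  // equivalent to the textual IR
  //
  //   %x = load i32, ptr %p, !range !0
  //   ...
  //   !0 = !{i32 0, i32 10}
  //
  // where, per ConstantRange convention, the upper bound is exclusive.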
9114  
9115    /// Return true if \p Assumed is included in \p KnownRanges.
9116    static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
9117  
9118      if (Assumed.isFullSet())
9119        return false;
9120  
9121      if (!KnownRanges)
9122        return true;
9123  
9124      // If multiple ranges are annotated in the IR, we give up annotating the
9125      // assumed range for now.
9126  
9127      // TODO: If there exists a known range which contains the assumed range,
9128      // we can say the assumed range is better.
9129      if (KnownRanges->getNumOperands() > 2)
9130        return false;
9131  
9132      ConstantInt *Lower =
9133          mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
9134      ConstantInt *Upper =
9135          mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
9136  
9137      ConstantRange Known(Lower->getValue(), Upper->getValue());
9138      return Known.contains(Assumed) && Known != Assumed;
9139    }
9140  
9141    /// Helper function to set range metadata.
9142    static bool
9143    setRangeMetadataIfisBetterRange(Instruction *I,
9144                                    const ConstantRange &AssumedConstantRange) {
9145      auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
9146      if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
9147        if (!AssumedConstantRange.isEmptySet()) {
9148          I->setMetadata(LLVMContext::MD_range,
9149                         getMDNodeForConstantRange(I->getType(), I->getContext(),
9150                                                   AssumedConstantRange));
9151          return true;
9152        }
9153      }
9154      return false;
9155    }
9156  
9157    /// See AbstractAttribute::manifest()
9158    ChangeStatus manifest(Attributor &A) override {
9159      ChangeStatus Changed = ChangeStatus::UNCHANGED;
9160      ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
9161      assert(!AssumedConstantRange.isFullSet() && "Invalid state");
9162  
9163      auto &V = getAssociatedValue();
9164      if (!AssumedConstantRange.isEmptySet() &&
9165          !AssumedConstantRange.isSingleElement()) {
9166        if (Instruction *I = dyn_cast<Instruction>(&V)) {
9167          assert(I == getCtxI() && "Should not annotate an instruction which is "
9168                                   "not the context instruction");
9169          if (isa<CallInst>(I) || isa<LoadInst>(I))
9170            if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
9171              Changed = ChangeStatus::CHANGED;
9172        }
9173      }
9174  
9175      return Changed;
9176    }
9177  };
9178  
9179  struct AAValueConstantRangeArgument final
9180      : AAArgumentFromCallSiteArguments<
9181            AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
9182            true /* BridgeCallBaseContext */> {
9183    using Base = AAArgumentFromCallSiteArguments<
9184        AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
9185        true /* BridgeCallBaseContext */>;
9186    AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
9187        : Base(IRP, A) {}
9188  
9189    /// See AbstractAttribute::trackStatistics()
9190    void trackStatistics() const override {
9191      STATS_DECLTRACK_ARG_ATTR(value_range)
9192    }
9193  };
9194  
9195  struct AAValueConstantRangeReturned
9196      : AAReturnedFromReturnedValues<AAValueConstantRange,
9197                                     AAValueConstantRangeImpl,
9198                                     AAValueConstantRangeImpl::StateType,
9199                                     /* PropogateCallBaseContext */ true> {
9200    using Base =
9201        AAReturnedFromReturnedValues<AAValueConstantRange,
9202                                     AAValueConstantRangeImpl,
9203                                     AAValueConstantRangeImpl::StateType,
9204                                     /* PropogateCallBaseContext */ true>;
9205    AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
9206        : Base(IRP, A) {}
9207  
9208    /// See AbstractAttribute::initialize(...).
9209    void initialize(Attributor &A) override {
9210      if (!A.isFunctionIPOAmendable(*getAssociatedFunction()))
9211        indicatePessimisticFixpoint();
9212    }
9213  
9214    /// See AbstractAttribute::trackStatistics()
9215    void trackStatistics() const override {
9216      STATS_DECLTRACK_FNRET_ATTR(value_range)
9217    }
9218  };
9219  
9220  struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
9221    AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
9222        : AAValueConstantRangeImpl(IRP, A) {}
9223  
9224    /// See AbstractAttribute::initialize(...).
9225    void initialize(Attributor &A) override {
9226      AAValueConstantRangeImpl::initialize(A);
9227      if (isAtFixpoint())
9228        return;
9229  
9230      Value &V = getAssociatedValue();
9231  
9232      if (auto *C = dyn_cast<ConstantInt>(&V)) {
9233        unionAssumed(ConstantRange(C->getValue()));
9234        indicateOptimisticFixpoint();
9235        return;
9236      }
9237  
9238      if (isa<UndefValue>(&V)) {
9239        // Collapse the undef state to 0.
9240        unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
9241        indicateOptimisticFixpoint();
9242        return;
9243      }
9244  
9245      if (isa<CallBase>(&V))
9246        return;
9247  
9248      if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
9249        return;
9250  
9251      // If it is a load instruction with range metadata, use it.
9252      if (LoadInst *LI = dyn_cast<LoadInst>(&V))
9253        if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
9254          intersectKnown(getConstantRangeFromMetadata(*RangeMD));
9255          return;
9256        }
9257  
9258      // We can work with PHI and select instructions as we traverse their
9259      // during update.
9260      if (isa<SelectInst>(V) || isa<PHINode>(V))
9261        return;
9262  
9263      // Otherwise we give up.
9264      indicatePessimisticFixpoint();
9265  
9266      LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
9267                        << getAssociatedValue() << "\n");
9268    }
9269  
9270    bool calculateBinaryOperator(
9271        Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
9272        const Instruction *CtxI,
9273        SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9274      Value *LHS = BinOp->getOperand(0);
9275      Value *RHS = BinOp->getOperand(1);
9276  
9277      // Simplify the operands first.
9278      bool UsedAssumedInformation = false;
9279      const auto &SimplifiedLHS = A.getAssumedSimplified(
9280          IRPosition::value(*LHS, getCallBaseContext()), *this,
9281          UsedAssumedInformation, AA::Interprocedural);
9282      if (!SimplifiedLHS.has_value())
9283        return true;
9284      if (!*SimplifiedLHS)
9285        return false;
9286      LHS = *SimplifiedLHS;
9287  
9288      const auto &SimplifiedRHS = A.getAssumedSimplified(
9289          IRPosition::value(*RHS, getCallBaseContext()), *this,
9290          UsedAssumedInformation, AA::Interprocedural);
9291      if (!SimplifiedRHS.has_value())
9292        return true;
9293      if (!*SimplifiedRHS)
9294        return false;
9295      RHS = *SimplifiedRHS;
9296  
9297      // TODO: Allow non integers as well.
9298      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9299        return false;
9300  
9301      auto *LHSAA = A.getAAFor<AAValueConstantRange>(
9302          *this, IRPosition::value(*LHS, getCallBaseContext()),
9303          DepClassTy::REQUIRED);
9304      if (!LHSAA)
9305        return false;
9306      QuerriedAAs.push_back(LHSAA);
9307      auto LHSAARange = LHSAA->getAssumedConstantRange(A, CtxI);
9308  
9309      auto *RHSAA = A.getAAFor<AAValueConstantRange>(
9310          *this, IRPosition::value(*RHS, getCallBaseContext()),
9311          DepClassTy::REQUIRED);
9312      if (!RHSAA)
9313        return false;
9314      QuerriedAAs.push_back(RHSAA);
9315      auto RHSAARange = RHSAA->getAssumedConstantRange(A, CtxI);
9316  
9317      auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
9318  
9319      T.unionAssumed(AssumedRange);
9320  
9321      // TODO: Track a known state too.
9322  
9323      return T.isValidState();
9324    }
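
  // Example (illustrative): with LHS in [1, 4) and RHS in [10, 12),
  // ConstantRange::binaryOp for Instruction::Add yields [11, 15): the
  // operands {1,2,3} and {10,11} can only sum to values in {11,...,14}.
  // The result is then unioned into the assumed state T above.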
9325  
9326    bool calculateCastInst(
9327        Attributor &A, CastInst *CastI, IntegerRangeState &T,
9328        const Instruction *CtxI,
9329        SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9330      assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
9331      // TODO: Allow non integers as well.
9332      Value *OpV = CastI->getOperand(0);
9333  
9334      // Simplify the operand first.
9335      bool UsedAssumedInformation = false;
9336      const auto &SimplifiedOpV = A.getAssumedSimplified(
9337          IRPosition::value(*OpV, getCallBaseContext()), *this,
9338          UsedAssumedInformation, AA::Interprocedural);
9339      if (!SimplifiedOpV.has_value())
9340        return true;
9341      if (!*SimplifiedOpV)
9342        return false;
9343      OpV = *SimplifiedOpV;
9344  
9345      if (!OpV->getType()->isIntegerTy())
9346        return false;
9347  
9348      auto *OpAA = A.getAAFor<AAValueConstantRange>(
9349          *this, IRPosition::value(*OpV, getCallBaseContext()),
9350          DepClassTy::REQUIRED);
9351      if (!OpAA)
9352        return false;
9353      QuerriedAAs.push_back(OpAA);
9354      T.unionAssumed(OpAA->getAssumed().castOp(CastI->getOpcode(),
9355                                               getState().getBitWidth()));
9356      return T.isValidState();
9357    }
9358  
9359    bool
9360    calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
9361                     const Instruction *CtxI,
9362                     SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
9363      Value *LHS = CmpI->getOperand(0);
9364      Value *RHS = CmpI->getOperand(1);
9365  
9366      // Simplify the operands first.
9367      bool UsedAssumedInformation = false;
9368      const auto &SimplifiedLHS = A.getAssumedSimplified(
9369          IRPosition::value(*LHS, getCallBaseContext()), *this,
9370          UsedAssumedInformation, AA::Interprocedural);
9371      if (!SimplifiedLHS.has_value())
9372        return true;
9373      if (!*SimplifiedLHS)
9374        return false;
9375      LHS = *SimplifiedLHS;
9376  
9377      const auto &SimplifiedRHS = A.getAssumedSimplified(
9378          IRPosition::value(*RHS, getCallBaseContext()), *this,
9379          UsedAssumedInformation, AA::Interprocedural);
9380      if (!SimplifiedRHS.has_value())
9381        return true;
9382      if (!*SimplifiedRHS)
9383        return false;
9384      RHS = *SimplifiedRHS;
9385  
9386      // TODO: Allow non integers as well.
9387      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9388        return false;
9389  
9390      auto *LHSAA = A.getAAFor<AAValueConstantRange>(
9391          *this, IRPosition::value(*LHS, getCallBaseContext()),
9392          DepClassTy::REQUIRED);
9393      if (!LHSAA)
9394        return false;
9395      QuerriedAAs.push_back(LHSAA);
9396      auto *RHSAA = A.getAAFor<AAValueConstantRange>(
9397          *this, IRPosition::value(*RHS, getCallBaseContext()),
9398          DepClassTy::REQUIRED);
9399      if (!RHSAA)
9400        return false;
9401      QuerriedAAs.push_back(RHSAA);
9402      auto LHSAARange = LHSAA->getAssumedConstantRange(A, CtxI);
9403      auto RHSAARange = RHSAA->getAssumedConstantRange(A, CtxI);
9404  
9405      // If one of them is empty set, we can't decide.
9406      if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
9407        return true;
9408  
9409      bool MustTrue = false, MustFalse = false;
9410  
9411      auto AllowedRegion =
9412          ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
9413  
9414      if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
9415        MustFalse = true;
9416  
9417      if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
9418        MustTrue = true;
9419  
9420      assert((!MustTrue || !MustFalse) &&
9421             "Either MustTrue or MustFalse should be false!");
9422  
9423      if (MustTrue)
9424        T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
9425      else if (MustFalse)
9426        T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
9427      else
9428        T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
9429  
9430      LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " after "
9431                        << (MustTrue ? "true" : (MustFalse ? "false" : "unknown"))
9432                        << ": " << T << "\n\t" << *LHSAA << "\t<op>\n\t"
9433                        << *RHSAA);
9434  
9435      // TODO: Track a known state too.
9436      return T.isValidState();
9437    }
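
  // Example (illustrative): for `icmp slt` with LHS in [0, 4) and RHS in
  // [10, 12), every LHS value compares less than every RHS value, so
  // LHSAARange.icmp(...) holds and the single-element 1-bit range {1} is
  // assumed; for overlapping ranges neither MustTrue nor MustFalse triggers
  // and the full 1-bit set is assumed instead.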
9438  
9439    /// See AbstractAttribute::updateImpl(...).
9440    ChangeStatus updateImpl(Attributor &A) override {
9441  
9442      IntegerRangeState T(getBitWidth());
9443      auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
9444        Instruction *I = dyn_cast<Instruction>(&V);
9445        if (!I || isa<CallBase>(I)) {
9446  
9447          // Simplify the operand first.
9448          bool UsedAssumedInformation = false;
9449          const auto &SimplifiedOpV = A.getAssumedSimplified(
9450              IRPosition::value(V, getCallBaseContext()), *this,
9451              UsedAssumedInformation, AA::Interprocedural);
9452          if (!SimplifiedOpV.has_value())
9453            return true;
9454          if (!*SimplifiedOpV)
9455            return false;
9456          Value *VPtr = *SimplifiedOpV;
9457  
9458          // If the value is not an instruction, we query the AA through the
9458          // Attributor.
9459          const auto *AA = A.getAAFor<AAValueConstantRange>(
9460              *this, IRPosition::value(*VPtr, getCallBaseContext()),
9461              DepClassTy::REQUIRED);
9462  
9463          // The clamp operator is not used so the program point CtxI is utilized.
9464          if (AA)
9465            T.unionAssumed(AA->getAssumedConstantRange(A, CtxI));
9466          else
9467            return false;
9468  
9469          return T.isValidState();
9470        }
9471  
9472        SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
9473        if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
9474          if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
9475            return false;
9476        } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
9477          if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
9478            return false;
9479        } else if (auto *CastI = dyn_cast<CastInst>(I)) {
9480          if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
9481            return false;
9482        } else {
9483          // Give up with other instructions.
9484          // TODO: Add other instructions
9485  
9486          T.indicatePessimisticFixpoint();
9487          return false;
9488        }
9489  
9490        // Catch circular reasoning in a pessimistic way for now.
9491        // TODO: Check how the range evolves and if we stripped anything, see also
9492        //       AADereferenceable or AAAlign for similar situations.
9493        for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
9494          if (QueriedAA != this)
9495            continue;
9496        // If we are in a steady state we do not need to worry.
9497          if (T.getAssumed() == getState().getAssumed())
9498            continue;
9499          T.indicatePessimisticFixpoint();
9500        }
9501  
9502        return T.isValidState();
9503      };
9504  
9505      if (!VisitValueCB(getAssociatedValue(), getCtxI()))
9506        return indicatePessimisticFixpoint();
9507  
9508      // Ensure that long def-use chains can't cause circular reasoning either by
9509      // introducing a cutoff below.
9510      if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
9511        return ChangeStatus::UNCHANGED;
9512      if (++NumChanges > MaxNumChanges) {
9513        LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
9514                          << " changes but only " << MaxNumChanges
9515                          << " are allowed to avoid cyclic reasoning.\n");
9516        return indicatePessimisticFixpoint();
9517      }
9518      return ChangeStatus::CHANGED;
9519    }
9520  
9521    /// See AbstractAttribute::trackStatistics()
9522    void trackStatistics() const override {
9523      STATS_DECLTRACK_FLOATING_ATTR(value_range)
9524    }
9525  
9526    /// Tracker to bail after too many widening steps of the constant range.
9527    int NumChanges = 0;
9528  
9529    /// Upper bound for the number of allowed changes (=widening steps) for the
9530    /// constant range before we give up.
9531    static constexpr int MaxNumChanges = 5;
9532  };
9533  
9534  struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
9535    AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
9536        : AAValueConstantRangeImpl(IRP, A) {}
9537  
9538    /// See AbstractAttribute::initialize(...).
9539    ChangeStatus updateImpl(Attributor &A) override {
9540      llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
9541                       "not be called");
9542    }
9543  
9544    /// See AbstractAttribute::trackStatistics()
9545    void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
9546  };
9547  
9548  struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
9549    AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
9550        : AAValueConstantRangeFunction(IRP, A) {}
9551  
9552    /// See AbstractAttribute::trackStatistics()
9553    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
9554  };
9555  
9556  struct AAValueConstantRangeCallSiteReturned
9557      : AACalleeToCallSite<AAValueConstantRange, AAValueConstantRangeImpl,
9558                           AAValueConstantRangeImpl::StateType,
9559                           /* IntroduceCallBaseContext */ true> {
9560    AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
9561        : AACalleeToCallSite<AAValueConstantRange, AAValueConstantRangeImpl,
9562                             AAValueConstantRangeImpl::StateType,
9563                             /* IntroduceCallBaseContext */ true>(IRP, A) {}
9564  
9565    /// See AbstractAttribute::initialize(...).
9566    void initialize(Attributor &A) override {
9567      // If it is a call instruction with range metadata, use the metadata.
9568      if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
9569        if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
9570          intersectKnown(getConstantRangeFromMetadata(*RangeMD));
9571  
9572      AAValueConstantRangeImpl::initialize(A);
9573    }
9574  
9575    /// See AbstractAttribute::trackStatistics()
9576    void trackStatistics() const override {
9577      STATS_DECLTRACK_CSRET_ATTR(value_range)
9578    }
9579  };
9580  struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
9581    AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
9582        : AAValueConstantRangeFloating(IRP, A) {}
9583  
9584    /// See AbstractAttribute::manifest()
9585    ChangeStatus manifest(Attributor &A) override {
9586      return ChangeStatus::UNCHANGED;
9587    }
9588  
9589    /// See AbstractAttribute::trackStatistics()
9590    void trackStatistics() const override {
9591      STATS_DECLTRACK_CSARG_ATTR(value_range)
9592    }
9593  };
9594  } // namespace
9595  
9596  /// ------------------ Potential Values Attribute -------------------------
9597  
9598  namespace {
9599  struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
9600    using StateType = PotentialConstantIntValuesState;
9601  
9602    AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
9603        : AAPotentialConstantValues(IRP, A) {}
9604  
9605    /// See AbstractAttribute::initialize(..).
9606    void initialize(Attributor &A) override {
9607      if (A.hasSimplificationCallback(getIRPosition()))
9608        indicatePessimisticFixpoint();
9609      else
9610        AAPotentialConstantValues::initialize(A);
9611    }
9612  
9613    bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S,
9614                                   bool &ContainsUndef, bool ForSelf) {
9615      SmallVector<AA::ValueAndContext> Values;
9616      bool UsedAssumedInformation = false;
9617      if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural,
9618                                        UsedAssumedInformation)) {
9619        // Avoid recursion when the caller is computing constant values for this
9620        // IRP itself.
9621        if (ForSelf)
9622          return false;
9623        if (!IRP.getAssociatedType()->isIntegerTy())
9624          return false;
9625        auto *PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9626            *this, IRP, DepClassTy::REQUIRED);
9627        if (!PotentialValuesAA || !PotentialValuesAA->getState().isValidState())
9628          return false;
9629        ContainsUndef = PotentialValuesAA->getState().undefIsContained();
9630        S = PotentialValuesAA->getState().getAssumedSet();
9631        return true;
9632      }
9633  
9634      // Copy all the constant values, except UndefValue. ContainsUndef is true
9635      // iff Values contains only UndefValue instances. If there are other known
9636      // constants, then UndefValue is dropped.
9637      ContainsUndef = false;
9638      for (auto &It : Values) {
9639        if (isa<UndefValue>(It.getValue())) {
9640          ContainsUndef = true;
9641          continue;
9642        }
9643        auto *CI = dyn_cast<ConstantInt>(It.getValue());
9644        if (!CI)
9645          return false;
9646        S.insert(CI->getValue());
9647      }
9648      ContainsUndef &= S.empty();
9649  
9650      return true;
9651    }
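  // [Editor] Illustrative example, not part of the upstream source: given
  // simplified values {undef, i32 4, i32 7}, the loop above skips the undef,
  // S becomes {4, 7}, and `ContainsUndef &= S.empty()` resets ContainsUndef
  // to false. ContainsUndef stays true only if every value was undef, in
  // which case S is empty.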
9652  
9653    /// See AbstractAttribute::getAsStr().
9654    const std::string getAsStr(Attributor *A) const override {
9655      std::string Str;
9656      llvm::raw_string_ostream OS(Str);
9657      OS << getState();
9658      return Str;
9659    }
9660  
9661    /// See AbstractAttribute::updateImpl(...).
9662    ChangeStatus updateImpl(Attributor &A) override {
9663      return indicatePessimisticFixpoint();
9664    }
9665  };
9666  
9667  struct AAPotentialConstantValuesArgument final
9668      : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
9669                                        AAPotentialConstantValuesImpl,
9670                                        PotentialConstantIntValuesState> {
9671    using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
9672                                                 AAPotentialConstantValuesImpl,
9673                                                 PotentialConstantIntValuesState>;
9674    AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
9675        : Base(IRP, A) {}
9676  
9677    /// See AbstractAttribute::trackStatistics()
9678    void trackStatistics() const override {
9679      STATS_DECLTRACK_ARG_ATTR(potential_values)
9680    }
9681  };
9682  
9683  struct AAPotentialConstantValuesReturned
9684      : AAReturnedFromReturnedValues<AAPotentialConstantValues,
9685                                     AAPotentialConstantValuesImpl> {
9686    using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
9687                                              AAPotentialConstantValuesImpl>;
9688    AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
9689        : Base(IRP, A) {}
9690  
9691    void initialize(Attributor &A) override {
9692      if (!A.isFunctionIPOAmendable(*getAssociatedFunction()))
9693        indicatePessimisticFixpoint();
9694      Base::initialize(A);
9695    }
9696  
9697    /// See AbstractAttribute::trackStatistics()
9698    void trackStatistics() const override {
9699      STATS_DECLTRACK_FNRET_ATTR(potential_values)
9700    }
9701  };
9702  
9703  struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
9704    AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
9705        : AAPotentialConstantValuesImpl(IRP, A) {}
9706  
9707    /// See AbstractAttribute::initialize(..).
9708    void initialize(Attributor &A) override {
9709      AAPotentialConstantValuesImpl::initialize(A);
9710      if (isAtFixpoint())
9711        return;
9712  
9713      Value &V = getAssociatedValue();
9714  
9715      if (auto *C = dyn_cast<ConstantInt>(&V)) {
9716        unionAssumed(C->getValue());
9717        indicateOptimisticFixpoint();
9718        return;
9719      }
9720  
9721      if (isa<UndefValue>(&V)) {
9722        unionAssumedWithUndef();
9723        indicateOptimisticFixpoint();
9724        return;
9725      }
9726  
9727      if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
9728        return;
9729  
9730      if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9731        return;
9732  
9733      indicatePessimisticFixpoint();
9734  
9735      LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9736                        << getAssociatedValue() << "\n");
9737    }
9738  
9739    static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9740                                  const APInt &RHS) {
9741      return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9742    }
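  // [Editor] Illustrative example, not part of the upstream source: with the
  // predicate ICmpInst::ICMP_ULT, LHS = APInt(8, 3) and RHS = APInt(8, 200),
  // the forwarded ICmpInst::compare call yields true.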
9743  
9744    static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9745                                   uint32_t ResultBitWidth) {
9746      Instruction::CastOps CastOp = CI->getOpcode();
9747      switch (CastOp) {
9748      default:
9749        llvm_unreachable("unsupported or not integer cast");
9750      case Instruction::Trunc:
9751        return Src.trunc(ResultBitWidth);
9752      case Instruction::SExt:
9753        return Src.sext(ResultBitWidth);
9754      case Instruction::ZExt:
9755        return Src.zext(ResultBitWidth);
9756      case Instruction::BitCast:
9757        return Src;
9758      }
9759    }
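  // [Editor] Illustrative example, not part of the upstream source: for a
  // `trunc i32 %x to i8` with Src = APInt(32, 300), Src.trunc(8) keeps the
  // low 8 bits and yields 44; for `sext i8 %y to i32` with Src = APInt(8, -1),
  // Src.sext(32) produces the all-ones 32-bit value.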
9760  
9761    static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9762                                         const APInt &LHS, const APInt &RHS,
9763                                         bool &SkipOperation, bool &Unsupported) {
9764      Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9765      // Unsupported is set to true when the binary operator is not supported.
9766      // SkipOperation is set to true when UB would occur with the given
9767      // operand pair (LHS, RHS).
9768      // TODO: we should look at nsw and nuw keywords to handle operations
9769      //       that create poison or undef value.
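    // [Editor] Illustrative example, not part of the upstream source: a
    // `udiv` with RHS == 0 takes the case below that sets SkipOperation, so
    // the caller drops this (LHS, RHS) pair instead of recording a bogus
    // result; an opcode like FAdd falls into `default` and sets Unsupported.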
9770      switch (BinOpcode) {
9771      default:
9772        Unsupported = true;
9773        return LHS;
9774      case Instruction::Add:
9775        return LHS + RHS;
9776      case Instruction::Sub:
9777        return LHS - RHS;
9778      case Instruction::Mul:
9779        return LHS * RHS;
9780      case Instruction::UDiv:
9781        if (RHS.isZero()) {
9782          SkipOperation = true;
9783          return LHS;
9784        }
9785        return LHS.udiv(RHS);
9786      case Instruction::SDiv:
9787        if (RHS.isZero()) {
9788          SkipOperation = true;
9789          return LHS;
9790        }
9791        return LHS.sdiv(RHS);
9792      case Instruction::URem:
9793        if (RHS.isZero()) {
9794          SkipOperation = true;
9795          return LHS;
9796        }
9797        return LHS.urem(RHS);
9798      case Instruction::SRem:
9799        if (RHS.isZero()) {
9800          SkipOperation = true;
9801          return LHS;
9802        }
9803        return LHS.srem(RHS);
9804      case Instruction::Shl:
9805        return LHS.shl(RHS);
9806      case Instruction::LShr:
9807        return LHS.lshr(RHS);
9808      case Instruction::AShr:
9809        return LHS.ashr(RHS);
9810      case Instruction::And:
9811        return LHS & RHS;
9812      case Instruction::Or:
9813        return LHS | RHS;
9814      case Instruction::Xor:
9815        return LHS ^ RHS;
9816      }
9817    }
9818  
9819    bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9820                                             const APInt &LHS, const APInt &RHS) {
9821      bool SkipOperation = false;
9822      bool Unsupported = false;
9823      APInt Result =
9824          calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9825      if (Unsupported)
9826        return false;
9827      // If SkipOperation is true, we can ignore this operand pair (LHS, RHS).
9828      if (!SkipOperation)
9829        unionAssumed(Result);
9830      return isValidState();
9831    }
9832  
9833    ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9834      auto AssumedBefore = getAssumed();
9835      Value *LHS = ICI->getOperand(0);
9836      Value *RHS = ICI->getOperand(1);
9837  
9838      bool LHSContainsUndef = false, RHSContainsUndef = false;
9839      SetTy LHSAAPVS, RHSAAPVS;
9840      if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9841                                     LHSContainsUndef, /* ForSelf */ false) ||
9842          !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9843                                     RHSContainsUndef, /* ForSelf */ false))
9844        return indicatePessimisticFixpoint();
9845  
9846      // TODO: make use of undef flag to limit potential values aggressively.
9847      bool MaybeTrue = false, MaybeFalse = false;
9848      const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9849      if (LHSContainsUndef && RHSContainsUndef) {
9850        // The result of any comparison between undefs can be soundly replaced
9851        // with undef.
9852        unionAssumedWithUndef();
9853      } else if (LHSContainsUndef) {
9854        for (const APInt &R : RHSAAPVS) {
9855          bool CmpResult = calculateICmpInst(ICI, Zero, R);
9856          MaybeTrue |= CmpResult;
9857          MaybeFalse |= !CmpResult;
9858          if (MaybeTrue & MaybeFalse)
9859            return indicatePessimisticFixpoint();
9860        }
9861      } else if (RHSContainsUndef) {
9862        for (const APInt &L : LHSAAPVS) {
9863          bool CmpResult = calculateICmpInst(ICI, L, Zero);
9864          MaybeTrue |= CmpResult;
9865          MaybeFalse |= !CmpResult;
9866          if (MaybeTrue & MaybeFalse)
9867            return indicatePessimisticFixpoint();
9868        }
9869      } else {
9870        for (const APInt &L : LHSAAPVS) {
9871          for (const APInt &R : RHSAAPVS) {
9872            bool CmpResult = calculateICmpInst(ICI, L, R);
9873            MaybeTrue |= CmpResult;
9874            MaybeFalse |= !CmpResult;
9875            if (MaybeTrue & MaybeFalse)
9876              return indicatePessimisticFixpoint();
9877          }
9878        }
9879      }
9880      if (MaybeTrue)
9881        unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9882      if (MaybeFalse)
9883        unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9884      return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9885                                           : ChangeStatus::CHANGED;
9886    }
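  // [Editor] Illustrative example, not part of the upstream source: for
  // `icmp eq i8 %a, %b` with LHS set {0} and RHS set {0, 1}, the loop sees a
  // true (0 == 0) and a false (0 == 1) outcome and gives up via the
  // pessimistic fixpoint; with {0} on both sides only MaybeTrue is set and
  // the assumed set becomes the single value i1 1.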
9887  
9888    ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9889      auto AssumedBefore = getAssumed();
9890      Value *LHS = SI->getTrueValue();
9891      Value *RHS = SI->getFalseValue();
9892  
9893      bool UsedAssumedInformation = false;
9894      std::optional<Constant *> C = A.getAssumedConstant(
9895          *SI->getCondition(), *this, UsedAssumedInformation);
9896  
9897      // Check if we only need one operand.
9898      bool OnlyLeft = false, OnlyRight = false;
9899      if (C && *C && (*C)->isOneValue())
9900        OnlyLeft = true;
9901      else if (C && *C && (*C)->isZeroValue())
9902        OnlyRight = true;
9903  
9904      bool LHSContainsUndef = false, RHSContainsUndef = false;
9905      SetTy LHSAAPVS, RHSAAPVS;
9906      if (!OnlyRight &&
9907          !fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9908                                     LHSContainsUndef, /* ForSelf */ false))
9909        return indicatePessimisticFixpoint();
9910  
9911      if (!OnlyLeft &&
9912          !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9913                                     RHSContainsUndef, /* ForSelf */ false))
9914        return indicatePessimisticFixpoint();
9915  
9916      if (OnlyLeft || OnlyRight) {
9917        // select (true/false), lhs, rhs
9918        auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS;
9919        auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef;
9920  
9921        if (Undef)
9922          unionAssumedWithUndef();
9923        else {
9924          for (const auto &It : *OpAA)
9925            unionAssumed(It);
9926        }
9927  
9928      } else if (LHSContainsUndef && RHSContainsUndef) {
9929        // select i1 *, undef, undef => undef
9930        unionAssumedWithUndef();
9931      } else {
9932        for (const auto &It : LHSAAPVS)
9933          unionAssumed(It);
9934        for (const auto &It : RHSAAPVS)
9935          unionAssumed(It);
9936      }
9937      return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9938                                           : ChangeStatus::CHANGED;
9939    }
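  // [Editor] Illustrative example, not part of the upstream source: for
  // `select i1 %c, i32 3, i32 5`, if %c simplifies to constant true only the
  // true-operand set {3} is merged; if %c remains unknown, the result is the
  // union {3, 5} of both operand sets.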
9940  
9941    ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9942      auto AssumedBefore = getAssumed();
9943      if (!CI->isIntegerCast())
9944        return indicatePessimisticFixpoint();
9945      assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9946      uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9947      Value *Src = CI->getOperand(0);
9948  
9949      bool SrcContainsUndef = false;
9950      SetTy SrcPVS;
9951      if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS,
9952                                     SrcContainsUndef, /* ForSelf */ false))
9953        return indicatePessimisticFixpoint();
9954  
9955      if (SrcContainsUndef)
9956        unionAssumedWithUndef();
9957      else {
9958        for (const APInt &S : SrcPVS) {
9959          APInt T = calculateCastInst(CI, S, ResultBitWidth);
9960          unionAssumed(T);
9961        }
9962      }
9963      return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9964                                           : ChangeStatus::CHANGED;
9965    }
9966  
9967    ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9968      auto AssumedBefore = getAssumed();
9969      Value *LHS = BinOp->getOperand(0);
9970      Value *RHS = BinOp->getOperand(1);
9971  
9972      bool LHSContainsUndef = false, RHSContainsUndef = false;
9973      SetTy LHSAAPVS, RHSAAPVS;
9974      if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9975                                     LHSContainsUndef, /* ForSelf */ false) ||
9976          !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9977                                     RHSContainsUndef, /* ForSelf */ false))
9978        return indicatePessimisticFixpoint();
9979  
9980      const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9981  
9982      // TODO: make use of undef flag to limit potential values aggressively.
9983      if (LHSContainsUndef && RHSContainsUndef) {
9984        if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9985          return indicatePessimisticFixpoint();
9986      } else if (LHSContainsUndef) {
9987        for (const APInt &R : RHSAAPVS) {
9988          if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9989            return indicatePessimisticFixpoint();
9990        }
9991      } else if (RHSContainsUndef) {
9992        for (const APInt &L : LHSAAPVS) {
9993          if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9994            return indicatePessimisticFixpoint();
9995        }
9996      } else {
9997        for (const APInt &L : LHSAAPVS) {
9998          for (const APInt &R : RHSAAPVS) {
9999            if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
10000              return indicatePessimisticFixpoint();
10001          }
10002        }
10003      }
10004      return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10005                                           : ChangeStatus::CHANGED;
10006    }
10007  
10008    ChangeStatus updateWithInstruction(Attributor &A, Instruction *Inst) {
10009      auto AssumedBefore = getAssumed();
10010      SetTy Incoming;
10011      bool ContainsUndef;
10012      if (!fillSetWithConstantValues(A, IRPosition::value(*Inst), Incoming,
10013                                     ContainsUndef, /* ForSelf */ true))
10014        return indicatePessimisticFixpoint();
10015      if (ContainsUndef) {
10016        unionAssumedWithUndef();
10017      } else {
10018        for (const auto &It : Incoming)
10019          unionAssumed(It);
10020      }
10021      return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10022                                           : ChangeStatus::CHANGED;
10023    }
10024  
10025    /// See AbstractAttribute::updateImpl(...).
10026    ChangeStatus updateImpl(Attributor &A) override {
10027      Value &V = getAssociatedValue();
10028      Instruction *I = dyn_cast<Instruction>(&V);
10029  
10030      if (auto *ICI = dyn_cast<ICmpInst>(I))
10031        return updateWithICmpInst(A, ICI);
10032  
10033      if (auto *SI = dyn_cast<SelectInst>(I))
10034        return updateWithSelectInst(A, SI);
10035  
10036      if (auto *CI = dyn_cast<CastInst>(I))
10037        return updateWithCastInst(A, CI);
10038  
10039      if (auto *BinOp = dyn_cast<BinaryOperator>(I))
10040        return updateWithBinaryOperator(A, BinOp);
10041  
10042      if (isa<PHINode>(I) || isa<LoadInst>(I))
10043        return updateWithInstruction(A, I);
10044  
10045      return indicatePessimisticFixpoint();
10046    }
10047  
10048    /// See AbstractAttribute::trackStatistics()
10049    void trackStatistics() const override {
10050      STATS_DECLTRACK_FLOATING_ATTR(potential_values)
10051    }
10052  };
10053  
10054  struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
10055    AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
10056        : AAPotentialConstantValuesImpl(IRP, A) {}
10057  
10058    /// See AbstractAttribute::updateImpl(...).
10059    ChangeStatus updateImpl(Attributor &A) override {
10060      llvm_unreachable(
10061          "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
10062          "not be called");
10063    }
10064  
10065    /// See AbstractAttribute::trackStatistics()
10066    void trackStatistics() const override {
10067      STATS_DECLTRACK_FN_ATTR(potential_values)
10068    }
10069  };
10070  
10071  struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
10072    AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
10073        : AAPotentialConstantValuesFunction(IRP, A) {}
10074  
10075    /// See AbstractAttribute::trackStatistics()
10076    void trackStatistics() const override {
10077      STATS_DECLTRACK_CS_ATTR(potential_values)
10078    }
10079  };
10080  
10081  struct AAPotentialConstantValuesCallSiteReturned
10082      : AACalleeToCallSite<AAPotentialConstantValues,
10083                           AAPotentialConstantValuesImpl> {
10084    AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
10085                                              Attributor &A)
10086        : AACalleeToCallSite<AAPotentialConstantValues,
10087                             AAPotentialConstantValuesImpl>(IRP, A) {}
10088  
10089    /// See AbstractAttribute::trackStatistics()
10090    void trackStatistics() const override {
10091      STATS_DECLTRACK_CSRET_ATTR(potential_values)
10092    }
10093  };
10094  
10095  struct AAPotentialConstantValuesCallSiteArgument
10096      : AAPotentialConstantValuesFloating {
10097    AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
10098                                              Attributor &A)
10099        : AAPotentialConstantValuesFloating(IRP, A) {}
10100  
10101    /// See AbstractAttribute::initialize(..).
10102    void initialize(Attributor &A) override {
10103      AAPotentialConstantValuesImpl::initialize(A);
10104      if (isAtFixpoint())
10105        return;
10106  
10107      Value &V = getAssociatedValue();
10108  
10109      if (auto *C = dyn_cast<ConstantInt>(&V)) {
10110        unionAssumed(C->getValue());
10111        indicateOptimisticFixpoint();
10112        return;
10113      }
10114  
10115      if (isa<UndefValue>(&V)) {
10116        unionAssumedWithUndef();
10117        indicateOptimisticFixpoint();
10118        return;
10119      }
10120    }
10121  
10122    /// See AbstractAttribute::updateImpl(...).
10123    ChangeStatus updateImpl(Attributor &A) override {
10124      Value &V = getAssociatedValue();
10125      auto AssumedBefore = getAssumed();
10126      auto *AA = A.getAAFor<AAPotentialConstantValues>(
10127          *this, IRPosition::value(V), DepClassTy::REQUIRED);
10128      if (!AA)
10129        return indicatePessimisticFixpoint();
10130      const auto &S = AA->getAssumed();
10131      unionAssumed(S);
10132      return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
10133                                           : ChangeStatus::CHANGED;
10134    }
10135  
10136    /// See AbstractAttribute::trackStatistics()
10137    void trackStatistics() const override {
10138      STATS_DECLTRACK_CSARG_ATTR(potential_values)
10139    }
10140  };
10141  } // namespace
10142  
10143  /// ------------------------ NoUndef Attribute ---------------------------------
10144  bool AANoUndef::isImpliedByIR(Attributor &A, const IRPosition &IRP,
10145                                Attribute::AttrKind ImpliedAttributeKind,
10146                                bool IgnoreSubsumingPositions) {
10147    assert(ImpliedAttributeKind == Attribute::NoUndef &&
10148           "Unexpected attribute kind");
10149    if (A.hasAttr(IRP, {Attribute::NoUndef}, IgnoreSubsumingPositions,
10150                  Attribute::NoUndef))
10151      return true;
10152  
10153    Value &Val = IRP.getAssociatedValue();
10154    if (IRP.getPositionKind() != IRPosition::IRP_RETURNED &&
10155        isGuaranteedNotToBeUndefOrPoison(&Val)) {
10156      LLVMContext &Ctx = Val.getContext();
10157      A.manifestAttrs(IRP, Attribute::get(Ctx, Attribute::NoUndef));
10158      return true;
10159    }
10160  
10161    return false;
10162  }
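// [Editor] Illustrative example, not part of the upstream source: a constant
// like `@g = global i32 0` used as a call argument is guaranteed not to be
// undef or poison, so the branch above manifests `noundef` on that position
// immediately, without waiting for the fixpoint iteration.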
10163  
10164  namespace {
10165  struct AANoUndefImpl : AANoUndef {
10166    AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
10167  
10168    /// See AbstractAttribute::initialize(...).
10169    void initialize(Attributor &A) override {
10170      Value &V = getAssociatedValue();
10171      if (isa<UndefValue>(V))
10172        indicatePessimisticFixpoint();
10173      assert(!isImpliedByIR(A, getIRPosition(), Attribute::NoUndef));
10174    }
10175  
10176    /// See followUsesInMBEC
10177    bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
10178                         AANoUndef::StateType &State) {
10179      const Value *UseV = U->get();
10180      const DominatorTree *DT = nullptr;
10181      AssumptionCache *AC = nullptr;
10182      InformationCache &InfoCache = A.getInfoCache();
10183      if (Function *F = getAnchorScope()) {
10184        DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
10185        AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
10186      }
10187      State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
10188      bool TrackUse = false;
10189      // Track use for instructions which must produce undef or poison bits when
10190      // at least one operand contains such bits.
10191      if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
10192        TrackUse = true;
10193      return TrackUse;
10194    }
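  // [Editor] Illustrative note, not part of the upstream source: a use in,
  // e.g., `getelementptr i32, ptr %p, i64 %i` is followed because a GEP
  // propagates undef/poison bits from its operands; for most other users the
  // traversal stops here (TrackUse stays false).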
10195  
10196    /// See AbstractAttribute::getAsStr().
10197    const std::string getAsStr(Attributor *A) const override {
10198      return getAssumed() ? "noundef" : "may-undef-or-poison";
10199    }
10200  
10201    ChangeStatus manifest(Attributor &A) override {
10202      // We don't manifest the noundef attribute for dead positions because
10203      // the values associated with dead positions would be replaced with
10204      // undef values.
10205      bool UsedAssumedInformation = false;
10206      if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
10207                          UsedAssumedInformation))
10208        return ChangeStatus::UNCHANGED;
10209      // A position whose simplified value does not have any value is
10210      // considered dead. We don't manifest noundef in such positions for the
10211      // same reason as above.
10212      if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation,
10213                                  AA::Interprocedural)
10214               .has_value())
10215        return ChangeStatus::UNCHANGED;
10216      return AANoUndef::manifest(A);
10217    }
10218  };
10219  
10220  struct AANoUndefFloating : public AANoUndefImpl {
10221    AANoUndefFloating(const IRPosition &IRP, Attributor &A)
10222        : AANoUndefImpl(IRP, A) {}
10223  
10224    /// See AbstractAttribute::initialize(...).
10225    void initialize(Attributor &A) override {
10226      AANoUndefImpl::initialize(A);
10227      if (!getState().isAtFixpoint() && getAnchorScope() &&
10228          !getAnchorScope()->isDeclaration())
10229        if (Instruction *CtxI = getCtxI())
10230          followUsesInMBEC(*this, A, getState(), *CtxI);
10231    }
10232  
10233    /// See AbstractAttribute::updateImpl(...).
10234    ChangeStatus updateImpl(Attributor &A) override {
10235      auto VisitValueCB = [&](const IRPosition &IRP) -> bool {
10236        bool IsKnownNoUndef;
10237        return AA::hasAssumedIRAttr<Attribute::NoUndef>(
10238            A, this, IRP, DepClassTy::REQUIRED, IsKnownNoUndef);
10239      };
10240  
10241      bool Stripped;
10242      bool UsedAssumedInformation = false;
10243      Value *AssociatedValue = &getAssociatedValue();
10244      SmallVector<AA::ValueAndContext> Values;
10245      if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
10246                                        AA::AnyScope, UsedAssumedInformation))
10247        Stripped = false;
10248      else
10249        Stripped =
10250            Values.size() != 1 || Values.front().getValue() != AssociatedValue;
10251  
10252      if (!Stripped) {
10253        // If we haven't stripped anything we might still be able to use a
10254        // different AA, but only if the IRP changes. Effectively when we
10255        // interpret this not as a call site value but as a floating/argument
10256        // value.
10257        const IRPosition AVIRP = IRPosition::value(*AssociatedValue);
10258        if (AVIRP == getIRPosition() || !VisitValueCB(AVIRP))
10259          return indicatePessimisticFixpoint();
10260        return ChangeStatus::UNCHANGED;
10261      }
10262  
10263      for (const auto &VAC : Values)
10264        if (!VisitValueCB(IRPosition::value(*VAC.getValue())))
10265          return indicatePessimisticFixpoint();
10266  
10267      return ChangeStatus::UNCHANGED;
10268    }
10269  
10270    /// See AbstractAttribute::trackStatistics()
10271    void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
10272  };
10273  
10274  struct AANoUndefReturned final
10275      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
10276    AANoUndefReturned(const IRPosition &IRP, Attributor &A)
10277        : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
10278  
10279    /// See AbstractAttribute::trackStatistics()
10280    void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
10281  };
10282  
10283  struct AANoUndefArgument final
10284      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
10285    AANoUndefArgument(const IRPosition &IRP, Attributor &A)
10286        : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
10287  
10288    /// See AbstractAttribute::trackStatistics()
10289    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
10290  };
10291  
10292  struct AANoUndefCallSiteArgument final : AANoUndefFloating {
10293    AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
10294        : AANoUndefFloating(IRP, A) {}
10295  
10296    /// See AbstractAttribute::trackStatistics()
10297    void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
10298  };
10299  
10300  struct AANoUndefCallSiteReturned final
10301      : AACalleeToCallSite<AANoUndef, AANoUndefImpl> {
10302    AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
10303        : AACalleeToCallSite<AANoUndef, AANoUndefImpl>(IRP, A) {}
10304  
10305    /// See AbstractAttribute::trackStatistics()
10306    void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
10307  };
10308  
10309  /// ------------------------ NoFPClass Attribute -------------------------------
10310  
10311  struct AANoFPClassImpl : AANoFPClass {
10312    AANoFPClassImpl(const IRPosition &IRP, Attributor &A) : AANoFPClass(IRP, A) {}
10313  
10314    void initialize(Attributor &A) override {
10315      const IRPosition &IRP = getIRPosition();
10316  
10317      Value &V = IRP.getAssociatedValue();
10318      if (isa<UndefValue>(V)) {
10319        indicateOptimisticFixpoint();
10320        return;
10321      }
10322  
10323      SmallVector<Attribute> Attrs;
10324      A.getAttrs(getIRPosition(), {Attribute::NoFPClass}, Attrs, false);
10325      for (const auto &Attr : Attrs) {
10326        addKnownBits(Attr.getNoFPClass());
10327      }
10328  
10329      const DataLayout &DL = A.getDataLayout();
10330      if (getPositionKind() != IRPosition::IRP_RETURNED) {
10331        KnownFPClass KnownFPClass = computeKnownFPClass(&V, DL);
10332        addKnownBits(~KnownFPClass.KnownFPClasses);
10333      }
10334  
10335      if (Instruction *CtxI = getCtxI())
10336        followUsesInMBEC(*this, A, getState(), *CtxI);
10337    }
10338  
10339    /// See followUsesInMBEC
10340    bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
10341                         AANoFPClass::StateType &State) {
10342      // TODO: Determine what instructions can be looked through.
10343      auto *CB = dyn_cast<CallBase>(I);
10344      if (!CB)
10345        return false;
10346  
10347      if (!CB->isArgOperand(U))
10348        return false;
10349  
10350      unsigned ArgNo = CB->getArgOperandNo(U);
10351      IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
10352      if (auto *NoFPAA = A.getAAFor<AANoFPClass>(*this, IRP, DepClassTy::NONE))
10353        State.addKnownBits(NoFPAA->getState().getKnown());
10354      return false;
10355    }
10356  
10357    const std::string getAsStr(Attributor *A) const override {
10358      std::string Result = "nofpclass";
10359      raw_string_ostream OS(Result);
10360      OS << getKnownNoFPClass() << '/' << getAssumedNoFPClass();
10361      return Result;
10362    }
10363  
10364    void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
10365                              SmallVectorImpl<Attribute> &Attrs) const override {
10366      Attrs.emplace_back(Attribute::getWithNoFPClass(Ctx, getAssumedNoFPClass()));
10367    }
10368  };
10369  
10370  struct AANoFPClassFloating : public AANoFPClassImpl {
10371    AANoFPClassFloating(const IRPosition &IRP, Attributor &A)
10372        : AANoFPClassImpl(IRP, A) {}
10373  
10374    /// See AbstractAttribute::updateImpl(...).
10375    ChangeStatus updateImpl(Attributor &A) override {
10376      SmallVector<AA::ValueAndContext> Values;
10377      bool UsedAssumedInformation = false;
10378      if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
10379                                        AA::AnyScope, UsedAssumedInformation)) {
10380        Values.push_back({getAssociatedValue(), getCtxI()});
10381      }
10382  
10383      StateType T;
10384      auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
10385        const auto *AA = A.getAAFor<AANoFPClass>(*this, IRPosition::value(V),
10386                                                 DepClassTy::REQUIRED);
10387        if (!AA || this == AA) {
10388          T.indicatePessimisticFixpoint();
10389        } else {
10390          const AANoFPClass::StateType &S =
10391              static_cast<const AANoFPClass::StateType &>(AA->getState());
10392          T ^= S;
10393        }
10394        return T.isValidState();
10395      };
10396  
10397      for (const auto &VAC : Values)
10398        if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
10399          return indicatePessimisticFixpoint();
10400  
10401      return clampStateAndIndicateChange(getState(), T);
10402    }
10403  
10404    /// See AbstractAttribute::trackStatistics()
10405    void trackStatistics() const override {
10406      STATS_DECLTRACK_FNRET_ATTR(nofpclass)
10407    }
10408  };
10409  
10410  struct AANoFPClassReturned final
10411      : AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl,
10412                                     AANoFPClassImpl::StateType, false,
10413                                     Attribute::None, false> {
10414    AANoFPClassReturned(const IRPosition &IRP, Attributor &A)
10415        : AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl,
10416                                       AANoFPClassImpl::StateType, false,
10417                                       Attribute::None, false>(IRP, A) {}
10418  
10419    /// See AbstractAttribute::trackStatistics()
10420    void trackStatistics() const override {
10421      STATS_DECLTRACK_FNRET_ATTR(nofpclass)
10422    }
10423  };
10424  
10425  struct AANoFPClassArgument final
10426      : AAArgumentFromCallSiteArguments<AANoFPClass, AANoFPClassImpl> {
10427    AANoFPClassArgument(const IRPosition &IRP, Attributor &A)
10428        : AAArgumentFromCallSiteArguments<AANoFPClass, AANoFPClassImpl>(IRP, A) {}
10429  
10430    /// See AbstractAttribute::trackStatistics()
10431    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofpclass) }
10432  };
10433  
10434  struct AANoFPClassCallSiteArgument final : AANoFPClassFloating {
10435    AANoFPClassCallSiteArgument(const IRPosition &IRP, Attributor &A)
10436        : AANoFPClassFloating(IRP, A) {}
10437  
10438    /// See AbstractAttribute::trackStatistics()
10439    void trackStatistics() const override {
10440      STATS_DECLTRACK_CSARG_ATTR(nofpclass)
10441    }
10442  };
10443  
10444  struct AANoFPClassCallSiteReturned final
10445      : AACalleeToCallSite<AANoFPClass, AANoFPClassImpl> {
10446    AANoFPClassCallSiteReturned(const IRPosition &IRP, Attributor &A)
10447        : AACalleeToCallSite<AANoFPClass, AANoFPClassImpl>(IRP, A) {}
10448  
10449    /// See AbstractAttribute::trackStatistics()
10450    void trackStatistics() const override {
10451      STATS_DECLTRACK_CSRET_ATTR(nofpclass)
10452    }
10453  };
10454  
10455  struct AACallEdgesImpl : public AACallEdges {
10456    AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
10457  
10458    const SetVector<Function *> &getOptimisticEdges() const override {
10459      return CalledFunctions;
10460    }
10461  
10462    bool hasUnknownCallee() const override { return HasUnknownCallee; }
10463  
10464    bool hasNonAsmUnknownCallee() const override {
10465      return HasUnknownCalleeNonAsm;
10466    }
10467  
10468    const std::string getAsStr(Attributor *A) const override {
10469      return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
10470             std::to_string(CalledFunctions.size()) + "]";
10471    }
10472  
10473    void trackStatistics() const override {}
10474  
10475  protected:
10476    void addCalledFunction(Function *Fn, ChangeStatus &Change) {
10477      if (CalledFunctions.insert(Fn)) {
10478        Change = ChangeStatus::CHANGED;
10479        LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
10480                          << "\n");
10481      }
10482    }
10483  
10484    void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
10485      if (!HasUnknownCallee)
10486        Change = ChangeStatus::CHANGED;
10487      if (NonAsm && !HasUnknownCalleeNonAsm)
10488        Change = ChangeStatus::CHANGED;
10489      HasUnknownCalleeNonAsm |= NonAsm;
10490      HasUnknownCallee = true;
10491    }
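  // [Editor] Illustrative note, not part of the upstream source: callers pass
  // NonAsm = false for side-effecting inline asm, which sets HasUnknownCallee
  // but leaves HasUnknownCalleeNonAsm alone; a truly unknown indirect target
  // is reported with NonAsm = true and sets both flags.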
10492  
10493  private:
10494    /// Optimistic set of functions that might be called by this position.
10495    SetVector<Function *> CalledFunctions;
10496  
10497    /// Is there any call with an unknown callee.
10498    bool HasUnknownCallee = false;
10499  
10500    /// Is there any call with an unknown callee, excluding any inline asm.
10501    bool HasUnknownCalleeNonAsm = false;
10502  };
10503  
10504  struct AACallEdgesCallSite : public AACallEdgesImpl {
10505    AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
10506        : AACallEdgesImpl(IRP, A) {}
10507    /// See AbstractAttribute::updateImpl(...).
10508    ChangeStatus updateImpl(Attributor &A) override {
10509      ChangeStatus Change = ChangeStatus::UNCHANGED;
10510  
10511      auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool {
10512        if (Function *Fn = dyn_cast<Function>(&V)) {
10513          addCalledFunction(Fn, Change);
10514        } else {
10515          LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
10516          setHasUnknownCallee(true, Change);
10517        }
10518  
10519        // Explore all values.
10520        return true;
10521      };
10522  
10523      SmallVector<AA::ValueAndContext> Values;
10524      // Process any value that we might call.
10525      auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) {
10526        if (isa<Constant>(V)) {
10527          VisitValue(*V, CtxI);
10528          return;
10529        }
10530  
10531        bool UsedAssumedInformation = false;
10532        Values.clear();
10533        if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values,
10534                                          AA::AnyScope, UsedAssumedInformation)) {
10535          Values.push_back({*V, CtxI});
10536        }
10537        for (auto &VAC : Values)
10538          VisitValue(*VAC.getValue(), VAC.getCtxI());
10539      };
10540  
10541      CallBase *CB = cast<CallBase>(getCtxI());
10542  
10543      if (auto *IA = dyn_cast<InlineAsm>(CB->getCalledOperand())) {
10544        if (IA->hasSideEffects() &&
10545            !hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
10546            !hasAssumption(*CB, "ompx_no_call_asm")) {
10547          setHasUnknownCallee(false, Change);
10548        }
10549        return Change;
10550      }
10551  
10552      if (CB->isIndirectCall())
10553        if (auto *IndirectCallAA = A.getAAFor<AAIndirectCallInfo>(
10554                *this, getIRPosition(), DepClassTy::OPTIONAL))
10555          if (IndirectCallAA->foreachCallee(
10556                  [&](Function *Fn) { return VisitValue(*Fn, CB); }))
10557            return Change;
10558  
10559      // The simplest case.
10560      ProcessCalledOperand(CB->getCalledOperand(), CB);
10561  
10562      // Process callback functions.
10563      SmallVector<const Use *, 4u> CallbackUses;
10564      AbstractCallSite::getCallbackUses(*CB, CallbackUses);
10565      for (const Use *U : CallbackUses)
10566        ProcessCalledOperand(U->get(), CB);
10567  
10568      return Change;
10569    }
10570  };
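// [Editor] Illustrative note, not part of the upstream source: the callback
// handling above covers broker calls described by !callback metadata, e.g. a
// task function forwarded by a `pthread_create`-like API, so such indirectly
// invoked functions also become optimistic call edges of this call site.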
10571  
10572  struct AACallEdgesFunction : public AACallEdgesImpl {
10573    AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
10574        : AACallEdgesImpl(IRP, A) {}
10575  
10576    /// See AbstractAttribute::updateImpl(...).
10577    ChangeStatus updateImpl(Attributor &A) override {
10578      ChangeStatus Change = ChangeStatus::UNCHANGED;
10579  
10580      auto ProcessCallInst = [&](Instruction &Inst) {
10581        CallBase &CB = cast<CallBase>(Inst);
10582  
10583        auto *CBEdges = A.getAAFor<AACallEdges>(
10584            *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
10585        if (!CBEdges)
10586          return false;
10587        if (CBEdges->hasNonAsmUnknownCallee())
10588          setHasUnknownCallee(true, Change);
10589        if (CBEdges->hasUnknownCallee())
10590          setHasUnknownCallee(false, Change);
10591  
10592        for (Function *F : CBEdges->getOptimisticEdges())
10593          addCalledFunction(F, Change);
10594  
10595        return true;
10596      };
10597  
10598      // Visit all callable instructions.
10599      bool UsedAssumedInformation = false;
10600      if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
10601                                             UsedAssumedInformation,
10602                                             /* CheckBBLivenessOnly */ true)) {
10603        // If we haven't looked at all call-like instructions, assume that there
10604        // are unknown callees.
10605        setHasUnknownCallee(true, Change);
10606      }
10607  
10608      return Change;
10609    }
10610  };
10611  
10612  /// -------------------AAInterFnReachability Attribute--------------------------
10613  
10614  struct AAInterFnReachabilityFunction
10615      : public CachedReachabilityAA<AAInterFnReachability, Function> {
10616    using Base = CachedReachabilityAA<AAInterFnReachability, Function>;
10617    AAInterFnReachabilityFunction(const IRPosition &IRP, Attributor &A)
10618        : Base(IRP, A) {}
10619  
10620    bool instructionCanReach(
10621        Attributor &A, const Instruction &From, const Function &To,
10622        const AA::InstExclusionSetTy *ExclusionSet) const override {
10623      assert(From.getFunction() == getAnchorScope() && "Queried the wrong AA!");
10624      auto *NonConstThis = const_cast<AAInterFnReachabilityFunction *>(this);
10625  
10626      RQITy StackRQI(A, From, To, ExclusionSet, false);
10627      typename RQITy::Reachable Result;
10628      if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
10629        return NonConstThis->isReachableImpl(A, StackRQI,
10630                                             /*IsTemporaryRQI=*/true);
10631      return Result == RQITy::Reachable::Yes;
10632    }
10633  
10634    bool isReachableImpl(Attributor &A, RQITy &RQI,
10635                         bool IsTemporaryRQI) override {
10636      const Instruction *EntryI =
10637          &RQI.From->getFunction()->getEntryBlock().front();
10638      if (EntryI != RQI.From &&
10639          !instructionCanReach(A, *EntryI, *RQI.To, nullptr))
10640        return rememberResult(A, RQITy::Reachable::No, RQI, false,
10641                              IsTemporaryRQI);
10642  
10643      auto CheckReachableCallBase = [&](CallBase *CB) {
10644        auto *CBEdges = A.getAAFor<AACallEdges>(
10645            *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
10646        if (!CBEdges || !CBEdges->getState().isValidState())
10647          return false;
10648        // TODO Check To backwards in this case.
10649        if (CBEdges->hasUnknownCallee())
10650          return false;
10651  
10652        for (Function *Fn : CBEdges->getOptimisticEdges()) {
10653          if (Fn == RQI.To)
10654            return false;
10655  
10656          if (Fn->isDeclaration()) {
10657            if (Fn->hasFnAttribute(Attribute::NoCallback))
10658              continue;
10659            // TODO Check To backwards in this case.
10660            return false;
10661          }
10662  
10663          if (Fn == getAnchorScope()) {
10664            if (EntryI == RQI.From)
10665              continue;
10666            return false;
10667          }
10668  
10669          const AAInterFnReachability *InterFnReachability =
10670              A.getAAFor<AAInterFnReachability>(*this, IRPosition::function(*Fn),
10671                                                DepClassTy::OPTIONAL);
10672  
10673          const Instruction &FnFirstInst = Fn->getEntryBlock().front();
10674          if (!InterFnReachability ||
10675              InterFnReachability->instructionCanReach(A, FnFirstInst, *RQI.To,
10676                                                       RQI.ExclusionSet))
10677            return false;
10678        }
10679        return true;
10680      };
10681  
10682      const auto *IntraFnReachability = A.getAAFor<AAIntraFnReachability>(
10683          *this, IRPosition::function(*RQI.From->getFunction()),
10684          DepClassTy::OPTIONAL);
10685  
10686      // Determine the call-like instructions that we can reach from the instruction.
10687      auto CheckCallBase = [&](Instruction &CBInst) {
10688        // There are usually fewer nodes in the call graph, so check
10689        // inter-function reachability first.
10690        if (CheckReachableCallBase(cast<CallBase>(&CBInst)))
10691          return true;
10692        return IntraFnReachability && !IntraFnReachability->isAssumedReachable(
10693                                          A, *RQI.From, CBInst, RQI.ExclusionSet);
10694      };
10695  
10696      bool UsedExclusionSet = /* conservative */ true;
10697      bool UsedAssumedInformation = false;
10698      if (!A.checkForAllCallLikeInstructions(CheckCallBase, *this,
10699                                             UsedAssumedInformation,
10700                                             /* CheckBBLivenessOnly */ true))
10701        return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
10702                              IsTemporaryRQI);
10703  
10704      return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
10705                            IsTemporaryRQI);
10706    }
10707  
10708    void trackStatistics() const override {}
10709  };
10710  } // namespace
10711  
10712  template <typename AAType>
10713  static std::optional<Constant *>
10714  askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
10715                        const IRPosition &IRP, Type &Ty) {
10716    if (!Ty.isIntegerTy())
10717      return nullptr;
10718  
10719    // This will also pass the call base context.
10720    const auto *AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE);
10721    if (!AA)
10722      return nullptr;
10723  
10724    std::optional<Constant *> COpt = AA->getAssumedConstant(A);
10725  
10726    if (!COpt.has_value()) {
10727      A.recordDependence(*AA, QueryingAA, DepClassTy::OPTIONAL);
10728      return std::nullopt;
10729    }
10730    if (auto *C = *COpt) {
10731      A.recordDependence(*AA, QueryingAA, DepClassTy::OPTIONAL);
10732      return C;
10733    }
10734    return nullptr;
10735  }
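// [Editor] Illustrative note, not part of the upstream source: the three
// possible results encode distinct facts: std::nullopt means "no answer yet,
// keep iterating optimistically", a non-null Constant * is a usable
// simplification, and nullptr means this position cannot be folded to a
// single constant (or is not an integer).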
10736  
10737  Value *AAPotentialValues::getSingleValue(
10738      Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP,
10739      SmallVectorImpl<AA::ValueAndContext> &Values) {
10740    Type &Ty = *IRP.getAssociatedType();
10741    std::optional<Value *> V;
10742    for (auto &It : Values) {
10743      V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
10744      if (V.has_value() && !*V)
10745        break;
10746    }
10747    if (!V.has_value())
10748      return UndefValue::get(&Ty);
10749    return *V;
10750  }
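// [Editor] Illustrative example, not part of the upstream source: for Values
// holding {i32 7, i32 7} the lattice combination stays the single constant
// i32 7; for {i32 7, i32 9} it collapses to nullptr ("no single value"); for
// an empty Values list the function returns undef of the associated type.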
10751  
10752  namespace {
10753  struct AAPotentialValuesImpl : AAPotentialValues {
10754    using StateType = PotentialLLVMValuesState;
10755  
10756    AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
10757        : AAPotentialValues(IRP, A) {}
10758  
10759    /// See AbstractAttribute::initialize(..).
10760    void initialize(Attributor &A) override {
10761      if (A.hasSimplificationCallback(getIRPosition())) {
10762        indicatePessimisticFixpoint();
10763        return;
10764      }
10765      Value *Stripped = getAssociatedValue().stripPointerCasts();
10766      if (isa<Constant>(Stripped) && !isa<ConstantExpr>(Stripped)) {
10767        addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
10768                 getAnchorScope());
10769        indicateOptimisticFixpoint();
10770        return;
10771      }
10772      AAPotentialValues::initialize(A);
10773    }
10774  
10775    /// See AbstractAttribute::getAsStr().
10776    const std::string getAsStr(Attributor *A) const override {
10777      std::string Str;
10778      llvm::raw_string_ostream OS(Str);
10779      OS << getState();
10780      return Str;
10781    }
10782  
10783    template <typename AAType>
10784    static std::optional<Value *> askOtherAA(Attributor &A,
10785                                             const AbstractAttribute &AA,
10786                                             const IRPosition &IRP, Type &Ty) {
10787      if (isa<Constant>(IRP.getAssociatedValue()))
10788        return &IRP.getAssociatedValue();
10789      std::optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
10790      if (!C)
10791        return std::nullopt;
10792      if (*C)
10793        if (auto *CC = AA::getWithType(**C, Ty))
10794          return CC;
10795      return nullptr;
10796    }
10797  
10798    virtual void addValue(Attributor &A, StateType &State, Value &V,
10799                          const Instruction *CtxI, AA::ValueScope S,
10800                          Function *AnchorScope) const {
10801  
10802      IRPosition ValIRP = IRPosition::value(V);
10803      if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
10804        for (const auto &U : CB->args()) {
10805          if (U.get() != &V)
10806            continue;
10807          ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
10808          break;
10809        }
10810      }
10811  
10812      Value *VPtr = &V;
10813      if (ValIRP.getAssociatedType()->isIntegerTy()) {
10814        Type &Ty = *getAssociatedType();
10815        std::optional<Value *> SimpleV =
10816            askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
10817        if (SimpleV.has_value() && !*SimpleV) {
10818          auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
10819              *this, ValIRP, DepClassTy::OPTIONAL);
10820          if (PotentialConstantsAA && PotentialConstantsAA->isValidState()) {
10821            for (const auto &It : PotentialConstantsAA->getAssumedSet())
10822              State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
10823            if (PotentialConstantsAA->undefIsContained())
10824              State.unionAssumed({{*UndefValue::get(&Ty), nullptr}, S});
10825            return;
10826          }
10827        }
10828        if (!SimpleV.has_value())
10829          return;
10830  
10831        if (*SimpleV)
10832          VPtr = *SimpleV;
10833      }
10834  
10835      if (isa<ConstantInt>(VPtr))
10836        CtxI = nullptr;
10837      if (!AA::isValidInScope(*VPtr, AnchorScope))
10838        S = AA::ValueScope(S | AA::Interprocedural);
10839  
10840      State.unionAssumed({{*VPtr, CtxI}, S});
10841    }
10842  
10843    /// Helper struct to tie a value+context pair together with the scope for
10844    /// which this is the simplified version.
10845    struct ItemInfo {
10846      AA::ValueAndContext I;
10847      AA::ValueScope S;
10848  
10849      bool operator==(const ItemInfo &II) const {
10850        return II.I == I && II.S == S;
10851      };
10852      bool operator<(const ItemInfo &II) const {
10853        if (I == II.I)
10854          return S < II.S;
10855        return I < II.I;
10856      };
10857    };
10858  
10859    bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) {
10860      SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap;
10861      for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) {
10862        if (!(CS & S))
10863          continue;
10864  
10865        bool UsedAssumedInformation = false;
10866        SmallVector<AA::ValueAndContext> Values;
10867        if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
10868                                          UsedAssumedInformation))
10869          return false;
10870  
10871        for (auto &It : Values)
10872          ValueScopeMap[It] += CS;
10873      }
10874      for (auto &It : ValueScopeMap)
10875        addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
10876                 AA::ValueScope(It.second), getAnchorScope());
10877  
10878      return true;
10879    }
10880  
10881    void giveUpOnIntraprocedural(Attributor &A) {
10882      auto NewS = StateType::getBestState(getState());
10883      for (const auto &It : getAssumedSet()) {
10884        if (It.second == AA::Intraprocedural)
10885          continue;
10886        addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
10887                 AA::Interprocedural, getAnchorScope());
10888      }
10889      assert(!undefIsContained() && "Undef should be an explicit value!");
10890      addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
10891               getAnchorScope());
10892      getState() = NewS;
10893    }
10894  
10895    /// See AbstractState::indicatePessimisticFixpoint(...).
10896    ChangeStatus indicatePessimisticFixpoint() override {
10897      getState() = StateType::getBestState(getState());
10898      getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope});
10899      AAPotentialValues::indicateOptimisticFixpoint();
10900      return ChangeStatus::CHANGED;
10901    }
10902  
10903    /// See AbstractAttribute::updateImpl(...).
10904    ChangeStatus updateImpl(Attributor &A) override {
10905      return indicatePessimisticFixpoint();
10906    }
10907  
10908    /// See AbstractAttribute::manifest(...).
10909    ChangeStatus manifest(Attributor &A) override {
10910      SmallVector<AA::ValueAndContext> Values;
10911      for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
10912        Values.clear();
10913        if (!getAssumedSimplifiedValues(A, Values, S))
10914          continue;
10915        Value &OldV = getAssociatedValue();
10916        if (isa<UndefValue>(OldV))
10917          continue;
10918        Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
10919        if (!NewV || NewV == &OldV)
10920          continue;
10921        if (getCtxI() &&
10922            !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
10923          continue;
10924        if (A.changeAfterManifest(getIRPosition(), *NewV))
10925          return ChangeStatus::CHANGED;
10926      }
10927      return ChangeStatus::UNCHANGED;
10928    }
10929  
10930    bool getAssumedSimplifiedValues(
10931        Attributor &A, SmallVectorImpl<AA::ValueAndContext> &Values,
10932        AA::ValueScope S, bool RecurseForSelectAndPHI = false) const override {
10933      if (!isValidState())
10934        return false;
10935      bool UsedAssumedInformation = false;
10936      for (const auto &It : getAssumedSet())
10937        if (It.second & S) {
10938          if (RecurseForSelectAndPHI && (isa<PHINode>(It.first.getValue()) ||
10939                                         isa<SelectInst>(It.first.getValue()))) {
10940            if (A.getAssumedSimplifiedValues(
10941                    IRPosition::inst(*cast<Instruction>(It.first.getValue())),
10942                    this, Values, S, UsedAssumedInformation))
10943              continue;
10944          }
10945          Values.push_back(It.first);
10946        }
10947      assert(!undefIsContained() && "Undef should be an explicit value!");
10948      return true;
10949    }
10950  };
10951  
10952  struct AAPotentialValuesFloating : AAPotentialValuesImpl {
10953    AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
10954        : AAPotentialValuesImpl(IRP, A) {}
10955  
10956    /// See AbstractAttribute::updateImpl(...).
10957    ChangeStatus updateImpl(Attributor &A) override {
10958      auto AssumedBefore = getAssumed();
10959  
10960      genericValueTraversal(A, &getAssociatedValue());
10961  
10962      return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10963                                             : ChangeStatus::CHANGED;
10964    }
10965  
10966    /// Helper struct to remember which AAIsDead instances we actually used.
10967    struct LivenessInfo {
10968      const AAIsDead *LivenessAA = nullptr;
10969      bool AnyDead = false;
10970    };
10971  
10972    /// Check if \p Cmp is a comparison we can simplify.
10973    ///
10974    /// We handle multiple cases, one in which at least one operand is an
10975    /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
10976    /// operand. Return true if successful, in that case Worklist will be updated.
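         /// As an illustrative sketch with hypothetical IR: for
         ///   %c = icmp eq ptr %p, null
         /// where AANonNull lets us assume %p is non-null, %c simplifies to
         /// `i1 false`, and the corresponding `icmp ne` simplifies to `i1 true`.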
10977    bool handleCmp(Attributor &A, Value &Cmp, Value *LHS, Value *RHS,
10978                   CmpInst::Predicate Pred, ItemInfo II,
10979                   SmallVectorImpl<ItemInfo> &Worklist) {
10980  
10981      // Simplify the operands first.
10982      bool UsedAssumedInformation = false;
10983      SmallVector<AA::ValueAndContext> LHSValues, RHSValues;
10984      auto GetSimplifiedValues = [&](Value &V,
10985                                     SmallVector<AA::ValueAndContext> &Values) {
10986        if (!A.getAssumedSimplifiedValues(
10987                IRPosition::value(V, getCallBaseContext()), this, Values,
10988                AA::Intraprocedural, UsedAssumedInformation)) {
10989          Values.clear();
10990          Values.push_back(AA::ValueAndContext{V, II.I.getCtxI()});
10991        }
10992        return Values.empty();
10993      };
10994      if (GetSimplifiedValues(*LHS, LHSValues))
10995        return true;
10996      if (GetSimplifiedValues(*RHS, RHSValues))
10997        return true;
10998  
10999      LLVMContext &Ctx = LHS->getContext();
11000  
11001      InformationCache &InfoCache = A.getInfoCache();
11002      Instruction *CmpI = dyn_cast<Instruction>(&Cmp);
11003      Function *F = CmpI ? CmpI->getFunction() : nullptr;
11004      const auto *DT =
11005          F ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F)
11006            : nullptr;
11007      const auto *TLI =
11008          F ? A.getInfoCache().getTargetLibraryInfoForFunction(*F) : nullptr;
11009      auto *AC =
11010          F ? InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F)
11011            : nullptr;
11012  
11013      const DataLayout &DL = A.getDataLayout();
11014      SimplifyQuery Q(DL, TLI, DT, AC, CmpI);
11015  
11016      auto CheckPair = [&](Value &LHSV, Value &RHSV) {
11017        if (isa<UndefValue>(LHSV) || isa<UndefValue>(RHSV)) {
11018          addValue(A, getState(), *UndefValue::get(Cmp.getType()),
11019                   /* CtxI */ nullptr, II.S, getAnchorScope());
11020          return true;
11021        }
11022  
11023        // Handle the trivial case first in which we don't even need to think
11024        // about null or non-null.
11025        if (&LHSV == &RHSV &&
11026            (CmpInst::isTrueWhenEqual(Pred) || CmpInst::isFalseWhenEqual(Pred))) {
11027          Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
11028                                            CmpInst::isTrueWhenEqual(Pred));
11029          addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11030                   getAnchorScope());
11031          return true;
11032        }
11033  
11034        auto *TypedLHS = AA::getWithType(LHSV, *LHS->getType());
11035        auto *TypedRHS = AA::getWithType(RHSV, *RHS->getType());
11036        if (TypedLHS && TypedRHS) {
11037          Value *NewV = simplifyCmpInst(Pred, TypedLHS, TypedRHS, Q);
11038          if (NewV && NewV != &Cmp) {
11039            addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11040                     getAnchorScope());
11041            return true;
11042          }
11043        }
11044  
11045        // From now on we only handle equalities (==, !=).
11046        if (!CmpInst::isEquality(Pred))
11047          return false;
11048  
11049        bool LHSIsNull = isa<ConstantPointerNull>(LHSV);
11050        bool RHSIsNull = isa<ConstantPointerNull>(RHSV);
11051        if (!LHSIsNull && !RHSIsNull)
11052          return false;
11053  
11054        // What is left is the nullptr ==/!= non-nullptr case. We'll use
11055        // AANonNull on the non-nullptr operand; if we may assume it is
11056        // non-null we can conclude the result of the comparison.
11057        assert((LHSIsNull || RHSIsNull) &&
11058               "Expected nullptr versus non-nullptr comparison at this point");
11059  
11060        // PtrIdx is the index of the operand that we assume is not null.
11061        unsigned PtrIdx = LHSIsNull;
11062        bool IsKnownNonNull;
11063        bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
11064            A, this, IRPosition::value(*(PtrIdx ? &RHSV : &LHSV)),
11065            DepClassTy::REQUIRED, IsKnownNonNull);
11066        if (!IsAssumedNonNull)
11067          return false;
11068  
11069        // The new value depends on the predicate, true for != and false for ==.
11070        Constant *NewV =
11071            ConstantInt::get(Type::getInt1Ty(Ctx), Pred == CmpInst::ICMP_NE);
11072        addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
11073                 getAnchorScope());
11074        return true;
11075      };
11076  
11077      for (auto &LHSValue : LHSValues)
11078        for (auto &RHSValue : RHSValues)
11079          if (!CheckPair(*LHSValue.getValue(), *RHSValue.getValue()))
11080            return false;
11081      return true;
11082    }
11083  
11084    bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II,
11085                          SmallVectorImpl<ItemInfo> &Worklist) {
11086      const Instruction *CtxI = II.I.getCtxI();
11087      bool UsedAssumedInformation = false;
11088  
11089      std::optional<Constant *> C =
11090          A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
11091      bool NoValueYet = !C.has_value();
11092      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
11093        return true;
11094      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
11095        if (CI->isZero())
11096          Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
11097        else
11098          Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
11099      } else if (&SI == &getAssociatedValue()) {
11100        // We could not simplify the condition, assume both values.
11101        Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
11102        Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
11103      } else {
11104        std::optional<Value *> SimpleV = A.getAssumedSimplified(
11105            IRPosition::inst(SI), *this, UsedAssumedInformation, II.S);
11106        if (!SimpleV.has_value())
11107          return true;
11108        if (*SimpleV) {
11109          addValue(A, getState(), **SimpleV, CtxI, II.S, getAnchorScope());
11110          return true;
11111        }
11112        return false;
11113      }
11114      return true;
11115    }
11116  
11117    bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II,
11118                        SmallVectorImpl<ItemInfo> &Worklist) {
11119      SmallSetVector<Value *, 4> PotentialCopies;
11120      SmallSetVector<Instruction *, 4> PotentialValueOrigins;
11121      bool UsedAssumedInformation = false;
11122      if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
11123                                          PotentialValueOrigins, *this,
11124                                          UsedAssumedInformation,
11125                                          /* OnlyExact */ true)) {
11126        LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "
11127                             "loaded values for load instruction "
11128                          << LI << "\n");
11129        return false;
11130      }
11131  
11132      // Do not simplify loads that are only used in llvm.assume if we cannot also
11133      // remove all stores that may feed into the load. The reason is that the
11134      // assume is probably worth something as long as the stores are around.
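           // An illustrative sketch with hypothetical IR:
           //   store i1 %cond, ptr %p
           //   %v = load i1, ptr %p
           //   call void @llvm.assume(i1 %v)
           // Folding %v away while the store stays would forfeit whatever the
           // assume still conveys about %cond.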
11135      InformationCache &InfoCache = A.getInfoCache();
11136      if (InfoCache.isOnlyUsedByAssume(LI)) {
11137        if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
11138              if (!I || isa<AssumeInst>(I))
11139                return true;
11140              if (auto *SI = dyn_cast<StoreInst>(I))
11141                return A.isAssumedDead(SI->getOperandUse(0), this,
11142                                       /* LivenessAA */ nullptr,
11143                                       UsedAssumedInformation,
11144                                       /* CheckBBLivenessOnly */ false);
11145              return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
11146                                     UsedAssumedInformation,
11147                                     /* CheckBBLivenessOnly */ false);
11148            })) {
11149          LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is only used by assumes "
11150                               "and we cannot delete all the stores: "
11151                            << LI << "\n");
11152          return false;
11153        }
11154      }
11155  
11156      // Values have to be dynamically unique or we lose the fact that a
11157      // single llvm::Value might represent two runtime values (e.g.,
11158      // stack locations in different recursive calls).
11159      const Instruction *CtxI = II.I.getCtxI();
11160      bool ScopeIsLocal = (II.S & AA::Intraprocedural);
11161      bool AllLocal = ScopeIsLocal;
11162      bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
11163        AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
11164        return AA::isDynamicallyUnique(A, *this, *PC);
11165      });
11166      if (!DynamicallyUnique) {
11167        LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "
11168                             "values are dynamically unique: "
11169                          << LI << "\n");
11170        return false;
11171      }
11172  
11173      for (auto *PotentialCopy : PotentialCopies) {
11174        if (AllLocal) {
11175          Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
11176        } else {
11177          Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
11178        }
11179      }
11180      if (!AllLocal && ScopeIsLocal)
11181        addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
11182      return true;
11183    }
11184  
11185    bool handlePHINode(
11186        Attributor &A, PHINode &PHI, ItemInfo II,
11187        SmallVectorImpl<ItemInfo> &Worklist,
11188        SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
11189      auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
11190        LivenessInfo &LI = LivenessAAs[&F];
11191        if (!LI.LivenessAA)
11192          LI.LivenessAA = A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
11193                                               DepClassTy::NONE);
11194        return LI;
11195      };
11196  
11197      if (&PHI == &getAssociatedValue()) {
11198        LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
11199        const auto *CI =
11200            A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
11201                *PHI.getFunction());
11202  
11203        Cycle *C = nullptr;
11204        bool CyclePHI = mayBeInCycle(CI, &PHI, /* HeaderOnly */ true, &C);
11205        for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
11206          BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
11207          if (LI.LivenessAA &&
11208              LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
11209            LI.AnyDead = true;
11210            continue;
11211          }
11212          Value *V = PHI.getIncomingValue(u);
11213          if (V == &PHI)
11214            continue;
11215  
11216          // If the incoming value is not the PHI but an instruction in the same
11217          // cycle we might have multiple versions of it flying around.
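               // E.g., an instruction defined inside the loop body denotes a
               // different runtime value on every iteration, so no single
               // simplified value can stand in for it; we bail out below.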
11218          if (CyclePHI && isa<Instruction>(V) &&
11219              (!C || C->contains(cast<Instruction>(V)->getParent())))
11220            return false;
11221  
11222          Worklist.push_back({{*V, IncomingBB->getTerminator()}, II.S});
11223        }
11224        return true;
11225      }
11226  
11227      bool UsedAssumedInformation = false;
11228      std::optional<Value *> SimpleV = A.getAssumedSimplified(
11229          IRPosition::inst(PHI), *this, UsedAssumedInformation, II.S);
11230      if (!SimpleV.has_value())
11231        return true;
11232      if (!(*SimpleV))
11233        return false;
11234      addValue(A, getState(), **SimpleV, &PHI, II.S, getAnchorScope());
11235      return true;
11236    }
11237  
11238    /// Use the generic, non-optimistic InstSimplify functionality if we managed to
11239    /// simplify any operand of the instruction \p I. Return true if successful,
11240    /// in that case Worklist will be updated.
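         /// As a hypothetical illustration: if the operand %x of
         ///   %a = add i32 %x, 1
         /// is assumed to simplify to `i32 2`, InstSimplify can fold the whole
         /// instruction to `i32 3`, which is then queued instead of %a.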
11241    bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II,
11242                           SmallVectorImpl<ItemInfo> &Worklist) {
11243      bool SomeSimplified = false;
11244      bool UsedAssumedInformation = false;
11245  
11246      SmallVector<Value *, 8> NewOps(I.getNumOperands());
11247      int Idx = 0;
11248      for (Value *Op : I.operands()) {
11249        const auto &SimplifiedOp = A.getAssumedSimplified(
11250            IRPosition::value(*Op, getCallBaseContext()), *this,
11251            UsedAssumedInformation, AA::Intraprocedural);
11252        // If we are not sure about any operand, we are not sure about the
11253        // entire instruction; we'll wait.
11254        if (!SimplifiedOp.has_value())
11255          return true;
11256  
11257        if (*SimplifiedOp)
11258          NewOps[Idx] = *SimplifiedOp;
11259        else
11260          NewOps[Idx] = Op;
11261  
11262        SomeSimplified |= (NewOps[Idx] != Op);
11263        ++Idx;
11264      }
11265  
11266      // We won't bother with the InstSimplify interface if we didn't simplify any
11267      // operand ourselves.
11268      if (!SomeSimplified)
11269        return false;
11270  
11271      InformationCache &InfoCache = A.getInfoCache();
11272      Function *F = I.getFunction();
11273      const auto *DT =
11274          InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
11275      const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
11276      auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
11277  
11278      const DataLayout &DL = I.getDataLayout();
11279      SimplifyQuery Q(DL, TLI, DT, AC, &I);
11280      Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q);
11281      if (!NewV || NewV == &I)
11282        return false;
11283  
11284      LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to "
11285                        << *NewV << "\n");
11286      Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S});
11287      return true;
11288    }
11289  
11290    bool simplifyInstruction(
11291        Attributor &A, Instruction &I, ItemInfo II,
11292        SmallVectorImpl<ItemInfo> &Worklist,
11293        SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
11294      if (auto *CI = dyn_cast<CmpInst>(&I))
11295        return handleCmp(A, *CI, CI->getOperand(0), CI->getOperand(1),
11296                         CI->getPredicate(), II, Worklist);
11297  
11298      switch (I.getOpcode()) {
11299      case Instruction::Select:
11300        return handleSelectInst(A, cast<SelectInst>(I), II, Worklist);
11301      case Instruction::PHI:
11302        return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs);
11303      case Instruction::Load:
11304        return handleLoadInst(A, cast<LoadInst>(I), II, Worklist);
11305      default:
11306        return handleGenericInst(A, I, II, Worklist);
11307      }
11308      return false;
11309    }
11310  
11311    void genericValueTraversal(Attributor &A, Value *InitialV) {
11312      SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
11313  
11314      SmallSet<ItemInfo, 16> Visited;
11315      SmallVector<ItemInfo, 16> Worklist;
11316      Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope});
11317  
11318      int Iteration = 0;
11319      do {
11320        ItemInfo II = Worklist.pop_back_val();
11321        Value *V = II.I.getValue();
11322        assert(V);
11323        const Instruction *CtxI = II.I.getCtxI();
11324        AA::ValueScope S = II.S;
11325  
11326        // Check if we should process the current value. To prevent endless
11327        // recursion keep a record of the values we followed!
11328        if (!Visited.insert(II).second)
11329          continue;
11330  
11331        // Make sure we limit the compile time for complex expressions.
11332        if (Iteration++ >= MaxPotentialValuesIterations) {
11333          LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
11334                            << Iteration << "!\n");
11335          addValue(A, getState(), *V, CtxI, S, getAnchorScope());
11336          continue;
11337        }
11338  
11339        // Explicitly look through calls with a "returned" attribute if we do
11340        // not have a pointer, as stripPointerCasts only works on pointers.
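             // E.g., with hypothetical IR `%r = call i32 @f(i32 returned %x)`,
             // the traversal continues with %x instead of %r.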
11341        Value *NewV = nullptr;
11342        if (V->getType()->isPointerTy()) {
11343          NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
11344        } else {
11345          if (auto *CB = dyn_cast<CallBase>(V))
11346            if (auto *Callee =
11347                    dyn_cast_if_present<Function>(CB->getCalledOperand())) {
11348              for (Argument &Arg : Callee->args())
11349                if (Arg.hasReturnedAttr()) {
11350                  NewV = CB->getArgOperand(Arg.getArgNo());
11351                  break;
11352                }
11353            }
11354        }
11355        if (NewV && NewV != V) {
11356          Worklist.push_back({{*NewV, CtxI}, S});
11357          continue;
11358        }
11359  
11360        if (auto *I = dyn_cast<Instruction>(V)) {
11361          if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
11362            continue;
11363        }
11364  
11365        if (V != InitialV || isa<Argument>(V))
11366          if (recurseForValue(A, IRPosition::value(*V), II.S))
11367            continue;
11368  
11369        // If we haven't stripped anything we give up.
11370        if (V == InitialV && CtxI == getCtxI()) {
11371          indicatePessimisticFixpoint();
11372          return;
11373        }
11374  
11375        addValue(A, getState(), *V, CtxI, S, getAnchorScope());
11376      } while (!Worklist.empty());
11377  
11378      // If we actually used liveness information we have to record a
11379      // dependence.
11380      for (auto &It : LivenessAAs)
11381        if (It.second.AnyDead)
11382          A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
11383    }
11384  
11385    /// See AbstractAttribute::trackStatistics()
11386    void trackStatistics() const override {
11387      STATS_DECLTRACK_FLOATING_ATTR(potential_values)
11388    }
11389  };
11390  
11391  struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
11392    using Base = AAPotentialValuesImpl;
11393    AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
11394        : Base(IRP, A) {}
11395  
11396    /// See AbstractAttribute::initialize(..).
11397    void initialize(Attributor &A) override {
11398      auto &Arg = cast<Argument>(getAssociatedValue());
11399      if (Arg.hasPointeeInMemoryValueAttr())
11400        indicatePessimisticFixpoint();
11401    }
11402  
11403    /// See AbstractAttribute::updateImpl(...).
11404    ChangeStatus updateImpl(Attributor &A) override {
11405      auto AssumedBefore = getAssumed();
11406  
11407      unsigned ArgNo = getCalleeArgNo();
11408  
11409      bool UsedAssumedInformation = false;
11410      SmallVector<AA::ValueAndContext> Values;
11411      auto CallSitePred = [&](AbstractCallSite ACS) {
11412        const auto CSArgIRP = IRPosition::callsite_argument(ACS, ArgNo);
11413        if (CSArgIRP.getPositionKind() == IRP_INVALID)
11414          return false;
11415  
11416        if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
11417                                          AA::Interprocedural,
11418                                          UsedAssumedInformation))
11419          return false;
11420  
11421        return isValidState();
11422      };
11423  
11424      if (!A.checkForAllCallSites(CallSitePred, *this,
11425                                  /* RequireAllCallSites */ true,
11426                                  UsedAssumedInformation))
11427        return indicatePessimisticFixpoint();
11428  
11429      Function *Fn = getAssociatedFunction();
11430      bool AnyNonLocal = false;
11431      for (auto &It : Values) {
11432        if (isa<Constant>(It.getValue())) {
11433          addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
11434                   getAnchorScope());
11435          continue;
11436        }
11437        if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
11438          return indicatePessimisticFixpoint();
11439  
11440        if (auto *Arg = dyn_cast<Argument>(It.getValue()))
11441          if (Arg->getParent() == Fn) {
11442            addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
11443                     getAnchorScope());
11444            continue;
11445          }
11446        addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural,
11447                 getAnchorScope());
11448        AnyNonLocal = true;
11449      }
11450      assert(!undefIsContained() && "Undef should be an explicit value!");
11451      if (AnyNonLocal)
11452        giveUpOnIntraprocedural(A);
11453  
11454      return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11455                                             : ChangeStatus::CHANGED;
11456    }
11457  
11458    /// See AbstractAttribute::trackStatistics()
11459    void trackStatistics() const override {
11460      STATS_DECLTRACK_ARG_ATTR(potential_values)
11461    }
11462  };
11463  
11464  struct AAPotentialValuesReturned : public AAPotentialValuesFloating {
11465    using Base = AAPotentialValuesFloating;
11466    AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
11467        : Base(IRP, A) {}
11468  
11469    /// See AbstractAttribute::initialize(..).
11470    void initialize(Attributor &A) override {
11471      Function *F = getAssociatedFunction();
11472      if (!F || F->isDeclaration() || F->getReturnType()->isVoidTy()) {
11473        indicatePessimisticFixpoint();
11474        return;
11475      }
11476  
11477      for (Argument &Arg : F->args())
11478        if (Arg.hasReturnedAttr()) {
11479          addValue(A, getState(), Arg, nullptr, AA::AnyScope, F);
11480          ReturnedArg = &Arg;
11481          break;
11482        }
11483      if (!A.isFunctionIPOAmendable(*F) ||
11484          A.hasSimplificationCallback(getIRPosition())) {
11485        if (!ReturnedArg)
11486          indicatePessimisticFixpoint();
11487        else
11488          indicateOptimisticFixpoint();
11489      }
11490    }
11491  
11492    /// See AbstractAttribute::updateImpl(...).
11493    ChangeStatus updateImpl(Attributor &A) override {
11494      auto AssumedBefore = getAssumed();
11495      bool UsedAssumedInformation = false;
11496  
11497      SmallVector<AA::ValueAndContext> Values;
11498      Function *AnchorScope = getAnchorScope();
11499      auto HandleReturnedValue = [&](Value &V, Instruction *CtxI,
11500                                     bool AddValues) {
11501        for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
11502          Values.clear();
11503          if (!A.getAssumedSimplifiedValues(IRPosition::value(V), this, Values, S,
11504                                            UsedAssumedInformation,
11505                                            /* RecurseForSelectAndPHI */ true))
11506            return false;
11507          if (!AddValues)
11508            continue;
11509          for (const AA::ValueAndContext &VAC : Values)
11510            addValue(A, getState(), *VAC.getValue(),
11511                     VAC.getCtxI() ? VAC.getCtxI() : CtxI, S, AnchorScope);
11512        }
11513        return true;
11514      };
11515  
11516      if (ReturnedArg) {
11517        HandleReturnedValue(*ReturnedArg, nullptr, true);
11518      } else {
11519        auto RetInstPred = [&](Instruction &RetI) {
11520          bool AddValues = true;
11521          if (isa<PHINode>(RetI.getOperand(0)) ||
11522              isa<SelectInst>(RetI.getOperand(0))) {
11523            addValue(A, getState(), *RetI.getOperand(0), &RetI, AA::AnyScope,
11524                     AnchorScope);
11525            AddValues = false;
11526          }
11527          return HandleReturnedValue(*RetI.getOperand(0), &RetI, AddValues);
11528        };
11529  
11530        if (!A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
11531                                       UsedAssumedInformation,
11532                                       /* CheckBBLivenessOnly */ true))
11533          return indicatePessimisticFixpoint();
11534      }
11535  
11536      return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11537                                             : ChangeStatus::CHANGED;
11538    }
11539  
11540    void addValue(Attributor &A, StateType &State, Value &V,
11541                  const Instruction *CtxI, AA::ValueScope S,
11542                  Function *AnchorScope) const override {
11543      Function *F = getAssociatedFunction();
11544      if (auto *CB = dyn_cast<CallBase>(&V))
11545        if (CB->getCalledOperand() == F)
11546          return;
11547      Base::addValue(A, State, V, CtxI, S, AnchorScope);
11548    }
11549  
11550    ChangeStatus manifest(Attributor &A) override {
11551      if (ReturnedArg)
11552        return ChangeStatus::UNCHANGED;
11553      SmallVector<AA::ValueAndContext> Values;
11554      if (!getAssumedSimplifiedValues(A, Values, AA::ValueScope::Intraprocedural,
11555                                      /* RecurseForSelectAndPHI */ true))
11556        return ChangeStatus::UNCHANGED;
11557      Value *NewVal = getSingleValue(A, *this, getIRPosition(), Values);
11558      if (!NewVal)
11559        return ChangeStatus::UNCHANGED;
11560  
11561      ChangeStatus Changed = ChangeStatus::UNCHANGED;
11562      if (auto *Arg = dyn_cast<Argument>(NewVal)) {
11563        STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
11564                      "Number of functions with unique return");
11565        Changed |= A.manifestAttrs(
11566            IRPosition::argument(*Arg),
11567            {Attribute::get(Arg->getContext(), Attribute::Returned)});
11568        STATS_DECLTRACK_ARG_ATTR(returned);
11569      }
11570  
11571      auto RetInstPred = [&](Instruction &RetI) {
11572        Value *RetOp = RetI.getOperand(0);
11573        if (isa<UndefValue>(RetOp) || RetOp == NewVal)
11574          return true;
11575        if (AA::isValidAtPosition({*NewVal, RetI}, A.getInfoCache()))
11576          if (A.changeUseAfterManifest(RetI.getOperandUse(0), *NewVal))
11577            Changed = ChangeStatus::CHANGED;
11578        return true;
11579      };
11580      bool UsedAssumedInformation = false;
11581      (void)A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
11582                                      UsedAssumedInformation,
11583                                      /* CheckBBLivenessOnly */ true);
11584      return Changed;
11585    }
11586  
11587    ChangeStatus indicatePessimisticFixpoint() override {
11588      return AAPotentialValues::indicatePessimisticFixpoint();
11589    }
11590  
11591    /// See AbstractAttribute::trackStatistics()
11592    void trackStatistics() const override {
11593      STATS_DECLTRACK_FNRET_ATTR(potential_values)
         }
11594  
11595    /// The argument with an existing `returned` attribute.
11596    Argument *ReturnedArg = nullptr;
11597  };
11598  
11599  struct AAPotentialValuesFunction : AAPotentialValuesImpl {
11600    AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
11601        : AAPotentialValuesImpl(IRP, A) {}
11602  
11603    /// See AbstractAttribute::updateImpl(...).
11604    ChangeStatus updateImpl(Attributor &A) override {
11605      llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
11606                       "not be called");
11607    }
11608  
11609    /// See AbstractAttribute::trackStatistics()
11610    void trackStatistics() const override {
11611      STATS_DECLTRACK_FN_ATTR(potential_values)
11612    }
11613  };
11614  
11615  struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
11616    AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
11617        : AAPotentialValuesFunction(IRP, A) {}
11618  
11619    /// See AbstractAttribute::trackStatistics()
11620    void trackStatistics() const override {
11621      STATS_DECLTRACK_CS_ATTR(potential_values)
11622    }
11623  };
11624  
11625  struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl {
11626    AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
11627        : AAPotentialValuesImpl(IRP, A) {}
11628  
11629    /// See AbstractAttribute::updateImpl(...).
11630    ChangeStatus updateImpl(Attributor &A) override {
11631      auto AssumedBefore = getAssumed();
11632  
11633      Function *Callee = getAssociatedFunction();
11634      if (!Callee)
11635        return indicatePessimisticFixpoint();
11636  
11637      bool UsedAssumedInformation = false;
11638      auto *CB = cast<CallBase>(getCtxI());
11639      if (CB->isMustTailCall() &&
11640          !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr,
11641                           UsedAssumedInformation))
11642        return indicatePessimisticFixpoint();
11643  
11644      SmallVector<AA::ValueAndContext> Values;
11645      if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
11646                                        Values, AA::Intraprocedural,
11647                                        UsedAssumedInformation))
11648        return indicatePessimisticFixpoint();
11649  
11650      Function *Caller = CB->getCaller();
11651  
11652      bool AnyNonLocal = false;
11653      for (auto &It : Values) {
11654        Value *V = It.getValue();
11655        std::optional<Value *> CallerV = A.translateArgumentToCallSiteContent(
11656            V, *CB, *this, UsedAssumedInformation);
11657        if (!CallerV.has_value()) {
11658          // Nothing to do as long as no value was determined.
11659          continue;
11660        }
11661        V = *CallerV ? *CallerV : V;
11662        if (AA::isDynamicallyUnique(A, *this, *V) &&
11663            AA::isValidInScope(*V, Caller)) {
11664          if (*CallerV) {
11665            SmallVector<AA::ValueAndContext> ArgValues;
11666            IRPosition IRP = IRPosition::value(*V);
11667            if (auto *Arg = dyn_cast<Argument>(V))
11668              if (Arg->getParent() == CB->getCalledOperand())
11669                IRP = IRPosition::callsite_argument(*CB, Arg->getArgNo());
11670            if (recurseForValue(A, IRP, AA::AnyScope))
11671              continue;
11672          }
11673          addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
11674        } else {
11675          AnyNonLocal = true;
11676          break;
11677        }
11678      }
11679      if (AnyNonLocal) {
11680        Values.clear();
11681        if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
11682                                          Values, AA::Interprocedural,
11683                                          UsedAssumedInformation))
11684          return indicatePessimisticFixpoint();
11685        AnyNonLocal = false;
11686        getState() = PotentialLLVMValuesState::getBestState();
11687        for (auto &It : Values) {
11688          Value *V = It.getValue();
11689          if (!AA::isDynamicallyUnique(A, *this, *V))
11690            return indicatePessimisticFixpoint();
11691          if (AA::isValidInScope(*V, Caller)) {
11692            addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
11693          } else {
11694            AnyNonLocal = true;
11695            addValue(A, getState(), *V, CB, AA::Interprocedural,
11696                     getAnchorScope());
11697          }
11698        }
11699        if (AnyNonLocal)
11700          giveUpOnIntraprocedural(A);
11701      }
11702      return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
11703                                             : ChangeStatus::CHANGED;
11704    }
11705  
11706    ChangeStatus indicatePessimisticFixpoint() override {
11707      return AAPotentialValues::indicatePessimisticFixpoint();
11708    }
11709  
11710    /// See AbstractAttribute::trackStatistics()
11711    void trackStatistics() const override {
11712      STATS_DECLTRACK_CSRET_ATTR(potential_values)
11713    }
11714  };
11715  
11716  struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
11717    AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
11718        : AAPotentialValuesFloating(IRP, A) {}
11719  
11720    /// See AbstractAttribute::trackStatistics()
11721    void trackStatistics() const override {
11722      STATS_DECLTRACK_CSARG_ATTR(potential_values)
11723    }
11724  };
11725  } // namespace
11726  
11727  /// ---------------------- Assumption Propagation ------------------------------
11728  namespace {
11729  struct AAAssumptionInfoImpl : public AAAssumptionInfo {
11730    AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
11731                         const DenseSet<StringRef> &Known)
11732        : AAAssumptionInfo(IRP, A, Known) {}
11733  
11734    /// See AbstractAttribute::manifest(...).
11735    ChangeStatus manifest(Attributor &A) override {
11736      // Don't manifest a universal set if it somehow made it here.
11737      if (getKnown().isUniversal())
11738        return ChangeStatus::UNCHANGED;
11739  
11740      const IRPosition &IRP = getIRPosition();
11741      SmallVector<StringRef, 0> Set(getAssumed().getSet().begin(),
11742                                    getAssumed().getSet().end());
11743      llvm::sort(Set);
11744      return A.manifestAttrs(IRP,
11745                             Attribute::get(IRP.getAnchorValue().getContext(),
11746                                            AssumptionAttrKey,
11747                                            llvm::join(Set, ",")),
11748                             /*ForceReplace=*/true);
11749    }
11750  
11751    bool hasAssumption(const StringRef Assumption) const override {
11752      return isValidState() && setContains(Assumption);
11753    }
11754  
11755    /// See AbstractAttribute::getAsStr()
11756    const std::string getAsStr(Attributor *A) const override {
11757      const SetContents &Known = getKnown();
11758      const SetContents &Assumed = getAssumed();
11759  
11760      SmallVector<StringRef, 0> Set(Known.getSet().begin(), Known.getSet().end());
11761      llvm::sort(Set);
11762      const std::string KnownStr = llvm::join(Set, ",");
11763  
11764      std::string AssumedStr = "Universal";
11765      if (!Assumed.isUniversal()) {
11766        Set.assign(Assumed.getSet().begin(), Assumed.getSet().end());
11767        AssumedStr = llvm::join(Set, ",");
11768      }
11769      return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
11770    }
11771  };
11772  
11773  /// Propagates assumption information from parent functions to all of their
11774  /// successors. An assumption can be propagated if the containing function
11775  /// dominates the called function.
11776  ///
11777  /// We start with a "known" set of assumptions already valid for the associated
11778  /// function and an "assumed" set that initially contains all possible
11779  /// assumptions. The assumed set is inter-procedurally updated by narrowing its
11780  /// contents as concrete values are known. The concrete values are seeded by the
11781  /// first nodes that are either entries into the call graph, or contains no
11782  /// first nodes that are either entries into the call graph, or contain no
11783  /// with all of its predecessors.
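       /// A small worked example with hypothetical assumption strings: if one
       /// caller carries assumptions {"a", "b"} and another carries {"b", "c"},
       /// the intersection leaves {"b"} as the assumed set of this function, in
       /// addition to whatever it already knows about itself.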
11784  struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
11785    AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
11786        : AAAssumptionInfoImpl(IRP, A,
11787                               getAssumptions(*IRP.getAssociatedFunction())) {}
11788  
11789    /// See AbstractAttribute::updateImpl(...).
11790    ChangeStatus updateImpl(Attributor &A) override {
11791      bool Changed = false;
11792  
11793      auto CallSitePred = [&](AbstractCallSite ACS) {
11794        const auto *AssumptionAA = A.getAAFor<AAAssumptionInfo>(
11795            *this, IRPosition::callsite_function(*ACS.getInstruction()),
11796            DepClassTy::REQUIRED);
11797        if (!AssumptionAA)
11798          return false;
11799        // Get the set of assumptions shared by all of this function's callers.
11800        Changed |= getIntersection(AssumptionAA->getAssumed());
11801        return !getAssumed().empty() || !getKnown().empty();
11802      };
11803  
11804      bool UsedAssumedInformation = false;
11805      // Get the intersection of all assumptions held by this node's predecessors.
11806      // If we don't know all the call sites then this is either an entry into the
11807      // call graph or an empty node. This node is known to only contain its own
11808      // assumptions and can be propagated to its successors.
11809      if (!A.checkForAllCallSites(CallSitePred, *this, true,
11810                                  UsedAssumedInformation))
11811        return indicatePessimisticFixpoint();
11812  
11813      return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11814    }
11815  
11816    void trackStatistics() const override {}
11817  };
11818  
11819  /// Assumption Info defined for call sites.
11820  struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
11821  
11822    AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
11823        : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
11824  
11825    /// See AbstractAttribute::initialize(...).
11826    void initialize(Attributor &A) override {
11827      const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
11828      A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
11829    }
11830  
11831    /// See AbstractAttribute::updateImpl(...).
11832    ChangeStatus updateImpl(Attributor &A) override {
11833      const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
11834      auto *AssumptionAA =
11835          A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
11836      if (!AssumptionAA)
11837        return indicatePessimisticFixpoint();
11838      bool Changed = getIntersection(AssumptionAA->getAssumed());
11839      return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11840    }
11841  
11842    /// See AbstractAttribute::trackStatistics()
11843    void trackStatistics() const override {}
11844  
11845  private:
11846    /// Helper to initialize the known set with all the assumptions this call and
11847    /// the callee contain.
11848    DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
11849      const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
11850      auto Assumptions = getAssumptions(CB);
11851      if (const Function *F = CB.getCaller())
11852        set_union(Assumptions, getAssumptions(*F));
11853      if (Function *F = IRP.getAssociatedFunction())
11854        set_union(Assumptions, getAssumptions(*F));
11855      return Assumptions;
11856    }
11857  };
11858  } // namespace
11859  
11860  AACallGraphNode *AACallEdgeIterator::operator*() const {
11861    return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
11862        A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
11863  }
11864  
11865  void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
11866  
11867  /// ------------------------ UnderlyingObjects ---------------------------------
11868  
11869  namespace {
11870  struct AAUnderlyingObjectsImpl
11871      : StateWrapper<BooleanState, AAUnderlyingObjects> {
11872    using BaseTy = StateWrapper<BooleanState, AAUnderlyingObjects>;
11873    AAUnderlyingObjectsImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
11874  
11875    /// See AbstractAttribute::getAsStr().
11876    const std::string getAsStr(Attributor *A) const override {
11877      return std::string("UnderlyingObjects ") +
11878             (isValidState()
11879                  ? (std::string("inter #") +
11880                     std::to_string(InterAssumedUnderlyingObjects.size()) +
11881                     " objs" + std::string(", intra #") +
11882                     std::to_string(IntraAssumedUnderlyingObjects.size()) +
11883                     " objs")
11884                  : "<invalid>");
11885    }
11886  
11887    /// See AbstractAttribute::trackStatistics()
11888    void trackStatistics() const override {}
11889  
11890    /// See AbstractAttribute::updateImpl(...).
11891    ChangeStatus updateImpl(Attributor &A) override {
11892      auto &Ptr = getAssociatedValue();
11893  
11894      auto DoUpdate = [&](SmallSetVector<Value *, 8> &UnderlyingObjects,
11895                          AA::ValueScope Scope) {
11896        bool UsedAssumedInformation = false;
11897        SmallPtrSet<Value *, 8> SeenObjects;
11898        SmallVector<AA::ValueAndContext> Values;
11899  
11900        if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), *this, Values,
11901                                          Scope, UsedAssumedInformation))
11902          return UnderlyingObjects.insert(&Ptr);
11903  
11904        bool Changed = false;
11905  
11906        for (unsigned I = 0; I < Values.size(); ++I) {
11907          auto &VAC = Values[I];
11908          auto *Obj = VAC.getValue();
11909          Value *UO = getUnderlyingObject(Obj);
11910          if (UO && UO != VAC.getValue() && SeenObjects.insert(UO).second) {
11911            const auto *OtherAA = A.getAAFor<AAUnderlyingObjects>(
11912                *this, IRPosition::value(*UO), DepClassTy::OPTIONAL);
11913            auto Pred = [&Values](Value &V) {
11914              Values.emplace_back(V, nullptr);
11915              return true;
11916            };
11917  
11918            if (!OtherAA || !OtherAA->forallUnderlyingObjects(Pred, Scope))
11919              llvm_unreachable(
11920                  "The forall call should not return false at this position");
11921  
11922            continue;
11923          }
11924  
11925          if (isa<SelectInst>(Obj)) {
11926            Changed |= handleIndirect(A, *Obj, UnderlyingObjects, Scope);
11927            continue;
11928          }
11929          if (auto *PHI = dyn_cast<PHINode>(Obj)) {
11930            // Explicitly look through PHIs as we do not care about dynamic
11931            // uniqueness.
11932            for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
11933              Changed |= handleIndirect(A, *PHI->getIncomingValue(u),
11934                                        UnderlyingObjects, Scope);
11935            }
11936            continue;
11937          }
11938  
11939          Changed |= UnderlyingObjects.insert(Obj);
11940        }
11941  
11942        return Changed;
11943      };
11944  
11945      bool Changed = false;
11946      Changed |= DoUpdate(IntraAssumedUnderlyingObjects, AA::Intraprocedural);
11947      Changed |= DoUpdate(InterAssumedUnderlyingObjects, AA::Interprocedural);
11948  
11949      return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
11950    }
11951  
11952    bool forallUnderlyingObjects(
11953        function_ref<bool(Value &)> Pred,
11954        AA::ValueScope Scope = AA::Interprocedural) const override {
11955      if (!isValidState())
11956        return Pred(getAssociatedValue());
11957  
11958      auto &AssumedUnderlyingObjects = Scope == AA::Intraprocedural
11959                                           ? IntraAssumedUnderlyingObjects
11960                                           : InterAssumedUnderlyingObjects;
11961      for (Value *Obj : AssumedUnderlyingObjects)
11962        if (!Pred(*Obj))
11963          return false;
11964  
11965      return true;
11966    }
11967  
11968  private:
11969    /// Handle the case where the value is not the actual underlying value, such
11970    /// as a phi node or a select instruction.
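         /// E.g., with hypothetical IR `%p = select i1 %c, ptr %a, ptr %b`, the
         /// underlying objects of both %a and %b are collected recursively.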
11971    bool handleIndirect(Attributor &A, Value &V,
11972                        SmallSetVector<Value *, 8> &UnderlyingObjects,
11973                        AA::ValueScope Scope) {
11974      bool Changed = false;
11975      const auto *AA = A.getAAFor<AAUnderlyingObjects>(
11976          *this, IRPosition::value(V), DepClassTy::OPTIONAL);
11977      auto Pred = [&](Value &V) {
11978        Changed |= UnderlyingObjects.insert(&V);
11979        return true;
11980      };
11981      if (!AA || !AA->forallUnderlyingObjects(Pred, Scope))
11982        llvm_unreachable(
11983            "The forall call should not return false at this position");
11984      return Changed;
11985    }
11986  
11987    /// All the underlying objects collected so far via intra procedural scope.
11988    SmallSetVector<Value *, 8> IntraAssumedUnderlyingObjects;
11989    /// All the underlying objects collected so far via inter procedural scope.
11990    SmallSetVector<Value *, 8> InterAssumedUnderlyingObjects;
11991  };
11992  
11993  struct AAUnderlyingObjectsFloating final : AAUnderlyingObjectsImpl {
11994    AAUnderlyingObjectsFloating(const IRPosition &IRP, Attributor &A)
11995        : AAUnderlyingObjectsImpl(IRP, A) {}
11996  };
11997  
11998  struct AAUnderlyingObjectsArgument final : AAUnderlyingObjectsImpl {
11999    AAUnderlyingObjectsArgument(const IRPosition &IRP, Attributor &A)
12000        : AAUnderlyingObjectsImpl(IRP, A) {}
12001  };
12002  
12003  struct AAUnderlyingObjectsCallSite final : AAUnderlyingObjectsImpl {
12004    AAUnderlyingObjectsCallSite(const IRPosition &IRP, Attributor &A)
12005        : AAUnderlyingObjectsImpl(IRP, A) {}
12006  };
12007  
12008  struct AAUnderlyingObjectsCallSiteArgument final : AAUnderlyingObjectsImpl {
12009    AAUnderlyingObjectsCallSiteArgument(const IRPosition &IRP, Attributor &A)
12010        : AAUnderlyingObjectsImpl(IRP, A) {}
12011  };
12012  
12013  struct AAUnderlyingObjectsReturned final : AAUnderlyingObjectsImpl {
12014    AAUnderlyingObjectsReturned(const IRPosition &IRP, Attributor &A)
12015        : AAUnderlyingObjectsImpl(IRP, A) {}
12016  };
12017  
12018  struct AAUnderlyingObjectsCallSiteReturned final : AAUnderlyingObjectsImpl {
12019    AAUnderlyingObjectsCallSiteReturned(const IRPosition &IRP, Attributor &A)
12020        : AAUnderlyingObjectsImpl(IRP, A) {}
12021  };
12022  
12023  struct AAUnderlyingObjectsFunction final : AAUnderlyingObjectsImpl {
12024    AAUnderlyingObjectsFunction(const IRPosition &IRP, Attributor &A)
12025        : AAUnderlyingObjectsImpl(IRP, A) {}
12026  };
12027  } // namespace
12028  
12029  /// ------------------------ Global Value Info  -------------------------------
12030  namespace {
12031  struct AAGlobalValueInfoFloating : public AAGlobalValueInfo {
12032    AAGlobalValueInfoFloating(const IRPosition &IRP, Attributor &A)
12033        : AAGlobalValueInfo(IRP, A) {}
12034  
12035    /// See AbstractAttribute::initialize(...).
12036    void initialize(Attributor &A) override {}
12037  
12038    bool checkUse(Attributor &A, const Use &U, bool &Follow,
12039                  SmallVectorImpl<const Value *> &Worklist) {
12040      Instruction *UInst = dyn_cast<Instruction>(U.getUser());
12041      if (!UInst) {
12042        Follow = true;
12043        return true;
12044      }
12045  
12046      LLVM_DEBUG(dbgs() << "[AAGlobalValueInfo] Check use: " << *U.get() << " in "
12047                        << *UInst << "\n");
12048  
12049      if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
12050        int Idx = &Cmp->getOperandUse(0) == &U;
12051        if (isa<Constant>(Cmp->getOperand(Idx)))
12052          return true;
12053        return U == &getAnchorValue();
12054      }
12055  
12056      // Explicitly catch return instructions.
12057      if (isa<ReturnInst>(UInst)) {
12058        auto CallSitePred = [&](AbstractCallSite ACS) {
12059          Worklist.push_back(ACS.getInstruction());
12060          return true;
12061        };
12062        bool UsedAssumedInformation = false;
12063        // TODO: We should traverse the uses or add a "non-call-site" CB.
12064        if (!A.checkForAllCallSites(CallSitePred, *UInst->getFunction(),
12065                                    /*RequireAllCallSites=*/true, this,
12066                                    UsedAssumedInformation))
12067          return false;
12068        return true;
12069      }
12070  
12071      // For now we only use special logic for call sites. However, the tracker
12072      // itself knows about a lot of other non-capturing cases already.
12073      auto *CB = dyn_cast<CallBase>(UInst);
12074      if (!CB)
12075        return false;
12076      // Direct calls are OK uses.
12077      if (CB->isCallee(&U))
12078        return true;
12079      // Non-argument uses are scary.
12080      if (!CB->isArgOperand(&U))
12081        return false;
12082      // TODO: Iterate callees.
12083      auto *Fn = dyn_cast<Function>(CB->getCalledOperand());
12084      if (!Fn || !A.isFunctionIPOAmendable(*Fn))
12085        return false;
12086  
12087      unsigned ArgNo = CB->getArgOperandNo(&U);
12088      Worklist.push_back(Fn->getArg(ArgNo));
12089      return true;
12090    }
12091  
12092    ChangeStatus updateImpl(Attributor &A) override {
12093      unsigned NumUsesBefore = Uses.size();
12094  
12095      SmallPtrSet<const Value *, 8> Visited;
12096      SmallVector<const Value *> Worklist;
12097      Worklist.push_back(&getAnchorValue());
12098  
12099      auto UsePred = [&](const Use &U, bool &Follow) -> bool {
12100        Uses.insert(&U);
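             // Capturing and non-capturing uses are both vetted via checkUse;
             // only pass-through uses (e.g., casts) are followed transitively.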
12101        switch (DetermineUseCaptureKind(U, nullptr)) {
12102        case UseCaptureKind::NO_CAPTURE:
12103          return checkUse(A, U, Follow, Worklist);
12104        case UseCaptureKind::MAY_CAPTURE:
12105          return checkUse(A, U, Follow, Worklist);
12106        case UseCaptureKind::PASSTHROUGH:
12107          Follow = true;
12108          return true;
12109        }
12110        return true;
12111      };
12112      auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
12113        Uses.insert(&OldU);
12114        return true;
12115      };
12116  
12117      while (!Worklist.empty()) {
12118        const Value *V = Worklist.pop_back_val();
12119        if (!Visited.insert(V).second)
12120          continue;
12121        if (!A.checkForAllUses(UsePred, *this, *V,
12122                               /* CheckBBLivenessOnly */ true,
12123                               DepClassTy::OPTIONAL,
12124                               /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
12125          return indicatePessimisticFixpoint();
12126        }
12127      }
12128  
12129      return Uses.size() == NumUsesBefore ? ChangeStatus::UNCHANGED
12130                                          : ChangeStatus::CHANGED;
12131    }
12132  
12133    bool isPotentialUse(const Use &U) const override {
12134      return !isValidState() || Uses.contains(&U);
12135    }
12136  
12137    /// See AbstractAttribute::manifest(...).
12138    ChangeStatus manifest(Attributor &A) override {
12139      return ChangeStatus::UNCHANGED;
12140    }
12141  
12142    /// See AbstractAttribute::getAsStr().
12143    const std::string getAsStr(Attributor *A) const override {
12144      return "[" + std::to_string(Uses.size()) + " uses]";
12145    }
12146  
12147    void trackStatistics() const override {
12148      STATS_DECLTRACK_FLOATING_ATTR(GlobalValuesTracked);
12149    }
12150  
12151  private:
12152    /// Set of (transitive) uses of this GlobalValue.
12153    SmallPtrSet<const Use *, 8> Uses;
12154  };
12155  } // namespace
12156  
12157  /// ------------------------ Indirect Call Info  -------------------------------
12158  namespace {
12159  struct AAIndirectCallInfoCallSite : public AAIndirectCallInfo {
12160    AAIndirectCallInfoCallSite(const IRPosition &IRP, Attributor &A)
12161        : AAIndirectCallInfo(IRP, A) {}
12162  
12163    /// See AbstractAttribute::initialize(...).
12164    void initialize(Attributor &A) override {
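           // The !callees metadata, if present, enumerates every possible callee
           // of this call site. An illustrative IR form:
           //   call void %fp(), !callees !0
           //   !0 = !{ptr @f, ptr @g}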
12165      auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees);
12166      if (!MD && !A.isClosedWorldModule())
12167        return;
12168  
12169      if (MD) {
12170        for (const auto &Op : MD->operands())
12171          if (Function *Callee = mdconst::dyn_extract_or_null<Function>(Op))
12172            PotentialCallees.insert(Callee);
12173      } else if (A.isClosedWorldModule()) {
12174        ArrayRef<Function *> IndirectlyCallableFunctions =
12175            A.getInfoCache().getIndirectlyCallableFunctions(A);
12176        PotentialCallees.insert(IndirectlyCallableFunctions.begin(),
12177                                IndirectlyCallableFunctions.end());
12178      }
12179  
12180      if (PotentialCallees.empty())
12181        indicateOptimisticFixpoint();
12182    }
12183  
12184    ChangeStatus updateImpl(Attributor &A) override {
12185      CallBase *CB = cast<CallBase>(getCtxI());
12186      const Use &CalleeUse = CB->getCalledOperandUse();
12187      Value *FP = CB->getCalledOperand();
12188  
12189      SmallSetVector<Function *, 4> AssumedCalleesNow;
12190      bool AllCalleesKnownNow = AllCalleesKnown;
12191  
12192      auto CheckPotentialCalleeUse = [&](Function &PotentialCallee,
12193                                         bool &UsedAssumedInformation) {
12194        const auto *GIAA = A.getAAFor<AAGlobalValueInfo>(
12195            *this, IRPosition::value(PotentialCallee), DepClassTy::OPTIONAL);
12196        if (!GIAA || GIAA->isPotentialUse(CalleeUse))
12197          return true;
12198        UsedAssumedInformation = !GIAA->isAtFixpoint();
12199        return false;
12200      };
12201  
12202      auto AddPotentialCallees = [&]() {
12203        for (auto *PotentialCallee : PotentialCallees) {
12204          bool UsedAssumedInformation = false;
12205          if (CheckPotentialCalleeUse(*PotentialCallee, UsedAssumedInformation))
12206            AssumedCalleesNow.insert(PotentialCallee);
12207        }
12208      };
12209  
12210      // Use simplification to find potential callees; if !callees was present,
12211      // fall back to that set if necessary.
12212      bool UsedAssumedInformation = false;
12213      SmallVector<AA::ValueAndContext> Values;
12214      if (!A.getAssumedSimplifiedValues(IRPosition::value(*FP), this, Values,
12215                                        AA::ValueScope::AnyScope,
12216                                        UsedAssumedInformation)) {
12217        if (PotentialCallees.empty())
12218          return indicatePessimisticFixpoint();
12219        AddPotentialCallees();
12220      }
12221  
12222      // Try to find a reason for \p Fn not to be a potential callee. If none was
12223      // found, add it to the assumed callees set.
12224      auto CheckPotentialCallee = [&](Function &Fn) {
12225        if (!PotentialCallees.empty() && !PotentialCallees.count(&Fn))
12226          return false;
12227  
12228        auto &CachedResult = FilterResults[&Fn];
12229        if (CachedResult.has_value())
12230          return CachedResult.value();
12231  
12232        bool UsedAssumedInformation = false;
12233        if (!CheckPotentialCalleeUse(Fn, UsedAssumedInformation)) {
12234          if (!UsedAssumedInformation)
12235            CachedResult = false;
12236          return false;
12237        }
12238  
12239        int NumFnArgs = Fn.arg_size();
12240        int NumCBArgs = CB->arg_size();
12241  
12242        // Check if any excess argument (which we fill up with poison) is known to
12243        // be UB on undef.
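             // Illustrative example: a callee `void f(i32 noundef %x)` reached
             // from a call site that passes no arguments would receive poison
             // for %x, violating noundef, so such a callee is filtered out.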
12244        for (int I = NumCBArgs; I < NumFnArgs; ++I) {
12245          bool IsKnown = false;
12246          if (AA::hasAssumedIRAttr<Attribute::NoUndef>(
12247                  A, this, IRPosition::argument(*Fn.getArg(I)),
12248                  DepClassTy::OPTIONAL, IsKnown)) {
12249            if (IsKnown)
12250              CachedResult = false;
12251            return false;
12252          }
12253        }
12254  
12255        CachedResult = true;
12256        return true;
12257      };
12258  
12259      // Check the simplification result, prune known-UB callees, and restrict
12260      // the result to the !callees set, if present.
12261      for (auto &VAC : Values) {
12262        if (isa<UndefValue>(VAC.getValue()))
12263          continue;
12264        if (isa<ConstantPointerNull>(VAC.getValue()) &&
12265            VAC.getValue()->getType()->getPointerAddressSpace() == 0)
12266          continue;
12267        // TODO: Check for known UB, e.g., poison + noundef.
12268        if (auto *VACFn = dyn_cast<Function>(VAC.getValue())) {
12269          if (CheckPotentialCallee(*VACFn))
12270            AssumedCalleesNow.insert(VACFn);
12271          continue;
12272        }
12273        if (!PotentialCallees.empty()) {
12274          AddPotentialCallees();
12275          break;
12276        }
12277        AllCalleesKnownNow = false;
12278      }
12279  
12280      if (AssumedCalleesNow == AssumedCallees &&
12281          AllCalleesKnown == AllCalleesKnownNow)
12282        return ChangeStatus::UNCHANGED;
12283  
12284      std::swap(AssumedCallees, AssumedCalleesNow);
12285      AllCalleesKnown = AllCalleesKnownNow;
12286      return ChangeStatus::CHANGED;
12287    }
12288  
12289    /// See AbstractAttribute::manifest(...).
12290    ChangeStatus manifest(Attributor &A) override {
12291      // If we can't specialize at all, give up now.
12292      if (!AllCalleesKnown && AssumedCallees.empty())
12293        return ChangeStatus::UNCHANGED;
12294  
12295      CallBase *CB = cast<CallBase>(getCtxI());
12296      bool UsedAssumedInformation = false;
12297      if (A.isAssumedDead(*CB, this, /*LivenessAA=*/nullptr,
12298                          UsedAssumedInformation))
12299        return ChangeStatus::UNCHANGED;
12300  
12301      ChangeStatus Changed = ChangeStatus::UNCHANGED;
12302      Value *FP = CB->getCalledOperand();
12303      if (FP->getType()->getPointerAddressSpace())
12304        FP = new AddrSpaceCastInst(FP, PointerType::get(FP->getType(), 0),
12305                                   FP->getName() + ".as0", CB->getIterator());
12306  
12307      bool CBIsVoid = CB->getType()->isVoidTy();
12308      BasicBlock::iterator IP = CB->getIterator();
12309      FunctionType *CSFT = CB->getFunctionType();
12310      SmallVector<Value *> CSArgs(CB->arg_begin(), CB->arg_end());
12311  
12312      // If we know all callees and there are none, the call site is (effectively)
12313      // dead (or UB).
12314      if (AssumedCallees.empty()) {
12315        assert(AllCalleesKnown &&
12316               "Expected all callees to be known if there are none.");
12317        A.changeToUnreachableAfterManifest(CB);
12318        return ChangeStatus::CHANGED;
12319      }
12320  
12321      // Special handling for the single callee case.
12322      if (AllCalleesKnown && AssumedCallees.size() == 1) {
12323        auto *NewCallee = AssumedCallees.front();
12324        if (isLegalToPromote(*CB, NewCallee)) {
12325          promoteCall(*CB, NewCallee, nullptr);
12326          return ChangeStatus::CHANGED;
12327        }
12328        Instruction *NewCall =
12329            CallInst::Create(FunctionCallee(CSFT, NewCallee), CSArgs,
12330                             CB->getName(), CB->getIterator());
12331        if (!CBIsVoid)
12332          A.changeAfterManifest(IRPosition::callsite_returned(*CB), *NewCall);
12333        A.deleteAfterManifest(*CB);
12334        return ChangeStatus::CHANGED;
12335      }
12336  
12337      // For each potential value we create a conditional
12338      //
12339      // ```
12340      // if (ptr == value) value(args);
12341      // else ...
12342      // ```
12343      //
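           // An illustrative sketch (not the exact output) for two known callees
           // @f and @g plus a fallback indirect call:
           //
           //   %c0 = icmp eq ptr %fp, @f
           //   ; if %c0: %r0 = call @f(args)
           //   ; else: %c1 = icmp eq ptr %fp, @g
           //   ;   if %c1: %r1 = call @g(args)
           //   ;   else:   %r2 = call %fp(args) ; annotated with !callees
           //
           // If the call returns a value, the results are merged with a PHI.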
12344      bool SpecializedForAnyCallees = false;
12345      bool SpecializedForAllCallees = AllCalleesKnown;
12346      ICmpInst *LastCmp = nullptr;
12347      SmallVector<Function *, 8> SkippedAssumedCallees;
12348      SmallVector<std::pair<CallInst *, Instruction *>> NewCalls;
12349      for (Function *NewCallee : AssumedCallees) {
12350        if (!A.shouldSpecializeCallSiteForCallee(*this, *CB, *NewCallee)) {
12351          SkippedAssumedCallees.push_back(NewCallee);
12352          SpecializedForAllCallees = false;
12353          continue;
12354        }
12355        SpecializedForAnyCallees = true;
12356  
12357        LastCmp = new ICmpInst(IP, llvm::CmpInst::ICMP_EQ, FP, NewCallee);
12358        Instruction *ThenTI =
12359            SplitBlockAndInsertIfThen(LastCmp, IP, /* Unreachable */ false);
12360        BasicBlock *CBBB = CB->getParent();
12361        A.registerManifestAddedBasicBlock(*ThenTI->getParent());
12362        A.registerManifestAddedBasicBlock(*IP->getParent());
12363        auto *SplitTI = cast<BranchInst>(LastCmp->getNextNode());
12364        BasicBlock *ElseBB;
12365        if (&*IP == CB) {
12366          ElseBB = BasicBlock::Create(ThenTI->getContext(), "",
12367                                      ThenTI->getFunction(), CBBB);
12368          A.registerManifestAddedBasicBlock(*ElseBB);
12369          IP = BranchInst::Create(CBBB, ElseBB)->getIterator();
12370          SplitTI->replaceUsesOfWith(CBBB, ElseBB);
12371        } else {
12372          ElseBB = IP->getParent();
12373          ThenTI->replaceUsesOfWith(ElseBB, CBBB);
12374        }
12375        CastInst *RetBC = nullptr;
12376        CallInst *NewCall = nullptr;
12377        if (isLegalToPromote(*CB, NewCallee)) {
12378          auto *CBClone = cast<CallBase>(CB->clone());
12379          CBClone->insertBefore(ThenTI);
12380          NewCall = &cast<CallInst>(promoteCall(*CBClone, NewCallee, &RetBC));
12381        } else {
12382          NewCall = CallInst::Create(FunctionCallee(CSFT, NewCallee), CSArgs,
12383                                     CB->getName(), ThenTI->getIterator());
12384        }
12385        NewCalls.push_back({NewCall, RetBC});
12386      }
12387  
12388      auto AttachCalleeMetadata = [&](CallBase &IndirectCB) {
12389        if (!AllCalleesKnown)
12390          return ChangeStatus::UNCHANGED;
12391        MDBuilder MDB(IndirectCB.getContext());
12392        MDNode *Callees = MDB.createCallees(SkippedAssumedCallees);
12393        IndirectCB.setMetadata(LLVMContext::MD_callees, Callees);
12394        return ChangeStatus::CHANGED;
12395      };
12396  
12397      if (!SpecializedForAnyCallees)
12398        return AttachCalleeMetadata(*CB);
12399  
12400      // Check if we still need the fallback indirect call.
12401      if (SpecializedForAllCallees) {
12402        LastCmp->replaceAllUsesWith(ConstantInt::getTrue(LastCmp->getContext()));
12403        LastCmp->eraseFromParent();
12404        new UnreachableInst(IP->getContext(), IP);
12405        IP->eraseFromParent();
12406      } else {
12407        auto *CBClone = cast<CallInst>(CB->clone());
12408        CBClone->setName(CB->getName());
12409        CBClone->insertBefore(*IP->getParent(), IP);
12410        NewCalls.push_back({CBClone, nullptr});
12411        AttachCalleeMetadata(*CBClone);
12412      }
12413  
12414      // Check if we need a PHI to merge the results.
12415      if (!CBIsVoid) {
12416        auto *PHI = PHINode::Create(CB->getType(), NewCalls.size(),
12417                                    CB->getName() + ".phi",
12418                                    CB->getParent()->getFirstInsertionPt());
12419        for (auto &It : NewCalls) {
12420          CallBase *NewCall = It.first;
12421          Instruction *CallRet = It.second ? It.second : It.first;
12422          if (CallRet->getType() == CB->getType())
12423            PHI->addIncoming(CallRet, CallRet->getParent());
12424          else if (NewCall->getType()->isVoidTy())
12425            PHI->addIncoming(PoisonValue::get(CB->getType()),
12426                             NewCall->getParent());
12427          else
12428            llvm_unreachable("Call return should match or be void!");
12429        }
12430        A.changeAfterManifest(IRPosition::callsite_returned(*CB), *PHI);
12431      }
12432  
12433      A.deleteAfterManifest(*CB);
12434      Changed = ChangeStatus::CHANGED;
12435  
12436      return Changed;
12437    }
12438  
12439    /// See AbstractAttribute::getAsStr().
12440    const std::string getAsStr(Attributor *A) const override {
12441      return std::string(AllCalleesKnown ? "eliminate" : "specialize") +
12442             " indirect call site with " + std::to_string(AssumedCallees.size()) +
12443             " functions";
12444    }
12445  
12446    void trackStatistics() const override {
12447      if (AllCalleesKnown) {
12448        STATS_DECLTRACK(
12449            Eliminated, CallSites,
12450            "Number of indirect call sites eliminated via specialization")
12451      } else {
12452        STATS_DECLTRACK(Specialized, CallSites,
12453                        "Number of indirect call sites specialized")
12454      }
12455    }
12456  
12457    bool foreachCallee(function_ref<bool(Function *)> CB) const override {
12458      return isValidState() && AllCalleesKnown && all_of(AssumedCallees, CB);
12459    }
12460  
12461  private:
12462    /// Map to remember filter results.
12463    DenseMap<Function *, std::optional<bool>> FilterResults;
12464  
12465    /// If the !callees metadata was present, this set will contain all potential
12466    /// callees (superset).
12467    SmallSetVector<Function *, 4> PotentialCallees;
12468  
12469    /// This set contains all currently assumed callees, which might grow over
12470    /// time.
12471    SmallSetVector<Function *, 4> AssumedCallees;
12472  
12473    /// Flag to indicate if all possible callees are in the AssumedCallees set or
12474    /// if there could be others.
12475    bool AllCalleesKnown = true;
12476  };
12477  } // namespace
12478  
12479  /// ------------------------ Address Space  ------------------------------------
12480  namespace {
12481  struct AAAddressSpaceImpl : public AAAddressSpace {
12482    AAAddressSpaceImpl(const IRPosition &IRP, Attributor &A)
12483        : AAAddressSpace(IRP, A) {}
12484  
12485    int32_t getAddressSpace() const override {
12486      assert(isValidState() && "the AA is invalid");
12487      return AssumedAddressSpace;
12488    }
12489  
12490    /// See AbstractAttribute::initialize(...).
12491    void initialize(Attributor &A) override {
12492      assert(getAssociatedType()->isPtrOrPtrVectorTy() &&
12493             "Associated value is not a pointer");
12494    }
12495  
12496    ChangeStatus updateImpl(Attributor &A) override {
12497      int32_t OldAddressSpace = AssumedAddressSpace;
12498      auto *AUO = A.getOrCreateAAFor<AAUnderlyingObjects>(getIRPosition(), this,
12499                                                          DepClassTy::REQUIRED);
12500      auto Pred = [&](Value &Obj) {
12501        if (isa<UndefValue>(&Obj))
12502          return true;
12503        return takeAddressSpace(Obj.getType()->getPointerAddressSpace());
12504      };
12505  
12506      if (!AUO->forallUnderlyingObjects(Pred))
12507        return indicatePessimisticFixpoint();
12508  
12509      return OldAddressSpace == AssumedAddressSpace ? ChangeStatus::UNCHANGED
12510                                                    : ChangeStatus::CHANGED;
12511    }
12512  
12513    /// See AbstractAttribute::manifest(...).
12514    ChangeStatus manifest(Attributor &A) override {
12515      Value *AssociatedValue = &getAssociatedValue();
12516      Value *OriginalValue = peelAddrspacecast(AssociatedValue);
12517      if (getAddressSpace() == NoAddressSpace ||
12518          static_cast<uint32_t>(getAddressSpace()) ==
12519              getAssociatedType()->getPointerAddressSpace())
12520        return ChangeStatus::UNCHANGED;
12521  
12522      Type *NewPtrTy = PointerType::get(getAssociatedType()->getContext(),
12523                                        static_cast<uint32_t>(getAddressSpace()));
12524      bool UseOriginalValue =
12525          OriginalValue->getType()->getPointerAddressSpace() ==
12526          static_cast<uint32_t>(getAddressSpace());
12527  
12528      bool Changed = false;
12529  
12530      auto MakeChange = [&](Instruction *I, Use &U) {
12531        Changed = true;
12532        if (UseOriginalValue) {
12533          A.changeUseAfterManifest(U, *OriginalValue);
12534          return;
12535        }
12536        Instruction *CastInst = new AddrSpaceCastInst(OriginalValue, NewPtrTy);
12537        CastInst->insertBefore(I);
12538        A.changeUseAfterManifest(U, *CastInst);
12539      };
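           // Illustrative effect, assuming the deduced address space is 3: a use
           //   %f = addrspacecast ptr addrspace(3) %obj to ptr
           //   %v = load i32, ptr %f
           // is rewritten to access the addrspace(3) pointer directly:
           //   %v = load i32, ptr addrspace(3) %obj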
12540  
12541      auto Pred = [&](const Use &U, bool &) {
12542        if (U.get() != AssociatedValue)
12543          return true;
12544        auto *Inst = dyn_cast<Instruction>(U.getUser());
12545        if (!Inst)
12546          return true;
12547        // This is a workaround to make sure we only change uses from the
12548        // corresponding CGSCC if the AA is run on a CGSCC instead of the entire module.
12549        if (!A.isRunOn(Inst->getFunction()))
12550          return true;
12551        if (isa<LoadInst>(Inst))
12552          MakeChange(Inst, const_cast<Use &>(U));
12553        if (isa<StoreInst>(Inst)) {
12554          // We only make changes if the use is the pointer operand.
12555          if (U.getOperandNo() == 1)
12556            MakeChange(Inst, const_cast<Use &>(U));
12557        }
12558        return true;
12559      };
12560  
12561      // It doesn't matter if we can't check all uses as we can simply
12562      // conservatively ignore those that cannot be visited.
12563      (void)A.checkForAllUses(Pred, *this, getAssociatedValue(),
12564                              /* CheckBBLivenessOnly */ true);
12565  
12566      return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
12567    }
12568  
12569    /// See AbstractAttribute::getAsStr().
12570    const std::string getAsStr(Attributor *A) const override {
12571      if (!isValidState())
12572        return "addrspace(<invalid>)";
12573      return "addrspace(" +
12574             (AssumedAddressSpace == NoAddressSpace
12575                  ? "none"
12576                  : std::to_string(AssumedAddressSpace)) +
12577             ")";
12578    }
12579  
12580  private:
12581    int32_t AssumedAddressSpace = NoAddressSpace;
12582  
12583    bool takeAddressSpace(int32_t AS) {
12584      if (AssumedAddressSpace == NoAddressSpace) {
12585        AssumedAddressSpace = AS;
12586        return true;
12587      }
12588      return AssumedAddressSpace == AS;
12589    }
12590  
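         // Strips nested addrspacecast instructions and constant expressions;
         // e.g., for `addrspacecast (ptr addrspace(3) @g to ptr)` this returns
         // the underlying @g.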
12591    static Value *peelAddrspacecast(Value *V) {
12592      if (auto *I = dyn_cast<AddrSpaceCastInst>(V))
12593        return peelAddrspacecast(I->getPointerOperand());
12594      if (auto *C = dyn_cast<ConstantExpr>(V))
12595        if (C->getOpcode() == Instruction::AddrSpaceCast)
12596          return peelAddrspacecast(C->getOperand(0));
12597      return V;
12598    }
12599  };
12600  
12601  struct AAAddressSpaceFloating final : AAAddressSpaceImpl {
12602    AAAddressSpaceFloating(const IRPosition &IRP, Attributor &A)
12603        : AAAddressSpaceImpl(IRP, A) {}
12604  
12605    void trackStatistics() const override {
12606      STATS_DECLTRACK_FLOATING_ATTR(addrspace);
12607    }
12608  };
12609  
12610  struct AAAddressSpaceReturned final : AAAddressSpaceImpl {
12611    AAAddressSpaceReturned(const IRPosition &IRP, Attributor &A)
12612        : AAAddressSpaceImpl(IRP, A) {}
12613  
12614    /// See AbstractAttribute::initialize(...).
12615    void initialize(Attributor &A) override {
12616      // TODO: we don't rewrite the returned value for now because it would
12617      // require rewriting the function signature and all call sites.
12618      (void)indicatePessimisticFixpoint();
12619    }
12620  
12621    void trackStatistics() const override {
12622      STATS_DECLTRACK_FNRET_ATTR(addrspace);
12623    }
12624  };
12625  
12626  struct AAAddressSpaceCallSiteReturned final : AAAddressSpaceImpl {
12627    AAAddressSpaceCallSiteReturned(const IRPosition &IRP, Attributor &A)
12628        : AAAddressSpaceImpl(IRP, A) {}
12629  
12630    void trackStatistics() const override {
12631      STATS_DECLTRACK_CSRET_ATTR(addrspace);
12632    }
12633  };
12634  
12635  struct AAAddressSpaceArgument final : AAAddressSpaceImpl {
12636    AAAddressSpaceArgument(const IRPosition &IRP, Attributor &A)
12637        : AAAddressSpaceImpl(IRP, A) {}
12638  
12639    void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(addrspace); }
12640  };
12641  
12642  struct AAAddressSpaceCallSiteArgument final : AAAddressSpaceImpl {
12643    AAAddressSpaceCallSiteArgument(const IRPosition &IRP, Attributor &A)
12644        : AAAddressSpaceImpl(IRP, A) {}
12645  
12646    /// See AbstractAttribute::initialize(...).
12647    void initialize(Attributor &A) override {
12648      // TODO: we don't rewrite the call site argument for now because it would
12649      // require rewriting the function signature of the callee.
12650      (void)indicatePessimisticFixpoint();
12651    }
12652  
12653    void trackStatistics() const override {
12654      STATS_DECLTRACK_CSARG_ATTR(addrspace);
12655    }
12656  };
12657  } // namespace
12658  
12659  /// ------------------------ Allocation Info  ----------------------------------
12660  namespace {
12661  struct AAAllocationInfoImpl : public AAAllocationInfo {
12662    AAAllocationInfoImpl(const IRPosition &IRP, Attributor &A)
12663        : AAAllocationInfo(IRP, A) {}
12664  
12665    std::optional<TypeSize> getAllocatedSize() const override {
12666      assert(isValidState() && "the AA is invalid");
12667      return AssumedAllocatedSize;
12668    }
12669  
12670    std::optional<TypeSize> findInitialAllocationSize(Instruction *I,
12671                                                      const DataLayout &DL) {
12672  
12673      // TODO: implement the case for malloc-like instructions
12674      switch (I->getOpcode()) {
12675      case Instruction::Alloca: {
12676        AllocaInst *AI = cast<AllocaInst>(I);
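             // E.g. (illustrative, typical data layout): for `alloca i64` this
             // yields a fixed TypeSize of 8 bytes.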
12677        return AI->getAllocationSize(DL);
12678      }
12679      default:
12680        return std::nullopt;
12681      }
12682    }
12683  
12684    ChangeStatus updateImpl(Attributor &A) override {
12685  
12686      const IRPosition &IRP = getIRPosition();
12687      Instruction *I = IRP.getCtxI();
12688  
12689      // TODO: update the check for malloc-like calls
12690      if (!isa<AllocaInst>(I))
12691        return indicatePessimisticFixpoint();
12692  
12693      bool IsKnownNoCapture;
12694      if (!AA::hasAssumedIRAttr<Attribute::NoCapture>(
12695              A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture))
12696        return indicatePessimisticFixpoint();
12697  
12698      const AAPointerInfo *PI =
12699          A.getOrCreateAAFor<AAPointerInfo>(IRP, *this, DepClassTy::REQUIRED);
12700  
12701      if (!PI)
12702        return indicatePessimisticFixpoint();
12703  
12704      if (!PI->getState().isValidState())
12705        return indicatePessimisticFixpoint();
12706  
12707      const DataLayout &DL = A.getDataLayout();
12708      const auto AllocationSize = findInitialAllocationSize(I, DL);
12709  
12710      // If the initial allocation size could not be determined, we give up.
12711      if (!AllocationSize)
12712        return indicatePessimisticFixpoint();
12713  
12714      // For zero-sized allocations, we give up,
12715      // since we cannot reduce the allocation any further.
12716      if (*AllocationSize == 0)
12717        return indicatePessimisticFixpoint();
12718  
12719      int64_t BinSize = PI->numOffsetBins();
12720  
12721      // TODO: implement for multiple bins
12722      if (BinSize > 1)
12723        return indicatePessimisticFixpoint();
12724  
12725      if (BinSize == 0) {
12726        auto NewAllocationSize = std::optional<TypeSize>(TypeSize(0, false));
12727        if (!changeAllocationSize(NewAllocationSize))
12728          return ChangeStatus::UNCHANGED;
12729        return ChangeStatus::CHANGED;
12730      }
12731  
12732      // TODO: refactor this to be part of the multiple-bin case
12733      const auto &It = PI->begin();
12734  
12735      // TODO: handle if Offset is not zero
12736      if (It->first.Offset != 0)
12737        return indicatePessimisticFixpoint();
12738  
12739      uint64_t SizeOfBin = It->first.Offset + It->first.Size;
12740  
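           // E.g. (illustrative): a single accessed bin [0, 4) on a 16-byte
           // alloca lets us shrink the assumed allocation to 4 bytes (recorded
           // in bits below).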
12741      if (SizeOfBin >= *AllocationSize)
12742        return indicatePessimisticFixpoint();
12743  
12744      auto NewAllocationSize =
12745          std::optional<TypeSize>(TypeSize(SizeOfBin * 8, false));
12746  
12747      if (!changeAllocationSize(NewAllocationSize))
12748        return ChangeStatus::UNCHANGED;
12749  
12750      return ChangeStatus::CHANGED;
12751    }
12752  
12753    /// See AbstractAttribute::manifest(...).
12754    ChangeStatus manifest(Attributor &A) override {
12755  
12756      assert(isValidState() &&
12757             "Manifest should only be called if the state is valid.");
12758  
12759      Instruction *I = getIRPosition().getCtxI();
12760  
12761      auto FixedAllocatedSizeInBits = getAllocatedSize()->getFixedValue();
12762  
12763      uint64_t NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
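           // Round the bit count up to whole bytes, e.g., 17 bits -> 3 bytes.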
12764  
12765      switch (I->getOpcode()) {
12766      // TODO: add a case for malloc-like calls
12767      case Instruction::Alloca: {
12768  
12769        AllocaInst *AI = cast<AllocaInst>(I);
12770  
12771        Type *CharType = Type::getInt8Ty(I->getContext());
12772  
12773        auto *NumBytesToValue =
12774            ConstantInt::get(I->getContext(), APInt(32, NumBytesToAllocate));
12775  
12776      BasicBlock::iterator InsertPt = AI->getIterator();
12777      InsertPt = std::next(InsertPt);
12778        AllocaInst *NewAllocaInst =
12779            new AllocaInst(CharType, AI->getAddressSpace(), NumBytesToValue,
12780                         AI->getAlign(), AI->getName(), InsertPt);
12781  
12782        if (A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst))
12783          return ChangeStatus::CHANGED;
12784  
12785        break;
12786      }
12787      default:
12788        break;
12789      }
12790  
12791      return ChangeStatus::UNCHANGED;
12792    }
12793  
12794    /// See AbstractAttribute::getAsStr().
12795    const std::string getAsStr(Attributor *A) const override {
12796      if (!isValidState())
12797        return "allocationinfo(<invalid>)";
12798      return "allocationinfo(" +
12799             (AssumedAllocatedSize == HasNoAllocationSize
12800                  ? "none"
12801                  : std::to_string(AssumedAllocatedSize->getFixedValue())) +
12802             ")";
12803    }
12804  
12805  private:
12806    std::optional<TypeSize> AssumedAllocatedSize = HasNoAllocationSize;
12807  
12808    // Maintain the computed allocation size of the object.
12809    // Returns true if the assumed allocation size was modified, false otherwise.
12810    bool changeAllocationSize(std::optional<TypeSize> Size) {
12811      if (AssumedAllocatedSize == HasNoAllocationSize ||
12812          AssumedAllocatedSize != Size) {
12813        AssumedAllocatedSize = Size;
12814        return true;
12815      }
12816      return false;
12817    }
12818  };
12819  
12820  struct AAAllocationInfoFloating : AAAllocationInfoImpl {
12821    AAAllocationInfoFloating(const IRPosition &IRP, Attributor &A)
12822        : AAAllocationInfoImpl(IRP, A) {}
12823  
12824    void trackStatistics() const override {
12825      STATS_DECLTRACK_FLOATING_ATTR(allocationinfo);
12826    }
12827  };
12828  
12829  struct AAAllocationInfoReturned : AAAllocationInfoImpl {
12830    AAAllocationInfoReturned(const IRPosition &IRP, Attributor &A)
12831        : AAAllocationInfoImpl(IRP, A) {}
12832  
12833    /// See AbstractAttribute::initialize(...).
12834    void initialize(Attributor &A) override {
12835      // TODO: we don't rewrite the returned value for now because it would
12836      // require rewriting the function signature and all call sites.
12837      (void)indicatePessimisticFixpoint();
12838    }
12839  
12840    void trackStatistics() const override {
12841      STATS_DECLTRACK_FNRET_ATTR(allocationinfo);
12842    }
12843  };
12844  
12845  struct AAAllocationInfoCallSiteReturned : AAAllocationInfoImpl {
12846    AAAllocationInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
12847        : AAAllocationInfoImpl(IRP, A) {}
12848  
12849    void trackStatistics() const override {
12850      STATS_DECLTRACK_CSRET_ATTR(allocationinfo);
12851    }
12852  };
12853  
12854  struct AAAllocationInfoArgument : AAAllocationInfoImpl {
12855    AAAllocationInfoArgument(const IRPosition &IRP, Attributor &A)
12856        : AAAllocationInfoImpl(IRP, A) {}
12857  
12858    void trackStatistics() const override {
12859      STATS_DECLTRACK_ARG_ATTR(allocationinfo);
12860    }
12861  };
12862  
12863  struct AAAllocationInfoCallSiteArgument : AAAllocationInfoImpl {
12864    AAAllocationInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
12865        : AAAllocationInfoImpl(IRP, A) {}
12866  
12867    /// See AbstractAttribute::initialize(...).
12868    void initialize(Attributor &A) override {
12869  
12870      (void)indicatePessimisticFixpoint();
12871    }
12872  
12873    void trackStatistics() const override {
12874      STATS_DECLTRACK_CSARG_ATTR(allocationinfo);
12875    }
12876  };
12877  } // namespace
12878  
12879  const char AANoUnwind::ID = 0;
12880  const char AANoSync::ID = 0;
12881  const char AANoFree::ID = 0;
12882  const char AANonNull::ID = 0;
12883  const char AAMustProgress::ID = 0;
12884  const char AANoRecurse::ID = 0;
12885  const char AANonConvergent::ID = 0;
12886  const char AAWillReturn::ID = 0;
12887  const char AAUndefinedBehavior::ID = 0;
12888  const char AANoAlias::ID = 0;
12889  const char AAIntraFnReachability::ID = 0;
12890  const char AANoReturn::ID = 0;
12891  const char AAIsDead::ID = 0;
12892  const char AADereferenceable::ID = 0;
12893  const char AAAlign::ID = 0;
12894  const char AAInstanceInfo::ID = 0;
12895  const char AANoCapture::ID = 0;
12896  const char AAValueSimplify::ID = 0;
12897  const char AAHeapToStack::ID = 0;
12898  const char AAPrivatizablePtr::ID = 0;
12899  const char AAMemoryBehavior::ID = 0;
12900  const char AAMemoryLocation::ID = 0;
12901  const char AAValueConstantRange::ID = 0;
12902  const char AAPotentialConstantValues::ID = 0;
12903  const char AAPotentialValues::ID = 0;
12904  const char AANoUndef::ID = 0;
12905  const char AANoFPClass::ID = 0;
12906  const char AACallEdges::ID = 0;
12907  const char AAInterFnReachability::ID = 0;
12908  const char AAPointerInfo::ID = 0;
12909  const char AAAssumptionInfo::ID = 0;
12910  const char AAUnderlyingObjects::ID = 0;
12911  const char AAAddressSpace::ID = 0;
12912  const char AAAllocationInfo::ID = 0;
12913  const char AAIndirectCallInfo::ID = 0;
12914  const char AAGlobalValueInfo::ID = 0;
12915  const char AADenormalFPMath::ID = 0;
12916  
12917  // Macro magic to create the static generator function for attributes that
12918  // follow the naming scheme.
12919  
12920  #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
12921    case IRPosition::PK:                                                         \
12922      llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
12923  
12924  #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
12925    case IRPosition::PK:                                                         \
12926      AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
12927      ++NumAAs;                                                                  \
12928      break;
12929  
12930  #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
12931    CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12932      CLASS *AA = nullptr;                                                       \
12933      switch (IRP.getPositionKind()) {                                           \
12934        SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
12935        SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
12936        SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
12937        SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
12938        SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
12939        SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
12940        SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
12941        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
12942      }                                                                          \
12943      return *AA;                                                                \
12944    }
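       // Illustrative expansion: CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(
       // AANoUnwind) defines AANoUnwind::createForPosition, which switches over
       // the position kind, allocates AANoUnwindFunction or AANoUnwindCallSite
       // from A.Allocator for the two valid kinds, and reports llvm_unreachable
       // for all other kinds.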
12945  
12946  #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
12947    CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12948      CLASS *AA = nullptr;                                                       \
12949      switch (IRP.getPositionKind()) {                                           \
12950        SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
12951        SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
12952        SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
12953        SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
12954        SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
12955        SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
12956        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
12957        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
12958      }                                                                          \
12959      return *AA;                                                                \
12960    }
12961  
12962  #define CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(POS, SUFFIX, CLASS)         \
12963    CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12964      CLASS *AA = nullptr;                                                       \
12965      switch (IRP.getPositionKind()) {                                           \
12966        SWITCH_PK_CREATE(CLASS, IRP, POS, SUFFIX)                                \
12967      default:                                                                   \
12968        llvm_unreachable("Cannot create " #CLASS " for a position other than " #POS \
12969                         " position!");                                          \
12970      }                                                                          \
12971      return *AA;                                                                \
12972    }
12973  
12974  #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
12975    CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12976      CLASS *AA = nullptr;                                                       \
12977      switch (IRP.getPositionKind()) {                                           \
12978        SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
12979        SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
12980        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
12981        SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
12982        SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
12983        SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
12984        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
12985        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
12986      }                                                                          \
12987      return *AA;                                                                \
12988    }
12989  
12990  #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
12991    CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
12992      CLASS *AA = nullptr;                                                       \
12993      switch (IRP.getPositionKind()) {                                           \
12994        SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
12995        SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
12996        SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
12997        SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
12998        SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
12999        SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
13000        SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
13001        SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
13002      }                                                                          \
13003      return *AA;                                                                \
13004    }
13005  
13006  #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
13007    CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
13008      CLASS *AA = nullptr;                                                       \
13009      switch (IRP.getPositionKind()) {                                           \
13010        SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
13011        SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
13012        SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
13013        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
13014        SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
13015        SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
13016        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
13017        SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
13018      }                                                                          \
13019      return *AA;                                                                \
13020    }
13021  
13022  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
13023  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
13024  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
13025  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
13026  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
13027  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
13028  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
13029  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
13030  CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMustProgress)
13031  
13032  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
13033  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
13034  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
13035  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
13036  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
13037  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
13038  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
13039  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
13040  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
13041  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
13042  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
13043  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFPClass)
13044  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
13045  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAddressSpace)
13046  CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAllocationInfo)
13047  
13048  CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
13049  CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
13050  CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
13051  CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUnderlyingObjects)
13052  
13053  CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(IRP_CALL_SITE, CallSite,
13054                                             AAIndirectCallInfo)
13055  CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(IRP_FLOAT, Floating,
13056                                             AAGlobalValueInfo)
13057  
13058  CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
13059  CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
13060  CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonConvergent)
13061  CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIntraFnReachability)
13062  CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInterFnReachability)
13063  CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADenormalFPMath)
13064  
13065  CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
13066  
13067  #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
13068  #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
13069  #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
13070  #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
13071  #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
13072  #undef CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION
13073  #undef SWITCH_PK_CREATE
13074  #undef SWITCH_PK_INV
13075