//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

static bool isAligned(const Value *Base, Align Alignment,
                      const DataLayout &DL) {
  return Base->getPointerAlignment(DL) >= Alignment;
}

static bool isDereferenceableAndAlignedPointerViaAssumption(
    const Value *Ptr, Align Alignment,
    function_ref<bool(const RetainedKnowledge &RK)> CheckSize,
    const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC,
    const DominatorTree *DT) {
  // Dereferenceable information from assumptions is only valid if the value
  // cannot be freed between the assumption and use. For now just use the
  // information for values that cannot be freed in the function.
  // TODO: More precisely check if the pointer can be freed between assumption
  // and use.
  if (!CtxI || Ptr->canBeFreed())
    return false;
  // Look through assumes to see if both dereferenceability and alignment can
  // be proven by an assume if needed.
  RetainedKnowledge AlignRK;
  RetainedKnowledge DerefRK;
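  // Seed with the alignment already known for the pointer itself; an assume
  // visited below may still establish a larger alignment.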
  bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment;
  return getKnowledgeForValue(
      Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
      [&](RetainedKnowledge RK, Instruction *Assume, auto) {
        if (!isValidAssumeForContext(Assume, CtxI, DT))
          return false;
        if (RK.AttrKind == Attribute::Alignment)
          AlignRK = std::max(AlignRK, RK);
        if (RK.AttrKind == Attribute::Dereferenceable)
          DerefRK = std::max(DerefRK, RK);
        IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
        if (IsAligned && DerefRK && CheckSize(DerefRK))
          return true; // We have found what we needed, so stop looking.
        return false;  // Other assumes may have better information, so
                       // keep looking.
      });
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI, Visited,
          MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)) ||
        CheckForFreed)
      return false;
    if (CheckForNonNull &&
        !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
      return false;
    // When using something like !dereferenceable on a load, the
    // dereferenceability may only be valid on a specific control-flow path.
    // If the instruction doesn't dominate the context instruction, we're
    // asking about dereferenceability under the assumption that the
    // instruction has been speculated to the point of the context instruction,
    // in which case we don't know if the dereferenceability info still holds.
    // We don't bother handling allocas here, as they aren't speculatable
    // anyway.
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      return CtxI && isValidAssumeForContext(I, CtxI, DT);
    return true;
  };
  if (IsKnownDeref()) {
    // As we recursed through GEPs to get here, we've incrementally checked
    // that each step advanced by a multiple of the alignment. If our base is
    // properly aligned, then the original offset accessed must also be.
    return isAligned(V, Alignment, DL);
  }

  // TODO: Refactor this function to be able to search independently for
  // dereferenceability and alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at the point of use.
    // NOTE: We can only use the object size as a base fact as we (a) need to
    // prove alignment too, and (b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that.  For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        return isAligned(V, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

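  // As a last resort, see whether dereferenceability and alignment can be
  // established from llvm.assume operand bundles on the pointer.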
  return AC && isDereferenceableAndAlignedPointerViaAssumption(
                   V, Alignment,
                   [Size](const RetainedKnowledge &RK) {
                     return RK.ArgValue >= Size.getZExtValue();
                   },
                   DL, CtxI, AC, DT);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(
    LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
    AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  const Align Alignment = LI->getAlign();
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop without needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(
        Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
        &DT);

  const SCEV *PtrScev = SE.getSCEV(Ptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);

  // Check to see if we have a repeating access pattern and it's possible
  // to prove all accesses are well aligned.
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;

  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found that warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;

  // TODO: Handle overlapping accesses.
  if (EltSize.ugt(Step->getAPInt().abs()))
    return false;

  const SCEV *MaxBECount =
      Predicates ? SE.getPredicatedSymbolicMaxBackedgeTakenCount(L, *Predicates)
                 : SE.getSymbolicMaxBackedgeTakenCount(L);
  const SCEV *BECount = Predicates
                            ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
                            : SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxBECount))
    return false;

  if (isa<SCEVCouldNotCompute>(BECount)) {
    // TODO: Support symbolic max backedge taken counts for loops without
    // computable backedge taken counts.
    MaxBECount =
        Predicates
            ? SE.getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
            : SE.getConstantMaxBackedgeTakenCount(L);
  }
  const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
      L, PtrScev, LI->getType(), BECount, MaxBECount, &SE, nullptr);
  if (isa<SCEVCouldNotCompute>(AccessStart) ||
      isa<SCEVCouldNotCompute>(AccessEnd))
    return false;

  // Try to get the access size.
  const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
  if (isa<SCEVCouldNotCompute>(PtrDiff))
    return false;
  APInt MaxPtrDiff = SE.getUnsignedRangeMax(PtrDiff);

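  // Identify a base value plus the (constant and symbolic) access size that
  // covers everything the loop may touch starting from that base.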
  Value *Base = nullptr;
  APInt AccessSize;
  const SCEV *AccessSizeSCEV = nullptr;
  if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
    Base = NewBase->getValue();
    AccessSize = MaxPtrDiff;
    AccessSizeSCEV = PtrDiff;
  } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
    if (MinAdd->getNumOperands() != 2)
      return false;

    const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
    if (!Offset || !NewBase)
      return false;

    // The code below assumes the offset is unsigned, but GEP offsets are
    // treated as signed, so we can end up with a signed value here too. For
    // example, suppose the initial PHI value is (i8 255); the offset will be
    // treated as (i8 -1) and sign-extended to (i64 -1).
    if (Offset->getAPInt().isNegative())
      return false;

    // For the moment, restrict ourselves to the case where the offset is a
    // multiple of the requested alignment and the base is aligned.
    // TODO: generalize if a case is found that warrants it.
    if (Offset->getAPInt().urem(Alignment.value()) != 0)
      return false;

    AccessSize = MaxPtrDiff + Offset->getAPInt();
    AccessSizeSCEV = SE.getAddExpr(PtrDiff, Offset);
    Base = NewBase->getValue();
  } else
    return false;

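  // Use the header's first non-PHI instruction as the context: it dominates
  // every instruction in the loop.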
  Instruction *HeaderFirstNonPHI = &*L->getHeader()->getFirstNonPHIIt();
  return isDereferenceableAndAlignedPointerViaAssumption(
             Base, Alignment,
             [&SE, AccessSizeSCEV](const RetainedKnowledge &RK) {
               return SE.isKnownPredicate(CmpInst::ICMP_ULE, AccessSizeSCEV,
                                          SE.getSCEV(RK.IRArgValue));
             },
             DL, HeaderFirstNonPHI, AC, &DT) ||
         isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}

static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
  const Function &F = *CtxI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size, const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI)) {
    // With sanitizers `Dereferenceable` is not always enough for unconditional
    // load.
    if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
      return true;
  }

  if (!ScanFrom)
    return false;

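  // The scan below works with a fixed 64-bit size; bail out on wider values.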
  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block to see
  // whether the pointer is already being loaded from or stored to. If so, the
  // previous load or store would have already trapped, so there is no harm in
  // doing an extra load (also, CSE will later eliminate the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
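  // Build the half-open ranges [offset, offset + size) for both accesses and
  // check that they do not intersect.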
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

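    // The value the load would observe is the memset byte splatted to the
    // load's width, or truncated for loads narrower than 8 bits.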
    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to the expected value in case the next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

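    // Undo the increment above now that the early-exit tests have passed.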
    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, ignore the store if the load and the store
        // have the same base, constant offsets, and non-overlapping access
        // ranges. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // We got to the start of the block without finding the value; we are done
  // for this block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into them.
static bool isPointerUseReplacable(const Use &U) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

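  // Walk the transitive users with an explicit worklist, bounded by Limit to
  // keep compile time in check.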
  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}

// Returns true if `To` is a null pointer or a constant dereferenceable
// pointer, or if both pointers have the same underlying object.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}

bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  assert(U->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!To->getType()->isPointerTy())
    return true;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;
  return isPointerUseReplacable(U);
}

bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPointerTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}

bool llvm::isDereferenceableReadOnlyLoop(
    Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
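  // Every load must be provably dereferenceable and aligned on all
  // iterations; any other memory access or throwing instruction disqualifies
  // the loop.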
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          return false;
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
        return false;
    }
  }
  return true;
}