1 //===- Loads.cpp - Local load analysis ------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines simple local analyses for load instructions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/Loads.h"
14 #include "llvm/Analysis/AliasAnalysis.h"
15 #include "llvm/Analysis/AssumeBundleQueries.h"
16 #include "llvm/Analysis/LoopAccessAnalysis.h"
17 #include "llvm/Analysis/LoopInfo.h"
18 #include "llvm/Analysis/MemoryBuiltins.h"
19 #include "llvm/Analysis/MemoryLocation.h"
20 #include "llvm/Analysis/ScalarEvolution.h"
21 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/Operator.h"
26 
27 using namespace llvm;
28 
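// Returns true if Base's known pointer alignment (as computed by
// Value::getPointerAlignment from the IR and the DataLayout) is at least the
// requested Alignment.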
29 static bool isAligned(const Value *Base, Align Alignment,
30                       const DataLayout &DL) {
31   return Base->getPointerAlignment(DL) >= Alignment;
32 }
33 
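// Check whether an llvm.assume with "align" and "dereferenceable" operand
// bundles on Ptr proves, at the context instruction CtxI, that Ptr is aligned
// to Alignment and dereferenceable for a size accepted by CheckSize.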
34 static bool isDereferenceableAndAlignedPointerViaAssumption(
35     const Value *Ptr, Align Alignment,
36     function_ref<bool(const RetainedKnowledge &RK)> CheckSize,
37     const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC,
38     const DominatorTree *DT) {
39   // Dereferenceable information from assumptions is only valid if the value
40   // cannot be freed between the assumption and use. For now just use the
41   // information for values that cannot be freed in the function.
42   // TODO: More precisely check if the pointer can be freed between assumption
43   // and use.
44   if (!CtxI || Ptr->canBeFreed())
45     return false;
46   /// Look through assumes to see if both dereferenceability and alignment can
47   /// be proven by an assume if needed.
48   RetainedKnowledge AlignRK;
49   RetainedKnowledge DerefRK;
50   bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment;
51   return getKnowledgeForValue(
52       Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
53       [&](RetainedKnowledge RK, Instruction *Assume, auto) {
54         if (!isValidAssumeForContext(Assume, CtxI, DT))
55           return false;
56         if (RK.AttrKind == Attribute::Alignment)
57           AlignRK = std::max(AlignRK, RK);
58         if (RK.AttrKind == Attribute::Dereferenceable)
59           DerefRK = std::max(DerefRK, RK);
60         IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
61         if (IsAligned && DerefRK && CheckSize(DerefRK))
62         return true; // We have found what we needed, so we stop looking.
63         return false;  // Other assumes may have better information, so
64                        // keep looking.
65       });
66 }
67 
68 /// Test if V is always a pointer to allocated and suitably aligned memory for
69 /// a simple load or store.
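/// The implementation recurses through GEPs with constant offsets, bitcasts,
/// selects, address space casts, gc.relocates and returned-argument calls
/// until it reaches a base value whose dereferenceable size and alignment can
/// be established directly (attributes, allocation size, or assumptions).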
70 static bool isDereferenceableAndAlignedPointer(
71     const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
72     const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
73     const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
74     unsigned MaxDepth) {
75   assert(V->getType()->isPointerTy() && "Base must be pointer");
76 
77   // Recursion limit.
78   if (MaxDepth-- == 0)
79     return false;
80 
81   // Already visited?  Bail out, we've likely hit unreachable code.
82   if (!Visited.insert(V).second)
83     return false;
84 
85   // Note that it is not safe to speculate into a malloc'd region because
86   // malloc may return null.
87 
88   // For GEPs, determine if the indexing lands within the allocated object.
89   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
90     const Value *Base = GEP->getPointerOperand();
91 
92     APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
93     if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
94         !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
95              .isMinValue())
96       return false;
97 
98     // If the base pointer is dereferenceable for Offset+Size bytes, then the
99     // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
100     // pointer is aligned to Align bytes, and the Offset is divisible by Align
101     // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
102     // aligned to Align bytes.
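    // For example, with Alignment == 8, a base known to be 8-aligned and
    // dereferenceable for 40 bytes, and a constant Offset of 24, the GEP
    // result is still 8-aligned and is dereferenceable for any Size <= 16.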
103 
104     // Offset and Size may have different bit widths if we have visited an
105     // addrspacecast, so we can't do arithmetic directly on the APInt values.
106     return isDereferenceableAndAlignedPointer(
107         Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
108         CtxI, AC, DT, TLI, Visited, MaxDepth);
109   }
110 
111   // bitcast instructions are no-ops as far as dereferenceability is concerned.
112   if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
113     if (BC->getSrcTy()->isPointerTy())
114       return isDereferenceableAndAlignedPointer(
115         BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
116         Visited, MaxDepth);
117   }
118 
119   // Recurse into both hands of select.
120   if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
121     return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
122                                               Size, DL, CtxI, AC, DT, TLI,
123                                               Visited, MaxDepth) &&
124            isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
125                                               Size, DL, CtxI, AC, DT, TLI,
126                                               Visited, MaxDepth);
127   }
128 
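  // Check whether V itself advertises enough dereferenceable bytes (e.g. via a
  // dereferenceable attribute or !dereferenceable metadata) to cover Size,
  // taking the nonnull and can-be-freed caveats into account, and whether any
  // instruction-level information is valid at the context instruction.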
129   auto IsKnownDeref = [&]() {
130     bool CheckForNonNull, CheckForFreed;
131     if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
132                                                     CheckForFreed)) ||
133         CheckForFreed)
134       return false;
135     if (CheckForNonNull &&
136         !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
137       return false;
138     // When using something like !dereferenceable on a load, the
139     // dereferenceability may only be valid on a specific control-flow path.
140     // If the instruction doesn't dominate the context instruction, we're
141     // asking about dereferenceability under the assumption that the
142     // instruction has been speculated to the point of the context instruction,
143     // in which case we don't know if the dereferenceability info still holds.
144     // We don't bother handling allocas here, as they aren't speculatable
145     // anyway.
146     auto *I = dyn_cast<Instruction>(V);
147     if (I && !isa<AllocaInst>(I))
148       return CtxI && isValidAssumeForContext(I, CtxI, DT);
149     return true;
150   };
151   if (IsKnownDeref()) {
152     // As we recursed through GEPs to get here, we've incrementally checked
153     // that each step advanced by a multiple of the alignment. If our base is
154     // properly aligned, then the original offset accessed must also be.
155     return isAligned(V, Alignment, DL);
156   }
157 
158   /// TODO: refactor this function to be able to search independently for
159   /// Dereferenceability and Alignment requirements.
160 
161 
162   if (const auto *Call = dyn_cast<CallBase>(V)) {
163     if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
164       return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
165                                                 AC, DT, TLI, Visited, MaxDepth);
166 
167     // If we have a call we can't recurse through, check to see if this is an
168     // allocation function for which we can establish a minimum object size.
169     // Such a minimum object size is analogous to a deref_or_null attribute in
170     // that we still need to prove the result non-null at point of use.
171     // NOTE: We can only use the object size as a base fact as we a) need to
172     // prove alignment too, and b) don't want the compile time impact of a
173     // separate recursive walk.
174     ObjectSizeOpts Opts;
175     // TODO: It may be okay to round to align, but that would imply that
176     // accessing slightly out of bounds was legal, and we're currently
177     // inconsistent about that.  For the moment, be conservative.
178     Opts.RoundToAlign = false;
179     Opts.NullIsUnknownSize = true;
180     uint64_t ObjSize;
181     if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
182       APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
183       if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
184           isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
185           !V->canBeFreed()) {
186         // As we recursed through GEPs to get here, we've incrementally
187         // checked that each step advanced by a multiple of the alignment. If
188         // our base is properly aligned, then the original offset accessed
189         // must also be.
190         return isAligned(V, Alignment, DL);
191       }
192     }
193   }
194 
195   // For gc.relocate, look through relocations
196   if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
197     return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
198                                               Alignment, Size, DL, CtxI, AC, DT,
199                                               TLI, Visited, MaxDepth);
200 
201   if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
202     return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
203                                               Size, DL, CtxI, AC, DT, TLI,
204                                               Visited, MaxDepth);
205 
206   return AC && isDereferenceableAndAlignedPointerViaAssumption(
207                    V, Alignment,
208                    [Size](const RetainedKnowledge &RK) {
209                      return RK.ArgValue >= Size.getZExtValue();
210                    },
211                    DL, CtxI, AC, DT);
212 }
213 
214 bool llvm::isDereferenceableAndAlignedPointer(
215     const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
216     const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
217     const TargetLibraryInfo *TLI) {
218   // Note: At the moment, Size can be zero.  This ends up being interpreted as
219   // a query of whether [Base, V] is dereferenceable and V is aligned (since
220   // that's what the implementation happened to do).  It's unclear if this is
221   // the desired semantic, but at least SelectionDAG does exercise this case.
222 
223   SmallPtrSet<const Value *, 32> Visited;
224   return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
225                                               DT, TLI, Visited, 16);
226 }
227 
228 bool llvm::isDereferenceableAndAlignedPointer(
229     const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
230     const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
231     const TargetLibraryInfo *TLI) {
232   // For unsized types or scalable vectors we don't know exactly how many bytes
233   // are dereferenced, so bail out.
234   if (!Ty->isSized() || Ty->isScalableTy())
235     return false;
236 
237   // When dereferenceability information is provided by a dereferenceable
238   // attribute, we know exactly how many bytes are dereferenceable. If we can
239   // determine the exact offset to the attributed variable, we can use that
240   // information here.
241 
242   APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
243                    DL.getTypeStoreSize(Ty));
244   return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
245                                             AC, DT, TLI);
246 }
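// A hypothetical caller might use the type-based overload above when deciding
// whether a load can be speculated (sketch only; Ptr, LoadTy, LI, AC, DT and
// TLI are illustrative names, not part of this file):
//
//   if (isDereferenceableAndAlignedPointer(Ptr, LoadTy, LI->getAlign(), DL,
//                                          CtxI, &AC, &DT, &TLI))
//     ...; // safe to hoist the load of Ptr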
247 
248 bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
249                                     const DataLayout &DL,
250                                     const Instruction *CtxI,
251                                     AssumptionCache *AC,
252                                     const DominatorTree *DT,
253                                     const TargetLibraryInfo *TLI) {
254   return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
255                                             TLI);
256 }
257 
258 /// Test if A and B will obviously have the same value.
259 ///
260 /// This includes recognizing that %t0 and %t1 will have the same
261 /// value in code like this:
262 /// \code
263 ///   %t0 = getelementptr \@a, 0, 3
264 ///   store i32 0, i32* %t0
265 ///   %t1 = getelementptr \@a, 0, 3
266 ///   %t2 = load i32* %t1
267 /// \endcode
268 ///
269 static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
270   // Test if the values are trivially equivalent.
271   if (A == B)
272     return true;
273 
274   // Test if the values come from identical arithmetic instructions.
275   // Use isIdenticalToWhenDefined instead of isIdenticalTo because
276   // this function is only used when one address use dominates the
277   // other, which means that they'll always either have the same
278   // value or one of them will have an undefined value.
279   if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
280       isa<GetElementPtrInst>(A))
281     if (const Instruction *BI = dyn_cast<Instruction>(B))
282       if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
283         return true;
284 
285   // Otherwise they may not be equivalent.
286   return false;
287 }
288 
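// Proves that every address LI reads inside L is dereferenceable and aligned:
// either the pointer is loop-invariant and provably safe at the loop header,
// or it is an affine add recurrence whose full accessed range over the loop's
// (possibly predicated) trip count lies within a known dereferenceable object.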
289 bool llvm::isDereferenceableAndAlignedInLoop(
290     LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
291     AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
292   const Align Alignment = LI->getAlign();
293   auto &DL = LI->getDataLayout();
294   Value *Ptr = LI->getPointerOperand();
295   APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
296                 DL.getTypeStoreSize(LI->getType()).getFixedValue());
297 
298   // If given a uniform (i.e. non-varying) address, see if we can prove the
299   // access is safe within the loop w/o needing predication.
300   if (L->isLoopInvariant(Ptr))
301     return isDereferenceableAndAlignedPointer(
302         Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
303         &DT);
304 
305   const SCEV *PtrScev = SE.getSCEV(Ptr);
306   auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);
307 
308   // Check to see if we have a repeating access pattern and it's possible
309   // to prove all accesses are well aligned.
310   if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
311     return false;
312 
313   auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
314   if (!Step)
315     return false;
316 
317   // For the moment, restrict ourselves to the case where the access size is a
318   // multiple of the requested alignment and the base is aligned.
319   // TODO: generalize if a case is found which warrants it.
320   if (EltSize.urem(Alignment.value()) != 0)
321     return false;
322 
323   // TODO: Handle overlapping accesses.
324   if (EltSize.ugt(Step->getAPInt().abs()))
325     return false;
326 
327   const SCEV *MaxBECount =
328       Predicates ? SE.getPredicatedSymbolicMaxBackedgeTakenCount(L, *Predicates)
329                  : SE.getSymbolicMaxBackedgeTakenCount(L);
330   const SCEV *BECount = Predicates
331                             ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
332                             : SE.getBackedgeTakenCount(L);
333   if (isa<SCEVCouldNotCompute>(MaxBECount))
334     return false;
335 
336   if (isa<SCEVCouldNotCompute>(BECount)) {
337     // TODO: Support symbolic max backedge taken counts for loops without
338     // computable backedge taken counts.
339     MaxBECount =
340         Predicates
341             ? SE.getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
342             : SE.getConstantMaxBackedgeTakenCount(L);
343   }
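  // Compute the lowest and highest addresses the recurrence can touch over the
  // loop's lifetime, using the (possibly predicated) trip counts above.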
344   const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
345       L, PtrScev, LI->getType(), BECount, MaxBECount, &SE, nullptr);
346   if (isa<SCEVCouldNotCompute>(AccessStart) ||
347       isa<SCEVCouldNotCompute>(AccessEnd))
348     return false;
349 
350   // Try to get the access size.
351   const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
352   if (isa<SCEVCouldNotCompute>(PtrDiff))
353     return false;
354   APInt MaxPtrDiff = SE.getUnsignedRangeMax(PtrDiff);
355 
356   Value *Base = nullptr;
357   APInt AccessSize;
358   const SCEV *AccessSizeSCEV = nullptr;
359   if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
360     Base = NewBase->getValue();
361     AccessSize = MaxPtrDiff;
362     AccessSizeSCEV = PtrDiff;
363   } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
364     if (MinAdd->getNumOperands() != 2)
365       return false;
366 
367     const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
368     const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
369     if (!Offset || !NewBase)
370       return false;
371 
372     // The code below assumes the offset is unsigned, but GEP
373     // offsets are treated as signed so we can end up with a signed value
374     // here too. For example, suppose the initial PHI value is (i8 255),
375     // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
376     if (Offset->getAPInt().isNegative())
377       return false;
378 
379     // For the moment, restrict ourselves to the case where the offset is a
380     // multiple of the requested alignment and the base is aligned.
381     // TODO: generalize if a case is found which warrants it.
382     if (Offset->getAPInt().urem(Alignment.value()) != 0)
383       return false;
384 
385     bool Overflow = false;
386     AccessSize = MaxPtrDiff.uadd_ov(Offset->getAPInt(), Overflow);
387     if (Overflow)
388       return false;
389     AccessSizeSCEV = SE.getAddExpr(PtrDiff, Offset);
390     Base = NewBase->getValue();
391   } else
392     return false;
393 
394   Instruction *HeaderFirstNonPHI = &*L->getHeader()->getFirstNonPHIIt();
395   return isDereferenceableAndAlignedPointerViaAssumption(
396              Base, Alignment,
397              [&SE, AccessSizeSCEV](const RetainedKnowledge &RK) {
398                return SE.isKnownPredicate(CmpInst::ICMP_ULE, AccessSizeSCEV,
399                                           SE.getSCEV(RK.IRArgValue));
400              },
401              DL, HeaderFirstNonPHI, AC, &DT) ||
402          isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
403                                             HeaderFirstNonPHI, AC, &DT);
404 }
405 
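// Returns true if the enclosing function is instrumented by a sanitizer for
// which a speculatively inserted load could report a false positive.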
406 static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
407   const Function &F = *CtxI.getFunction();
408   // Speculative load may create a race that did not exist in the source.
409   return F.hasFnAttribute(Attribute::SanitizeThread) ||
410          // Speculative load may load data from dirty regions.
411          F.hasFnAttribute(Attribute::SanitizeAddress) ||
412          F.hasFnAttribute(Attribute::SanitizeHWAddress);
413 }
414 
415 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
416   return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
417 }
418 
419 /// Check if executing a load of this pointer value cannot trap.
420 ///
421 /// If DT and ScanFrom are specified this method performs context-sensitive
422 /// analysis and returns true if it is safe to load immediately before ScanFrom.
423 ///
424 /// If it is not obviously safe to load from the specified pointer, we do
425 /// a quick local scan of the basic block containing \c ScanFrom, to determine
426 /// if the address is already accessed.
427 ///
428 /// This uses the pointee type to determine how many bytes need to be safe to
429 /// load from the pointer.
430 bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
431                                        const DataLayout &DL,
432                                        Instruction *ScanFrom,
433                                        AssumptionCache *AC,
434                                        const DominatorTree *DT,
435                                        const TargetLibraryInfo *TLI) {
436   // If DT is not specified we can't make a context-sensitive query.
437   const Instruction* CtxI = DT ? ScanFrom : nullptr;
438   if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
439                                          TLI)) {
440     // With sanitizers `Dereferenceable` is not always enough for unconditional
441     // load.
442     if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
443       return true;
444   }
445 
446   if (!ScanFrom)
447     return false;
448 
449   if (Size.getBitWidth() > 64)
450     return false;
451   const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());
452 
453   // Otherwise, be a little bit aggressive by scanning the local block where we
454   // want to check to see if the pointer is already being loaded or stored
455   // from/to.  If so, the previous load or store would have already trapped,
456   // so there is no harm doing an extra load (also, CSE will later eliminate
457   // the load entirely).
458   BasicBlock::iterator BBI = ScanFrom->getIterator(),
459                        E = ScanFrom->getParent()->begin();
460 
461   // We can at least always strip pointer casts even though we can't use the
462   // base here.
463   V = V->stripPointerCasts();
464 
465   while (BBI != E) {
466     --BBI;
467 
468     // If we see a free or a call which may write to memory (i.e. which might do
469     // a free), the pointer could be marked invalid.
470     if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
471         !isa<LifetimeIntrinsic>(BBI))
472       return false;
473 
474     Value *AccessedPtr;
475     Type *AccessedTy;
476     Align AccessedAlign;
477     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
478       // Ignore volatile loads. The execution of a volatile load cannot
479       // be used to prove an address is backed by regular memory; it can,
480       // for example, point to an MMIO register.
481       if (LI->isVolatile())
482         continue;
483       AccessedPtr = LI->getPointerOperand();
484       AccessedTy = LI->getType();
485       AccessedAlign = LI->getAlign();
486     } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
487       // Ignore volatile stores (see comment for loads).
488       if (SI->isVolatile())
489         continue;
490       AccessedPtr = SI->getPointerOperand();
491       AccessedTy = SI->getValueOperand()->getType();
492       AccessedAlign = SI->getAlign();
493     } else
494       continue;
495 
496     if (AccessedAlign < Alignment)
497       continue;
498 
499     // Handle trivial cases.
500     if (AccessedPtr == V &&
501         TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
502       return true;
503 
504     if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
505         TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
506       return true;
507   }
508   return false;
509 }
510 
511 bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
512                                        const DataLayout &DL,
513                                        Instruction *ScanFrom,
514                                        AssumptionCache *AC,
515                                        const DominatorTree *DT,
516                                        const TargetLibraryInfo *TLI) {
517   TypeSize TySize = DL.getTypeStoreSize(Ty);
518   if (TySize.isScalable())
519     return false;
520   APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
521   return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
522                                      TLI);
523 }
524 
525 /// DefMaxInstsToScan - the default number of maximum instructions
526 /// to scan in the block, used by FindAvailableLoadedValue().
527 /// FindAvailableLoadedValue() was introduced in r60148, to improve jump
528 /// threading in part by eliminating partially redundant loads.
529 /// At that point, the value of MaxInstsToScan was already set to '6'
530 /// without documented explanation.
531 cl::opt<unsigned>
532 llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
533   cl::desc("Use this to specify the default maximum number of instructions "
534            "to scan backward from a given instruction, when searching for "
535            "available loaded value"));
536 
537 Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
538                                       BasicBlock::iterator &ScanFrom,
539                                       unsigned MaxInstsToScan,
540                                       BatchAAResults *AA, bool *IsLoad,
541                                       unsigned *NumScanedInst) {
542   // Don't CSE a load that is volatile or anything stronger than unordered.
543   if (!Load->isUnordered())
544     return nullptr;
545 
546   MemoryLocation Loc = MemoryLocation::get(Load);
547   return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
548                                    ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
549                                    NumScanedInst);
550 }
551 
552 // Check if the load and the store have the same base, constant offsets and
553 // non-overlapping access ranges.
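// For example, a 4-byte store to (%base + 8) and a 4-byte load from
// (%base + 16) share the base, but their [offset, offset + size) ranges do not
// intersect, so the store cannot clobber the load.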
554 static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
555                                               Type *LoadTy,
556                                               const Value *StorePtr,
557                                               Type *StoreTy,
558                                               const DataLayout &DL) {
559   APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
560   APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
561   const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
562       DL, LoadOffset, /* AllowNonInbounds */ false);
563   const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
564       DL, StoreOffset, /* AllowNonInbounds */ false);
565   if (LoadBase != StoreBase)
566     return false;
567   auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
568   auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
569   ConstantRange LoadRange(LoadOffset,
570                           LoadOffset + LoadAccessSize.toRaw());
571   ConstantRange StoreRange(StoreOffset,
572                            StoreOffset + StoreAccessSize.toRaw());
573   return LoadRange.intersectWith(StoreRange).isEmptySet();
574 }
575 
576 static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
577                                     Type *AccessTy, bool AtLeastAtomic,
578                                     const DataLayout &DL, bool *IsLoadCSE) {
579   // If this is a load of Ptr, the loaded value is available.
580   // (This is true even if the load is volatile or atomic, although
581   // those cases are unlikely.)
582   if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
583     // We can value forward from an atomic to a non-atomic, but not the
584     // other way around.
585     if (LI->isAtomic() < AtLeastAtomic)
586       return nullptr;
587 
588     Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
589     if (!AreEquivalentAddressValues(LoadPtr, Ptr))
590       return nullptr;
591 
592     if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
593       if (IsLoadCSE)
594         *IsLoadCSE = true;
595       return LI;
596     }
597   }
598 
599   // If this is a store through Ptr, the value is available!
600   // (This is true even if the store is volatile or atomic, although
601   // those cases are unlikely.)
602   if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
603     // We can value forward from an atomic to a non-atomic, but not the
604     // other way around.
605     if (SI->isAtomic() < AtLeastAtomic)
606       return nullptr;
607 
608     Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
609     if (!AreEquivalentAddressValues(StorePtr, Ptr))
610       return nullptr;
611 
612     if (IsLoadCSE)
613       *IsLoadCSE = false;
614 
615     Value *Val = SI->getValueOperand();
616     if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
617       return Val;
618 
619     TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
620     TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
621     if (TypeSize::isKnownLE(LoadSize, StoreSize))
622       if (auto *C = dyn_cast<Constant>(Val))
623         return ConstantFoldLoadFromConst(C, AccessTy, DL);
624   }
625 
626   if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
627     // Don't forward from (non-atomic) memset to atomic load.
628     if (AtLeastAtomic)
629       return nullptr;
630 
631     // Only handle constant memsets.
632     auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
633     auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
634     if (!Val || !Len)
635       return nullptr;
636 
637     // TODO: Handle offsets.
638     Value *Dst = MSI->getDest();
639     if (!AreEquivalentAddressValues(Dst, Ptr))
640       return nullptr;
641 
642     if (IsLoadCSE)
643       *IsLoadCSE = false;
644 
645     TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
646     if (LoadTypeSize.isScalable())
647       return nullptr;
648 
649     // Make sure the read bytes are contained in the memset.
650     uint64_t LoadSize = LoadTypeSize.getFixedValue();
651     if ((Len->getValue() * 8).ult(LoadSize))
652       return nullptr;
653 
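    // Materialize the value a LoadSize-bit read of the memset region would
    // produce: replicate the fill byte if the load is at least a byte wide,
    // otherwise truncate the byte to the load width.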
654     APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
655                                 : Val->getValue().trunc(LoadSize);
656     ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
657     if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
658       return SplatC;
659 
660     return nullptr;
661   }
662 
663   return nullptr;
664 }
665 
666 Value *llvm::findAvailablePtrLoadStore(
667     const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
668     BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
669     BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
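  // A MaxInstsToScan of 0 means there is no scan limit.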
670   if (MaxInstsToScan == 0)
671     MaxInstsToScan = ~0U;
672 
673   const DataLayout &DL = ScanBB->getDataLayout();
674   const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();
675 
676   while (ScanFrom != ScanBB->begin()) {
677     // We must ignore debug info directives when counting (otherwise they
678     // would affect codegen).
679     Instruction *Inst = &*--ScanFrom;
680     if (Inst->isDebugOrPseudoInst())
681       continue;
682 
683     // Restore ScanFrom to its expected value in case the next test succeeds.
684     ScanFrom++;
685 
686     if (NumScanedInst)
687       ++(*NumScanedInst);
688 
689     // Don't scan huge blocks.
690     if (MaxInstsToScan-- == 0)
691       return nullptr;
692 
693     --ScanFrom;
694 
695     if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
696                                                  AtLeastAtomic, DL, IsLoadCSE))
697       return Available;
698 
699     // Try to get the store size for the type.
700     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
701       Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
702 
703       // If both StrippedPtr and StorePtr reach all the way to an alloca or
704       // global and they are different, ignore the store. This is a trivial form
705       // of alias analysis that is important for reg2mem'd code.
706       if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
707           (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
708           StrippedPtr != StorePtr)
709         continue;
710 
711       if (!AA) {
712         // When AA isn't available, but the load and the store have the same
713         // base, constant offsets and non-overlapping access ranges, ignore the
714         // store. This is a simple form of alias analysis that is used by the
715         // inliner. FIXME: use BasicAA if possible.
716         if (areNonOverlapSameBaseLoadAndStore(
717                 Loc.Ptr, AccessTy, SI->getPointerOperand(),
718                 SI->getValueOperand()->getType(), DL))
719           continue;
720       } else {
721         // If we have alias analysis and it says the store won't modify the
722         // loaded value, ignore the store.
723         if (!isModSet(AA->getModRefInfo(SI, Loc)))
724           continue;
725       }
726 
727       // Otherwise the store may or may not alias the pointer; bail out.
728       ++ScanFrom;
729       return nullptr;
730     }
731 
732     // If this is some other instruction that may clobber Ptr, bail out.
733     if (Inst->mayWriteToMemory()) {
734       // If alias analysis claims that it really won't modify the load,
735       // ignore it.
736       if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
737         continue;
738 
739       // May modify the pointer, bail out.
740       ++ScanFrom;
741       return nullptr;
742     }
743   }
744 
745   // We got to the start of the block without finding the value; we are done
746   // for this block.
747   return nullptr;
748 }
749 
750 Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
751                                       bool *IsLoadCSE,
752                                       unsigned MaxInstsToScan) {
753   const DataLayout &DL = Load->getDataLayout();
754   Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
755   BasicBlock *ScanBB = Load->getParent();
756   Type *AccessTy = Load->getType();
757   bool AtLeastAtomic = Load->isAtomic();
758 
759   if (!Load->isUnordered())
760     return nullptr;
761 
762   // Try to find an available value first, and delay expensive alias analysis
763   // queries until later.
764   Value *Available = nullptr;
765   SmallVector<Instruction *> MustNotAliasInsts;
766   for (Instruction &Inst : make_range(++Load->getReverseIterator(),
767                                       ScanBB->rend())) {
768     if (Inst.isDebugOrPseudoInst())
769       continue;
770 
771     if (MaxInstsToScan-- == 0)
772       return nullptr;
773 
774     Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
775                                       AtLeastAtomic, DL, IsLoadCSE);
776     if (Available)
777       break;
778 
779     if (Inst.mayWriteToMemory())
780       MustNotAliasInsts.push_back(&Inst);
781   }
782 
783   // If we found an available value, ensure that the instructions in between
784   // did not modify the memory location.
785   if (Available) {
786     MemoryLocation Loc = MemoryLocation::get(Load);
787     for (Instruction *Inst : MustNotAliasInsts)
788       if (isModSet(AA.getModRefInfo(Inst, Loc)))
789         return nullptr;
790   }
791 
792   return Available;
793 }
794 
795 // Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that only
796 // feeds into them.
797 static bool isPointerUseReplacable(const Use &U) {
798   unsigned Limit = 40;
799   SmallVector<const User *> Worklist({U.getUser()});
800   SmallPtrSet<const User *, 8> Visited;
801 
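  // Depth-limited walk over the transitive users of U: icmp and ptrtoint users
  // are accepted, phis and selects are looked through, and any other user
  // makes the use non-replaceable.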
802   while (!Worklist.empty() && --Limit) {
803     auto *User = Worklist.pop_back_val();
804     if (!Visited.insert(User).second)
805       continue;
806     if (isa<ICmpInst, PtrToIntInst>(User))
807       continue;
808     if (isa<PHINode, SelectInst>(User))
809       Worklist.append(User->user_begin(), User->user_end());
810     else
811       return false;
812   }
813 
814   return Limit != 0;
815 }
816 
817 // Returns true if `To` is a null pointer, constant dereferenceable pointer or
818 // both pointers have the same underlying objects.
819 static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
820                                        const DataLayout &DL) {
821   // This is not strictly correct, but we do it for now to retain important
822   // optimizations.
823   if (isa<ConstantPointerNull>(To))
824     return true;
825   if (isa<Constant>(To) &&
826       isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
827     return true;
828   return getUnderlyingObjectAggressive(From) ==
829          getUnderlyingObjectAggressive(To);
830 }
831 
832 bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
833                                           const DataLayout &DL) {
834   assert(U->getType() == To->getType() && "values must have matching types");
835   // Not a pointer, just return true.
836   if (!To->getType()->isPointerTy())
837     return true;
838 
839   if (isPointerAlwaysReplaceable(&*U, To, DL))
840     return true;
841   return isPointerUseReplacable(U);
842 }
843 
844 bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
845                                      const DataLayout &DL) {
846   assert(From->getType() == To->getType() && "values must have matching types");
847   // Not a pointer, just return true.
848   if (!From->getType()->isPointerTy())
849     return true;
850 
851   return isPointerAlwaysReplaceable(From, To, DL);
852 }
853 
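// A loop qualifies as dereferenceable read-only if every load in it is
// provably dereferenceable and aligned across the whole loop and no other
// instruction may read or write memory or throw.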
854 bool llvm::isDereferenceableReadOnlyLoop(
855     Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
856     SmallVectorImpl<const SCEVPredicate *> *Predicates) {
857   for (BasicBlock *BB : L->blocks()) {
858     for (Instruction &I : *BB) {
859       if (auto *LI = dyn_cast<LoadInst>(&I)) {
860         if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
861           return false;
862       } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
863         return false;
864     }
865   }
866   return true;
867 }
868