//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(true));

/// SearchLimitReached / SearchTimes show how often the limit on the number of
/// GEP decomposition steps is reached. Hitting the limit reduces the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V or UnknownSize if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return TypeSize::getFixed(Size);
  return std::nullopt;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
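/// For example (illustrative): for a dereferenceable(16) pointer queried with
/// a precise location size of 24 bytes, the minimal extent is 24 bytes, while
/// an imprecise query yields the 16 dereferenceable bytes.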
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::getFixed(DerefBytes);
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}

/// Return true if both V1 and V2 are VScale.
static bool areBothVScale(const Value *V1, const Value *V2) {
  return PatternMatch::match(V1, PatternMatch::m_VScale()) &&
         PatternMatch::match(V2, PatternMatch::m_VScale());
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBefore(const Value *Object,
                                            const Instruction *I, bool OrAt) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}

bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
                                             const Instruction *I, bool OrAt) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(DT.getRoot()->getParent()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  // No context instruction means any use is capturing.
  if (!I)
    return false;

  if (I == Iter.first->second) {
    if (OrAt)
      return false;
    return isNotInCycle(I, &DT, LI);
  }

  return !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
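/// For example (illustrative): an i64 value V with TruncBits = 32,
/// SExtBits = 8 and ZExtBits = 24 stands for
///   zext i40 (sext i32 (trunc i64 V to i32) to i40) to i64
/// and getBitWidth() returns 64 - 32 + 8 + 24 = 64.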
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// Whether trunc(V) is non-negative.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(zext(NewV)) == zext(NewV)
    // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
    // The nneg can be preserved from the inner zext here but must be dropped
    // from the outer.
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
    // The nneg can be preserved on the outer zext here.
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;
    // If either CastedValue has a nneg zext then the sext/zext bits are
    // interchangeable for that value.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);
    return false;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
} // end anonymous namespace

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
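/// For example (illustrative), given
///   %a = add nsw i32 %x, 4
///   %m = mul nsw i32 %a, 2
/// analyzing %m yields 2*%x + 8. Note that NSW is not propagated through the
/// multiplication here, because the inner expression carries a non-zero
/// offset (see LinearExpression::mul).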
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is 'or', and it is limited to the
      // case where the 'or' is both nuw and nsw (i.e. disjoint).
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
        if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), NSW), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
        Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// Ensure that a pointer offset fits in an integer of size IndexSize
/// (in bits) when that size is smaller than the maximum index size. This is
/// an issue, for example, for 32b pointers with negative indices that rely on
/// two's complement wrap-arounds for precise alias information where the
/// maximum index size is 64b.
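/// For example (illustrative): with a 64-bit Offset of 0x00000000FFFFFFFF and
/// IndexSize = 32, shifting left by 32 bits and then arithmetic-shifting back
/// right yields -1, the 32-bit two's complement interpretation.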
static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  if (ShiftBits != 0) {
    Offset <<= ShiftBits;
    Offset.ashrInPlace(ShiftBits);
  }
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
} // end anonymous namespace

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (std::nullopt iff expression doesn't involve any geps)
  std::optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};


/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
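/// For example (illustrative), for
///   %p = getelementptr inbounds [10 x i32], ptr %base, i64 %i, i64 2
/// the result is Base = %base, Offset = 8 and one variable index
/// {V = %i, Scale = 40}, since sizeof([10 x i32]) is 40 bytes.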
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // attributes, but that have properties like returning an aliasing
        // pointer. Because an analysis may assume that a nocapture pointer is
        // not returned from such an intrinsic (the function would otherwise
        // have to be marked with a returned attribute), it is crucial to use
        // this function so that we stay in sync with CaptureTracking. Not
        // using it may cause weird miscompilations where two aliasing
        // pointers are assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == std::nullopt)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }

      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
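      // E.g. (illustrative): with a 64-bit index size, an i16 index gets
      // SExtBits = 48, while an i128 index gets TruncBits = 64.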
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits, false), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          LE.IsNSW = false; // We cannot guarantee nsw for the merge.
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // index size.
      adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return ModRefInfo::ModRef;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // A call can access a locally allocated object either because it is passed as
  // an argument to the call, or because it has escaped prior to the call.
  //
  // Make sure the object has not escaped here, and then check that none of the
  // call arguments alias the object below.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBefore(Object, Call, /*OrAt*/ false)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      if (!(*CI)->getType()->isPointerTy())
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        Result |= ModRefInfo::Mod;
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // Be conservative.
  return ModRefInfo::ModRef;
}

/// Return true if we know V to be the base address of the corresponding memory
/// object. This implies that any address less than V must be out of bounds
/// for the underlying object. Note that just being isIdentifiedObject() is
/// not enough - for example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t. the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully supports scalable location
  // sizes.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && !V1Size.isScalable() &&
        DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //   GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const TypeSize LSize = VLeftSize.getValue();
    if (!LSize.isScalable()) {
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
            Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache. Note that originally offset estimated as GEP1-V2, but
          // AliasResult contains the shift that represents GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    } else {
      // We can use the getVScaleRange to prove that Off >= (CR.upper * LSize).
      ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
      bool Overflow;
      APInt UpperRange = CR.getUnsignedMax().umul_ov(
          APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
      if (!Overflow && Off.uge(UpperRange))
        return AliasResult::NoAlias;
    }
  }

  // VScale Alias Analysis - Given one scalable offset between accesses and a
  // scalable typesize, we can divide each side by vscale, treating both values
  // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
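  // E.g. (illustrative): two accesses of vscale x 16 bytes whose pointers
  // differ by exactly 16 * vscale bytes cannot overlap for any value of
  // vscale, because 16 >= 16 after dividing both sides by vscale.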
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
                          PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;

    // Check if the offset is known to not overflow, if it does then attempt to
    // prove it with the known values of vscale_range.
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    if (Overflows) {
      ConstantRange CR = getVScaleRange(&F, Scale.getBitWidth());
      (void)CR.getSignedMax().smul_ov(Scale, Overflows);
    }

    if (!Overflows) {
      // Note that we do not check that the typesize is scalable, as vscale >= 1
      // so noalias still holds so long as the dependency distance is at least
      // as big as the typesize.
      if (VLeftSize.hasValue() &&
          Scale.abs().uge(VLeftSize.getValue().getKnownMinValue()))
        return AliasResult::NoAlias;
    }
  }

  // Bail on analysing scalable LocationSize.
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(ConstantRange(Scale));
    else
      CR = CR.smul_fast(ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // We now have accesses at two offsets from the same base:
  //   1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //   2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
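  // E.g. (illustrative): GCD = 8, ModOffset = 4 and V1Size = V2Size = 4 give
  // accesses at [4..8) and [0..4) modulo 8, which cannot overlap.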
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  unsigned BW = OffsetRange.getBitWidth();
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
        // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
        // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
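        // E.g. (illustrative): an i8 value multiplied in 64 bits has
        // ValOrigBW = 8 and MaxScaleValueBW = 56, so any Scale up to
        // 2^56 - 1 cannot wrap, because abs(V) <= 2^8 - 1.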
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                        DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi.
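    // E.g. (illustrative): Offset = 0 and MinAbsVarIndex = 16 give
    // OffsetLo = -16 and OffsetHi = 16, so two 16-byte accesses starting at
    // least 16 bytes apart cannot overlap.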
1354 if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1355 OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1356 return AliasResult::NoAlias;
1357 }
1358
1359 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
1360 return AliasResult::NoAlias;
1361
1362 // Statically, we can see that the base objects are the same, but the
1363 // pointers have dynamic offsets which we can't resolve. And none of our
1364 // little tricks above worked.
1365 return AliasResult::MayAlias;
1366 }
1367
MergeAliasResults(AliasResult A,AliasResult B)1368 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1369 // If the results agree, take it.
1370 if (A == B)
1371 return A;
1372 // A mix of PartialAlias and MustAlias is PartialAlias.
1373 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1374 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1375 return AliasResult::PartialAlias;
1376 // Otherwise, we don't know anything.
1377 return AliasResult::MayAlias;
1378 }
1379
1380 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1381 /// against another.
1382 AliasResult
aliasSelect(const SelectInst * SI,LocationSize SISize,const Value * V2,LocationSize V2Size,AAQueryInfo & AAQI)1383 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1384 const Value *V2, LocationSize V2Size,
1385 AAQueryInfo &AAQI) {
1386 // If the values are Selects with the same condition, we can do a more precise
1387 // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      AliasResult Alias =
          AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                         MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias =
          AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                         MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias accordingly. Otherwise, return MayAlias.
  AliasResult Alias =
      AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                     MemoryLocation(V2, V2Size), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias =
      AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                     MemoryLocation(V2, V2Size), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise as
  // well as more efficient check: just check for aliases between the values
  // on corresponding edges.
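  // For example (an illustrative sketch):
  //   %phi1 = phi ptr [ %a, %bb1 ], [ %b, %bb2 ]
  //   %phi2 = phi ptr [ %x, %bb1 ], [ %y, %bb2 ]
  // only requires comparing %a with %x and %b with %y, since both PHIs
  // select their value based on the same incoming edge.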
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      std::optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = AAQI.AAR.alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
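  // The canonical case is a pointer induction variable (an illustrative
  // sketch):
  //   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr i8, ptr %p, i64 1
  // Here %p is ultimately based on %base, so proving NoAlias against %base's
  // underlying object suffices.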
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // Skip the phi itself being the incoming value.
    if (PV1 == PN)
      continue;

    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input. It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
                                     MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && AAQI.CI->isNotCapturedBefore(
                                  O2, dyn_cast<Instruction>(O1), /*OrAt*/ true))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) && AAQI.CI->isNotCapturedBefore(
                                  O1, dyn_cast<Instruction>(O2), /*OrAt*/ true))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
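  // For example (an illustrative sketch):
  //   %a = alloca i32             ; 4-byte object
  //   %v = load i64, ptr %p       ; 8-byte access
  // An 8-byte access to a 4-byte object would be undefined behavior, so the
  // load from %p can be assumed not to alias %a.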
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

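  // A "separate_storage" assumption explicitly tells us that two pointers
  // are based on disjoint allocations, e.g. (an illustrative sketch):
  //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
  // If the underlying objects of V1 and V2 match such a pair of hints, and
  // the assume is valid at the query's context, we can report NoAlias.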
  if (EnableSeparateStorageAnalysis) {
    for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
      if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
        continue;

      AssumeInst *Assume = cast<AssumeInst>(Elem);
      OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
      if (OBU.getTagName() == "separate_storage") {
        assert(OBU.Inputs.size() == 2);
        const Value *Hint1 = OBU.Inputs[0].get();
        const Value *Hint2 = OBU.Inputs[1].get();
        // This is often a no-op; instcombine rewrites this for us. No-op
        // getUnderlyingObject calls are fast, though.
        const Value *HintO1 = getUnderlyingObject(Hint1);
        const Value *HintO2 = getUnderlyingObject(Hint2);

        DominatorTree *DT = getDT(AAQI);
        auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
          if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
            return isValidAssumeForContext(Assume, PtrI, DT,
                                           /* AllowEphemerals */ true);
          }
          if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
            const Instruction *FirstI =
                &*PtrA->getParent()->getEntryBlock().begin();
            return isValidAssumeForContext(Assume, FirstI, DT,
                                           /* AllowEphemerals */ true);
          }
          return false;
        };

        if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
          // Note that we go back to V1 and V2 for the
          // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
          // so strictly more assumptions are valid for them.
          if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
                                               /* AllowEphemerals */ true)) ||
              ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
            return AliasResult::NoAlias;
          }
        }
      }
    }
  }

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects
  // aren't disjoint. We do this so that the rest of BasicAA does not have to
  // deal with accesses before the base pointer, and to improve cache
  // utilization by merging equivalent states.
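  // Before-the-pointer sizes arise e.g. from the recursive phi handling
  // above, where a pointer may step backwards through an object (an
  // illustrative sketch):
  //   %p = phi ptr [ %base, %entry ], [ %p.dec, %loop ]
  //   %p.dec = getelementptr i8, ptr %p, i64 -1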
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
  // cache key, because some cases where MayBeCrossIteration==false returns
  // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption. This may either be a direct use
      // of an assumption, or a use of an entry that may itself be based on an
      // assumption.
      ++AAQI.NumAssumptionUses;
      if (Entry.isAssumption())
        ++Entry.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias) {
    AAQI.AssumptionBasedResults.push_back(Locs);
    Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased;
  } else {
    Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
  }

  // Depth is incremented before this function is called, so Depth==1 indicates
  // a root query.
  if (AAQI.Depth == 1) {
    // Any remaining assumption based results must be based on proven
    // assumptions, so convert them to definitive results.
    for (const auto &Loc : AAQI.AssumptionBasedResults) {
      auto It = AAQI.AliasCache.find(Loc);
      if (It != AAQI.AliasCache.end())
        It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
    }
    AAQI.AssumptionBasedResults.clear();
    AAQI.NumAssumptionUses = 0;
  }
  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// If the values may come from different cycle iterations, this will also
/// check that the values are not part of a cycle. We have to do this because
/// we are looking through phi nodes, that is we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2,
                                                  const AAQueryInfo &AAQI) {
  if (V != V2)
    return false;

  if (!AAQI.MayBeCrossIteration)
    return true;

  // Non-instructions and instructions in the entry block cannot be part of
  // a loop.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst || Inst->getParent()->isEntryBlock())
    return true;

  return isNotInCycle(Inst, getDT(AAQI), /*LI*/ nullptr);
}

/// Computes the symbolic difference between two de-composed GEPs.
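///
/// For example (an illustrative sketch): subtracting the decomposition of
/// "%p + 1*%i + 2" from that of "%p + 1*%i + 10" cancels the common 1*%i
/// term, leaving a constant offset of 8 and no variable indices.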
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
           !areBothVScale(Dest.Val.V, Src.Val.V)) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
                                /* IsNegated */ true};
      DestGEP.VarIndices.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      !Var0.hasNegatedScaleOf(Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1), we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  Result.reset(new BasicAAResult(F.getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}