1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The implementation for the loop memory dependence that was originally 10 // developed for the loop vectorizer. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Analysis/LoopAccessAnalysis.h" 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/DepthFirstIterator.h" 18 #include "llvm/ADT/EquivalenceClasses.h" 19 #include "llvm/ADT/PointerIntPair.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SetVector.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallSet.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/iterator_range.h" 26 #include "llvm/Analysis/AliasAnalysis.h" 27 #include "llvm/Analysis/AliasSetTracker.h" 28 #include "llvm/Analysis/LoopAnalysisManager.h" 29 #include "llvm/Analysis/LoopInfo.h" 30 #include "llvm/Analysis/MemoryLocation.h" 31 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 32 #include "llvm/Analysis/ScalarEvolution.h" 33 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 34 #include "llvm/Analysis/TargetLibraryInfo.h" 35 #include "llvm/Analysis/ValueTracking.h" 36 #include "llvm/Analysis/VectorUtils.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/Constants.h" 39 #include "llvm/IR/DataLayout.h" 40 #include "llvm/IR/DebugLoc.h" 41 #include "llvm/IR/DerivedTypes.h" 42 #include "llvm/IR/DiagnosticInfo.h" 43 #include "llvm/IR/Dominators.h" 44 #include "llvm/IR/Function.h" 45 #include "llvm/IR/InstrTypes.h" 46 #include "llvm/IR/Instruction.h" 47 #include "llvm/IR/Instructions.h" 48 #include "llvm/IR/Operator.h" 49 #include "llvm/IR/PassManager.h" 50 #include "llvm/IR/Type.h" 51 #include "llvm/IR/Value.h" 52 #include "llvm/IR/ValueHandle.h" 53 #include "llvm/InitializePasses.h" 54 #include "llvm/Pass.h" 55 #include "llvm/Support/Casting.h" 56 #include "llvm/Support/CommandLine.h" 57 #include "llvm/Support/Debug.h" 58 #include "llvm/Support/ErrorHandling.h" 59 #include "llvm/Support/raw_ostream.h" 60 #include <algorithm> 61 #include <cassert> 62 #include <cstdint> 63 #include <cstdlib> 64 #include <iterator> 65 #include <utility> 66 #include <vector> 67 68 using namespace llvm; 69 70 #define DEBUG_TYPE "loop-accesses" 71 72 static cl::opt<unsigned, true> 73 VectorizationFactor("force-vector-width", cl::Hidden, 74 cl::desc("Sets the SIMD width. Zero is autoselect."), 75 cl::location(VectorizerParams::VectorizationFactor)); 76 unsigned VectorizerParams::VectorizationFactor; 77 78 static cl::opt<unsigned, true> 79 VectorizationInterleave("force-vector-interleave", cl::Hidden, 80 cl::desc("Sets the vectorization interleave count. " 81 "Zero is autoselect."), 82 cl::location( 83 VectorizerParams::VectorizationInterleave)); 84 unsigned VectorizerParams::VectorizationInterleave; 85 86 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( 87 "runtime-memory-check-threshold", cl::Hidden, 88 cl::desc("When performing memory disambiguation checks at runtime do not " 89 "generate more than this number of comparisons (default = 8)."), 90 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); 91 unsigned VectorizerParams::RuntimeMemoryCheckThreshold; 92 93 /// The maximum iterations used to merge memory checks 94 static cl::opt<unsigned> MemoryCheckMergeThreshold( 95 "memory-check-merge-threshold", cl::Hidden, 96 cl::desc("Maximum number of comparisons done when trying to merge " 97 "runtime memory checks. (default = 100)"), 98 cl::init(100)); 99 100 /// Maximum SIMD width. 101 const unsigned VectorizerParams::MaxVectorWidth = 64; 102 103 /// We collect dependences up to this threshold. 104 static cl::opt<unsigned> 105 MaxDependences("max-dependences", cl::Hidden, 106 cl::desc("Maximum number of dependences collected by " 107 "loop-access analysis (default = 100)"), 108 cl::init(100)); 109 110 /// This enables versioning on the strides of symbolically striding memory 111 /// accesses in code like the following. 112 /// for (i = 0; i < N; ++i) 113 /// A[i * Stride1] += B[i * Stride2] ... 114 /// 115 /// Will be roughly translated to 116 /// if (Stride1 == 1 && Stride2 == 1) { 117 /// for (i = 0; i < N; i+=4) 118 /// A[i:i+3] += ... 119 /// } else 120 /// ... 121 static cl::opt<bool> EnableMemAccessVersioning( 122 "enable-mem-access-versioning", cl::init(true), cl::Hidden, 123 cl::desc("Enable symbolic stride memory access versioning")); 124 125 /// Enable store-to-load forwarding conflict detection. This option can 126 /// be disabled for correctness testing. 127 static cl::opt<bool> EnableForwardingConflictDetection( 128 "store-to-load-forwarding-conflict-detection", cl::Hidden, 129 cl::desc("Enable conflict detection in loop-access analysis"), 130 cl::init(true)); 131 132 bool VectorizerParams::isInterleaveForced() { 133 return ::VectorizationInterleave.getNumOccurrences() > 0; 134 } 135 136 Value *llvm::stripIntegerCast(Value *V) { 137 if (auto *CI = dyn_cast<CastInst>(V)) 138 if (CI->getOperand(0)->getType()->isIntegerTy()) 139 return CI->getOperand(0); 140 return V; 141 } 142 143 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, 144 const ValueToValueMap &PtrToStride, 145 Value *Ptr) { 146 const SCEV *OrigSCEV = PSE.getSCEV(Ptr); 147 148 // If there is an entry in the map return the SCEV of the pointer with the 149 // symbolic stride replaced by one. 150 ValueToValueMap::const_iterator SI = PtrToStride.find(Ptr); 151 if (SI == PtrToStride.end()) 152 // For a non-symbolic stride, just return the original expression. 153 return OrigSCEV; 154 155 Value *StrideVal = stripIntegerCast(SI->second); 156 157 ScalarEvolution *SE = PSE.getSE(); 158 const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); 159 const auto *CT = 160 static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); 161 162 PSE.addPredicate(*SE->getEqualPredicate(U, CT)); 163 auto *Expr = PSE.getSCEV(Ptr); 164 165 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV 166 << " by: " << *Expr << "\n"); 167 return Expr; 168 } 169 170 RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup( 171 unsigned Index, RuntimePointerChecking &RtCheck) 172 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start), 173 AddressSpace(RtCheck.Pointers[Index] 174 .PointerValue->getType() 175 ->getPointerAddressSpace()) { 176 Members.push_back(Index); 177 } 178 179 /// Calculate Start and End points of memory access. 180 /// Let's assume A is the first access and B is a memory access on N-th loop 181 /// iteration. Then B is calculated as: 182 /// B = A + Step*N . 183 /// Step value may be positive or negative. 184 /// N is a calculated back-edge taken count: 185 /// N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0 186 /// Start and End points are calculated in the following way: 187 /// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt, 188 /// where SizeOfElt is the size of single memory access in bytes. 189 /// 190 /// There is no conflict when the intervals are disjoint: 191 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End) 192 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr, 193 unsigned DepSetId, unsigned ASId, 194 const ValueToValueMap &Strides, 195 PredicatedScalarEvolution &PSE) { 196 // Get the stride replaced scev. 197 const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 198 ScalarEvolution *SE = PSE.getSE(); 199 200 const SCEV *ScStart; 201 const SCEV *ScEnd; 202 203 if (SE->isLoopInvariant(Sc, Lp)) { 204 ScStart = ScEnd = Sc; 205 } else { 206 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); 207 assert(AR && "Invalid addrec expression"); 208 const SCEV *Ex = PSE.getBackedgeTakenCount(); 209 210 ScStart = AR->getStart(); 211 ScEnd = AR->evaluateAtIteration(Ex, *SE); 212 const SCEV *Step = AR->getStepRecurrence(*SE); 213 214 // For expressions with negative step, the upper bound is ScStart and the 215 // lower bound is ScEnd. 216 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) { 217 if (CStep->getValue()->isNegative()) 218 std::swap(ScStart, ScEnd); 219 } else { 220 // Fallback case: the step is not constant, but we can still 221 // get the upper and lower bounds of the interval by using min/max 222 // expressions. 223 ScStart = SE->getUMinExpr(ScStart, ScEnd); 224 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd); 225 } 226 } 227 // Add the size of the pointed element to ScEnd. 228 auto &DL = Lp->getHeader()->getModule()->getDataLayout(); 229 Type *IdxTy = DL.getIndexType(Ptr->getType()); 230 const SCEV *EltSizeSCEV = 231 SE->getStoreSizeOfExpr(IdxTy, Ptr->getType()->getPointerElementType()); 232 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV); 233 234 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc); 235 } 236 237 SmallVector<RuntimePointerCheck, 4> 238 RuntimePointerChecking::generateChecks() const { 239 SmallVector<RuntimePointerCheck, 4> Checks; 240 241 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 242 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) { 243 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I]; 244 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J]; 245 246 if (needsChecking(CGI, CGJ)) 247 Checks.push_back(std::make_pair(&CGI, &CGJ)); 248 } 249 } 250 return Checks; 251 } 252 253 void RuntimePointerChecking::generateChecks( 254 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 255 assert(Checks.empty() && "Checks is not empty"); 256 groupChecks(DepCands, UseDependencies); 257 Checks = generateChecks(); 258 } 259 260 bool RuntimePointerChecking::needsChecking( 261 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const { 262 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I) 263 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J) 264 if (needsChecking(M.Members[I], N.Members[J])) 265 return true; 266 return false; 267 } 268 269 /// Compare \p I and \p J and return the minimum. 270 /// Return nullptr in case we couldn't find an answer. 271 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J, 272 ScalarEvolution *SE) { 273 const SCEV *Diff = SE->getMinusSCEV(J, I); 274 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff); 275 276 if (!C) 277 return nullptr; 278 if (C->getValue()->isNegative()) 279 return J; 280 return I; 281 } 282 283 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, 284 RuntimePointerChecking &RtCheck) { 285 return addPointer( 286 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End, 287 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(), 288 *RtCheck.SE); 289 } 290 291 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start, 292 const SCEV *End, unsigned AS, 293 ScalarEvolution &SE) { 294 assert(AddressSpace == AS && 295 "all pointers in a checking group must be in the same address space"); 296 297 // Compare the starts and ends with the known minimum and maximum 298 // of this set. We need to know how we compare against the min/max 299 // of the set in order to be able to emit memchecks. 300 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE); 301 if (!Min0) 302 return false; 303 304 const SCEV *Min1 = getMinFromExprs(End, High, &SE); 305 if (!Min1) 306 return false; 307 308 // Update the low bound expression if we've found a new min value. 309 if (Min0 == Start) 310 Low = Start; 311 312 // Update the high bound expression if we've found a new max value. 313 if (Min1 != End) 314 High = End; 315 316 Members.push_back(Index); 317 return true; 318 } 319 320 void RuntimePointerChecking::groupChecks( 321 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 322 // We build the groups from dependency candidates equivalence classes 323 // because: 324 // - We know that pointers in the same equivalence class share 325 // the same underlying object and therefore there is a chance 326 // that we can compare pointers 327 // - We wouldn't be able to merge two pointers for which we need 328 // to emit a memcheck. The classes in DepCands are already 329 // conveniently built such that no two pointers in the same 330 // class need checking against each other. 331 332 // We use the following (greedy) algorithm to construct the groups 333 // For every pointer in the equivalence class: 334 // For each existing group: 335 // - if the difference between this pointer and the min/max bounds 336 // of the group is a constant, then make the pointer part of the 337 // group and update the min/max bounds of that group as required. 338 339 CheckingGroups.clear(); 340 341 // If we need to check two pointers to the same underlying object 342 // with a non-constant difference, we shouldn't perform any pointer 343 // grouping with those pointers. This is because we can easily get 344 // into cases where the resulting check would return false, even when 345 // the accesses are safe. 346 // 347 // The following example shows this: 348 // for (i = 0; i < 1000; ++i) 349 // a[5000 + i * m] = a[i] + a[i + 9000] 350 // 351 // Here grouping gives a check of (5000, 5000 + 1000 * m) against 352 // (0, 10000) which is always false. However, if m is 1, there is no 353 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows 354 // us to perform an accurate check in this case. 355 // 356 // The above case requires that we have an UnknownDependence between 357 // accesses to the same underlying object. This cannot happen unless 358 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies 359 // is also false. In this case we will use the fallback path and create 360 // separate checking groups for all pointers. 361 362 // If we don't have the dependency partitions, construct a new 363 // checking pointer group for each pointer. This is also required 364 // for correctness, because in this case we can have checking between 365 // pointers to the same underlying object. 366 if (!UseDependencies) { 367 for (unsigned I = 0; I < Pointers.size(); ++I) 368 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this)); 369 return; 370 } 371 372 unsigned TotalComparisons = 0; 373 374 DenseMap<Value *, unsigned> PositionMap; 375 for (unsigned Index = 0; Index < Pointers.size(); ++Index) 376 PositionMap[Pointers[Index].PointerValue] = Index; 377 378 // We need to keep track of what pointers we've already seen so we 379 // don't process them twice. 380 SmallSet<unsigned, 2> Seen; 381 382 // Go through all equivalence classes, get the "pointer check groups" 383 // and add them to the overall solution. We use the order in which accesses 384 // appear in 'Pointers' to enforce determinism. 385 for (unsigned I = 0; I < Pointers.size(); ++I) { 386 // We've seen this pointer before, and therefore already processed 387 // its equivalence class. 388 if (Seen.count(I)) 389 continue; 390 391 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue, 392 Pointers[I].IsWritePtr); 393 394 SmallVector<RuntimeCheckingPtrGroup, 2> Groups; 395 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access)); 396 397 // Because DepCands is constructed by visiting accesses in the order in 398 // which they appear in alias sets (which is deterministic) and the 399 // iteration order within an equivalence class member is only dependent on 400 // the order in which unions and insertions are performed on the 401 // equivalence class, the iteration order is deterministic. 402 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end(); 403 MI != ME; ++MI) { 404 auto PointerI = PositionMap.find(MI->getPointer()); 405 assert(PointerI != PositionMap.end() && 406 "pointer in equivalence class not found in PositionMap"); 407 unsigned Pointer = PointerI->second; 408 bool Merged = false; 409 // Mark this pointer as seen. 410 Seen.insert(Pointer); 411 412 // Go through all the existing sets and see if we can find one 413 // which can include this pointer. 414 for (RuntimeCheckingPtrGroup &Group : Groups) { 415 // Don't perform more than a certain amount of comparisons. 416 // This should limit the cost of grouping the pointers to something 417 // reasonable. If we do end up hitting this threshold, the algorithm 418 // will create separate groups for all remaining pointers. 419 if (TotalComparisons > MemoryCheckMergeThreshold) 420 break; 421 422 TotalComparisons++; 423 424 if (Group.addPointer(Pointer, *this)) { 425 Merged = true; 426 break; 427 } 428 } 429 430 if (!Merged) 431 // We couldn't add this pointer to any existing set or the threshold 432 // for the number of comparisons has been reached. Create a new group 433 // to hold the current pointer. 434 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this)); 435 } 436 437 // We've computed the grouped checks for this partition. 438 // Save the results and continue with the next one. 439 llvm::copy(Groups, std::back_inserter(CheckingGroups)); 440 } 441 } 442 443 bool RuntimePointerChecking::arePointersInSamePartition( 444 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1, 445 unsigned PtrIdx2) { 446 return (PtrToPartition[PtrIdx1] != -1 && 447 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]); 448 } 449 450 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const { 451 const PointerInfo &PointerI = Pointers[I]; 452 const PointerInfo &PointerJ = Pointers[J]; 453 454 // No need to check if two readonly pointers intersect. 455 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr) 456 return false; 457 458 // Only need to check pointers between two different dependency sets. 459 if (PointerI.DependencySetId == PointerJ.DependencySetId) 460 return false; 461 462 // Only need to check pointers in the same alias set. 463 if (PointerI.AliasSetId != PointerJ.AliasSetId) 464 return false; 465 466 return true; 467 } 468 469 void RuntimePointerChecking::printChecks( 470 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks, 471 unsigned Depth) const { 472 unsigned N = 0; 473 for (const auto &Check : Checks) { 474 const auto &First = Check.first->Members, &Second = Check.second->Members; 475 476 OS.indent(Depth) << "Check " << N++ << ":\n"; 477 478 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n"; 479 for (unsigned K = 0; K < First.size(); ++K) 480 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n"; 481 482 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n"; 483 for (unsigned K = 0; K < Second.size(); ++K) 484 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n"; 485 } 486 } 487 488 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const { 489 490 OS.indent(Depth) << "Run-time memory checks:\n"; 491 printChecks(OS, Checks, Depth); 492 493 OS.indent(Depth) << "Grouped accesses:\n"; 494 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 495 const auto &CG = CheckingGroups[I]; 496 497 OS.indent(Depth + 2) << "Group " << &CG << ":\n"; 498 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High 499 << ")\n"; 500 for (unsigned J = 0; J < CG.Members.size(); ++J) { 501 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr 502 << "\n"; 503 } 504 } 505 } 506 507 namespace { 508 509 /// Analyses memory accesses in a loop. 510 /// 511 /// Checks whether run time pointer checks are needed and builds sets for data 512 /// dependence checking. 513 class AccessAnalysis { 514 public: 515 /// Read or write access location. 516 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo; 517 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList; 518 519 AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI, 520 MemoryDepChecker::DepCandidates &DA, 521 PredicatedScalarEvolution &PSE) 522 : TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA), PSE(PSE) {} 523 524 /// Register a load and whether it is only read from. 525 void addLoad(MemoryLocation &Loc, bool IsReadOnly) { 526 Value *Ptr = const_cast<Value*>(Loc.Ptr); 527 AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags); 528 Accesses.insert(MemAccessInfo(Ptr, false)); 529 if (IsReadOnly) 530 ReadOnlyPtr.insert(Ptr); 531 } 532 533 /// Register a store. 534 void addStore(MemoryLocation &Loc) { 535 Value *Ptr = const_cast<Value*>(Loc.Ptr); 536 AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags); 537 Accesses.insert(MemAccessInfo(Ptr, true)); 538 } 539 540 /// Check if we can emit a run-time no-alias check for \p Access. 541 /// 542 /// Returns true if we can emit a run-time no alias check for \p Access. 543 /// If we can check this access, this also adds it to a dependence set and 544 /// adds a run-time to check for it to \p RtCheck. If \p Assume is true, 545 /// we will attempt to use additional run-time checks in order to get 546 /// the bounds of the pointer. 547 bool createCheckForAccess(RuntimePointerChecking &RtCheck, 548 MemAccessInfo Access, 549 const ValueToValueMap &Strides, 550 DenseMap<Value *, unsigned> &DepSetId, 551 Loop *TheLoop, unsigned &RunningDepId, 552 unsigned ASId, bool ShouldCheckStride, 553 bool Assume); 554 555 /// Check whether we can check the pointers at runtime for 556 /// non-intersection. 557 /// 558 /// Returns true if we need no check or if we do and we can generate them 559 /// (i.e. the pointers have computable bounds). 560 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE, 561 Loop *TheLoop, const ValueToValueMap &Strides, 562 bool ShouldCheckWrap = false); 563 564 /// Goes over all memory accesses, checks whether a RT check is needed 565 /// and builds sets of dependent accesses. 566 void buildDependenceSets() { 567 processMemAccesses(); 568 } 569 570 /// Initial processing of memory accesses determined that we need to 571 /// perform dependency checking. 572 /// 573 /// Note that this can later be cleared if we retry memcheck analysis without 574 /// dependency checking (i.e. FoundNonConstantDistanceDependence). 575 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); } 576 577 /// We decided that no dependence analysis would be used. Reset the state. 578 void resetDepChecks(MemoryDepChecker &DepChecker) { 579 CheckDeps.clear(); 580 DepChecker.clearDependences(); 581 } 582 583 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; } 584 585 private: 586 typedef SetVector<MemAccessInfo> PtrAccessSet; 587 588 /// Go over all memory access and check whether runtime pointer checks 589 /// are needed and build sets of dependency check candidates. 590 void processMemAccesses(); 591 592 /// Set of all accesses. 593 PtrAccessSet Accesses; 594 595 /// The loop being checked. 596 const Loop *TheLoop; 597 598 /// List of accesses that need a further dependence check. 599 MemAccessInfoList CheckDeps; 600 601 /// Set of pointers that are read only. 602 SmallPtrSet<Value*, 16> ReadOnlyPtr; 603 604 /// An alias set tracker to partition the access set by underlying object and 605 //intrinsic property (such as TBAA metadata). 606 AliasSetTracker AST; 607 608 LoopInfo *LI; 609 610 /// Sets of potentially dependent accesses - members of one set share an 611 /// underlying pointer. The set "CheckDeps" identfies which sets really need a 612 /// dependence check. 613 MemoryDepChecker::DepCandidates &DepCands; 614 615 /// Initial processing of memory accesses determined that we may need 616 /// to add memchecks. Perform the analysis to determine the necessary checks. 617 /// 618 /// Note that, this is different from isDependencyCheckNeeded. When we retry 619 /// memcheck analysis without dependency checking 620 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is 621 /// cleared while this remains set if we have potentially dependent accesses. 622 bool IsRTCheckAnalysisNeeded = false; 623 624 /// The SCEV predicate containing all the SCEV-related assumptions. 625 PredicatedScalarEvolution &PSE; 626 }; 627 628 } // end anonymous namespace 629 630 /// Check whether a pointer can participate in a runtime bounds check. 631 /// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr 632 /// by adding run-time checks (overflow checks) if necessary. 633 static bool hasComputableBounds(PredicatedScalarEvolution &PSE, 634 const ValueToValueMap &Strides, Value *Ptr, 635 Loop *L, bool Assume) { 636 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 637 638 // The bounds for loop-invariant pointer is trivial. 639 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 640 return true; 641 642 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 643 644 if (!AR && Assume) 645 AR = PSE.getAsAddRec(Ptr); 646 647 if (!AR) 648 return false; 649 650 return AR->isAffine(); 651 } 652 653 /// Check whether a pointer address cannot wrap. 654 static bool isNoWrap(PredicatedScalarEvolution &PSE, 655 const ValueToValueMap &Strides, Value *Ptr, Loop *L) { 656 const SCEV *PtrScev = PSE.getSCEV(Ptr); 657 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 658 return true; 659 660 Type *AccessTy = Ptr->getType()->getPointerElementType(); 661 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides); 662 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW)) 663 return true; 664 665 return false; 666 } 667 668 static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, 669 function_ref<void(Value *)> AddPointer) { 670 SmallPtrSet<Value *, 8> Visited; 671 SmallVector<Value *> WorkList; 672 WorkList.push_back(StartPtr); 673 674 while (!WorkList.empty()) { 675 Value *Ptr = WorkList.pop_back_val(); 676 if (!Visited.insert(Ptr).second) 677 continue; 678 auto *PN = dyn_cast<PHINode>(Ptr); 679 // SCEV does not look through non-header PHIs inside the loop. Such phis 680 // can be analyzed by adding separate accesses for each incoming pointer 681 // value. 682 if (PN && InnermostLoop.contains(PN->getParent()) && 683 PN->getParent() != InnermostLoop.getHeader()) { 684 for (const Use &Inc : PN->incoming_values()) 685 WorkList.push_back(Inc); 686 } else 687 AddPointer(Ptr); 688 } 689 } 690 691 bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck, 692 MemAccessInfo Access, 693 const ValueToValueMap &StridesMap, 694 DenseMap<Value *, unsigned> &DepSetId, 695 Loop *TheLoop, unsigned &RunningDepId, 696 unsigned ASId, bool ShouldCheckWrap, 697 bool Assume) { 698 Value *Ptr = Access.getPointer(); 699 700 if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume)) 701 return false; 702 703 // When we run after a failing dependency check we have to make sure 704 // we don't have wrapping pointers. 705 if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) { 706 auto *Expr = PSE.getSCEV(Ptr); 707 if (!Assume || !isa<SCEVAddRecExpr>(Expr)) 708 return false; 709 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 710 } 711 712 // The id of the dependence set. 713 unsigned DepId; 714 715 if (isDependencyCheckNeeded()) { 716 Value *Leader = DepCands.getLeaderValue(Access).getPointer(); 717 unsigned &LeaderId = DepSetId[Leader]; 718 if (!LeaderId) 719 LeaderId = RunningDepId++; 720 DepId = LeaderId; 721 } else 722 // Each access has its own dependence set. 723 DepId = RunningDepId++; 724 725 bool IsWrite = Access.getInt(); 726 RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE); 727 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n'); 728 729 return true; 730 } 731 732 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck, 733 ScalarEvolution *SE, Loop *TheLoop, 734 const ValueToValueMap &StridesMap, 735 bool ShouldCheckWrap) { 736 // Find pointers with computable bounds. We are going to use this information 737 // to place a runtime bound check. 738 bool CanDoRT = true; 739 740 bool MayNeedRTCheck = false; 741 if (!IsRTCheckAnalysisNeeded) return true; 742 743 bool IsDepCheckNeeded = isDependencyCheckNeeded(); 744 745 // We assign a consecutive id to access from different alias sets. 746 // Accesses between different groups doesn't need to be checked. 747 unsigned ASId = 0; 748 for (auto &AS : AST) { 749 int NumReadPtrChecks = 0; 750 int NumWritePtrChecks = 0; 751 bool CanDoAliasSetRT = true; 752 ++ASId; 753 754 // We assign consecutive id to access from different dependence sets. 755 // Accesses within the same set don't need a runtime check. 756 unsigned RunningDepId = 1; 757 DenseMap<Value *, unsigned> DepSetId; 758 759 SmallVector<MemAccessInfo, 4> Retries; 760 761 // First, count how many write and read accesses are in the alias set. Also 762 // collect MemAccessInfos for later. 763 SmallVector<MemAccessInfo, 4> AccessInfos; 764 for (const auto &A : AS) { 765 Value *Ptr = A.getValue(); 766 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true)); 767 768 if (IsWrite) 769 ++NumWritePtrChecks; 770 else 771 ++NumReadPtrChecks; 772 AccessInfos.emplace_back(Ptr, IsWrite); 773 } 774 775 // We do not need runtime checks for this alias set, if there are no writes 776 // or a single write and no reads. 777 if (NumWritePtrChecks == 0 || 778 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) { 779 assert((AS.size() <= 1 || 780 all_of(AS, 781 [this](auto AC) { 782 MemAccessInfo AccessWrite(AC.getValue(), true); 783 return DepCands.findValue(AccessWrite) == DepCands.end(); 784 })) && 785 "Can only skip updating CanDoRT below, if all entries in AS " 786 "are reads or there is at most 1 entry"); 787 continue; 788 } 789 790 for (auto &Access : AccessInfos) { 791 if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop, 792 RunningDepId, ASId, ShouldCheckWrap, false)) { 793 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" 794 << *Access.getPointer() << '\n'); 795 Retries.push_back(Access); 796 CanDoAliasSetRT = false; 797 } 798 } 799 800 // Note that this function computes CanDoRT and MayNeedRTCheck 801 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that 802 // we have a pointer for which we couldn't find the bounds but we don't 803 // actually need to emit any checks so it does not matter. 804 // 805 // We need runtime checks for this alias set, if there are at least 2 806 // dependence sets (in which case RunningDepId > 2) or if we need to re-try 807 // any bound checks (because in that case the number of dependence sets is 808 // incomplete). 809 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty(); 810 811 // We need to perform run-time alias checks, but some pointers had bounds 812 // that couldn't be checked. 813 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) { 814 // Reset the CanDoSetRt flag and retry all accesses that have failed. 815 // We know that we need these checks, so we can now be more aggressive 816 // and add further checks if required (overflow checks). 817 CanDoAliasSetRT = true; 818 for (auto Access : Retries) 819 if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, 820 TheLoop, RunningDepId, ASId, 821 ShouldCheckWrap, /*Assume=*/true)) { 822 CanDoAliasSetRT = false; 823 break; 824 } 825 } 826 827 CanDoRT &= CanDoAliasSetRT; 828 MayNeedRTCheck |= NeedsAliasSetRTCheck; 829 ++ASId; 830 } 831 832 // If the pointers that we would use for the bounds comparison have different 833 // address spaces, assume the values aren't directly comparable, so we can't 834 // use them for the runtime check. We also have to assume they could 835 // overlap. In the future there should be metadata for whether address spaces 836 // are disjoint. 837 unsigned NumPointers = RtCheck.Pointers.size(); 838 for (unsigned i = 0; i < NumPointers; ++i) { 839 for (unsigned j = i + 1; j < NumPointers; ++j) { 840 // Only need to check pointers between two different dependency sets. 841 if (RtCheck.Pointers[i].DependencySetId == 842 RtCheck.Pointers[j].DependencySetId) 843 continue; 844 // Only need to check pointers in the same alias set. 845 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId) 846 continue; 847 848 Value *PtrI = RtCheck.Pointers[i].PointerValue; 849 Value *PtrJ = RtCheck.Pointers[j].PointerValue; 850 851 unsigned ASi = PtrI->getType()->getPointerAddressSpace(); 852 unsigned ASj = PtrJ->getType()->getPointerAddressSpace(); 853 if (ASi != ASj) { 854 LLVM_DEBUG( 855 dbgs() << "LAA: Runtime check would require comparison between" 856 " different address spaces\n"); 857 return false; 858 } 859 } 860 } 861 862 if (MayNeedRTCheck && CanDoRT) 863 RtCheck.generateChecks(DepCands, IsDepCheckNeeded); 864 865 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks() 866 << " pointer comparisons.\n"); 867 868 // If we can do run-time checks, but there are no checks, no runtime checks 869 // are needed. This can happen when all pointers point to the same underlying 870 // object for example. 871 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck; 872 873 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT; 874 if (!CanDoRTIfNeeded) 875 RtCheck.reset(); 876 return CanDoRTIfNeeded; 877 } 878 879 void AccessAnalysis::processMemAccesses() { 880 // We process the set twice: first we process read-write pointers, last we 881 // process read-only pointers. This allows us to skip dependence tests for 882 // read-only pointers. 883 884 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n"); 885 LLVM_DEBUG(dbgs() << " AST: "; AST.dump()); 886 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n"); 887 LLVM_DEBUG({ 888 for (auto A : Accesses) 889 dbgs() << "\t" << *A.getPointer() << " (" << 890 (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ? 891 "read-only" : "read")) << ")\n"; 892 }); 893 894 // The AliasSetTracker has nicely partitioned our pointers by metadata 895 // compatibility and potential for underlying-object overlap. As a result, we 896 // only need to check for potential pointer dependencies within each alias 897 // set. 898 for (const auto &AS : AST) { 899 // Note that both the alias-set tracker and the alias sets themselves used 900 // linked lists internally and so the iteration order here is deterministic 901 // (matching the original instruction order within each set). 902 903 bool SetHasWrite = false; 904 905 // Map of pointers to last access encountered. 906 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap; 907 UnderlyingObjToAccessMap ObjToLastAccess; 908 909 // Set of access to check after all writes have been processed. 910 PtrAccessSet DeferredAccesses; 911 912 // Iterate over each alias set twice, once to process read/write pointers, 913 // and then to process read-only pointers. 914 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) { 915 bool UseDeferred = SetIteration > 0; 916 PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses; 917 918 for (const auto &AV : AS) { 919 Value *Ptr = AV.getValue(); 920 921 // For a single memory access in AliasSetTracker, Accesses may contain 922 // both read and write, and they both need to be handled for CheckDeps. 923 for (const auto &AC : S) { 924 if (AC.getPointer() != Ptr) 925 continue; 926 927 bool IsWrite = AC.getInt(); 928 929 // If we're using the deferred access set, then it contains only 930 // reads. 931 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite; 932 if (UseDeferred && !IsReadOnlyPtr) 933 continue; 934 // Otherwise, the pointer must be in the PtrAccessSet, either as a 935 // read or a write. 936 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite || 937 S.count(MemAccessInfo(Ptr, false))) && 938 "Alias-set pointer not in the access set?"); 939 940 MemAccessInfo Access(Ptr, IsWrite); 941 DepCands.insert(Access); 942 943 // Memorize read-only pointers for later processing and skip them in 944 // the first round (they need to be checked after we have seen all 945 // write pointers). Note: we also mark pointer that are not 946 // consecutive as "read-only" pointers (so that we check 947 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite". 948 if (!UseDeferred && IsReadOnlyPtr) { 949 DeferredAccesses.insert(Access); 950 continue; 951 } 952 953 // If this is a write - check other reads and writes for conflicts. If 954 // this is a read only check other writes for conflicts (but only if 955 // there is no other write to the ptr - this is an optimization to 956 // catch "a[i] = a[i] + " without having to do a dependence check). 957 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) { 958 CheckDeps.push_back(Access); 959 IsRTCheckAnalysisNeeded = true; 960 } 961 962 if (IsWrite) 963 SetHasWrite = true; 964 965 // Create sets of pointers connected by a shared alias set and 966 // underlying object. 967 typedef SmallVector<const Value *, 16> ValueVector; 968 ValueVector TempObjects; 969 970 getUnderlyingObjects(Ptr, TempObjects, LI); 971 LLVM_DEBUG(dbgs() 972 << "Underlying objects for pointer " << *Ptr << "\n"); 973 for (const Value *UnderlyingObj : TempObjects) { 974 // nullptr never alias, don't join sets for pointer that have "null" 975 // in their UnderlyingObjects list. 976 if (isa<ConstantPointerNull>(UnderlyingObj) && 977 !NullPointerIsDefined( 978 TheLoop->getHeader()->getParent(), 979 UnderlyingObj->getType()->getPointerAddressSpace())) 980 continue; 981 982 UnderlyingObjToAccessMap::iterator Prev = 983 ObjToLastAccess.find(UnderlyingObj); 984 if (Prev != ObjToLastAccess.end()) 985 DepCands.unionSets(Access, Prev->second); 986 987 ObjToLastAccess[UnderlyingObj] = Access; 988 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n"); 989 } 990 } 991 } 992 } 993 } 994 } 995 996 static bool isInBoundsGep(Value *Ptr) { 997 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) 998 return GEP->isInBounds(); 999 return false; 1000 } 1001 1002 /// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping, 1003 /// i.e. monotonically increasing/decreasing. 1004 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, 1005 PredicatedScalarEvolution &PSE, const Loop *L) { 1006 // FIXME: This should probably only return true for NUW. 1007 if (AR->getNoWrapFlags(SCEV::NoWrapMask)) 1008 return true; 1009 1010 // Scalar evolution does not propagate the non-wrapping flags to values that 1011 // are derived from a non-wrapping induction variable because non-wrapping 1012 // could be flow-sensitive. 1013 // 1014 // Look through the potentially overflowing instruction to try to prove 1015 // non-wrapping for the *specific* value of Ptr. 1016 1017 // The arithmetic implied by an inbounds GEP can't overflow. 1018 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr); 1019 if (!GEP || !GEP->isInBounds()) 1020 return false; 1021 1022 // Make sure there is only one non-const index and analyze that. 1023 Value *NonConstIndex = nullptr; 1024 for (Value *Index : GEP->indices()) 1025 if (!isa<ConstantInt>(Index)) { 1026 if (NonConstIndex) 1027 return false; 1028 NonConstIndex = Index; 1029 } 1030 if (!NonConstIndex) 1031 // The recurrence is on the pointer, ignore for now. 1032 return false; 1033 1034 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW 1035 // AddRec using a NSW operation. 1036 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex)) 1037 if (OBO->hasNoSignedWrap() && 1038 // Assume constant for other the operand so that the AddRec can be 1039 // easily found. 1040 isa<ConstantInt>(OBO->getOperand(1))) { 1041 auto *OpScev = PSE.getSCEV(OBO->getOperand(0)); 1042 1043 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev)) 1044 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW); 1045 } 1046 1047 return false; 1048 } 1049 1050 /// Check whether the access through \p Ptr has a constant stride. 1051 int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, 1052 Value *Ptr, const Loop *Lp, 1053 const ValueToValueMap &StridesMap, bool Assume, 1054 bool ShouldCheckWrap) { 1055 Type *Ty = Ptr->getType(); 1056 assert(Ty->isPointerTy() && "Unexpected non-ptr"); 1057 1058 if (isa<ScalableVectorType>(AccessTy)) { 1059 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy 1060 << "\n"); 1061 return 0; 1062 } 1063 1064 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr); 1065 1066 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 1067 if (Assume && !AR) 1068 AR = PSE.getAsAddRec(Ptr); 1069 1070 if (!AR) { 1071 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr 1072 << " SCEV: " << *PtrScev << "\n"); 1073 return 0; 1074 } 1075 1076 // The access function must stride over the innermost loop. 1077 if (Lp != AR->getLoop()) { 1078 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " 1079 << *Ptr << " SCEV: " << *AR << "\n"); 1080 return 0; 1081 } 1082 1083 // The address calculation must not wrap. Otherwise, a dependence could be 1084 // inverted. 1085 // An inbounds getelementptr that is a AddRec with a unit stride 1086 // cannot wrap per definition. The unit stride requirement is checked later. 1087 // An getelementptr without an inbounds attribute and unit stride would have 1088 // to access the pointer value "0" which is undefined behavior in address 1089 // space 0, therefore we can also vectorize this case. 1090 unsigned AddrSpace = Ty->getPointerAddressSpace(); 1091 bool IsInBoundsGEP = isInBoundsGep(Ptr); 1092 bool IsNoWrapAddRec = !ShouldCheckWrap || 1093 PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) || 1094 isNoWrapAddRec(Ptr, AR, PSE, Lp); 1095 if (!IsNoWrapAddRec && !IsInBoundsGEP && 1096 NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace)) { 1097 if (Assume) { 1098 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 1099 IsNoWrapAddRec = true; 1100 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n" 1101 << "LAA: Pointer: " << *Ptr << "\n" 1102 << "LAA: SCEV: " << *AR << "\n" 1103 << "LAA: Added an overflow assumption\n"); 1104 } else { 1105 LLVM_DEBUG( 1106 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space " 1107 << *Ptr << " SCEV: " << *AR << "\n"); 1108 return 0; 1109 } 1110 } 1111 1112 // Check the step is constant. 1113 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE()); 1114 1115 // Calculate the pointer stride and check if it is constant. 1116 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); 1117 if (!C) { 1118 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr 1119 << " SCEV: " << *AR << "\n"); 1120 return 0; 1121 } 1122 1123 auto &DL = Lp->getHeader()->getModule()->getDataLayout(); 1124 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy); 1125 int64_t Size = AllocSize.getFixedSize(); 1126 const APInt &APStepVal = C->getAPInt(); 1127 1128 // Huge step value - give up. 1129 if (APStepVal.getBitWidth() > 64) 1130 return 0; 1131 1132 int64_t StepVal = APStepVal.getSExtValue(); 1133 1134 // Strided access. 1135 int64_t Stride = StepVal / Size; 1136 int64_t Rem = StepVal % Size; 1137 if (Rem) 1138 return 0; 1139 1140 // If the SCEV could wrap but we have an inbounds gep with a unit stride we 1141 // know we can't "wrap around the address space". In case of address space 1142 // zero we know that this won't happen without triggering undefined behavior. 1143 if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 && 1144 (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(), 1145 AddrSpace))) { 1146 if (Assume) { 1147 // We can avoid this case by adding a run-time check. 1148 LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either " 1149 << "inbounds or in address space 0 may wrap:\n" 1150 << "LAA: Pointer: " << *Ptr << "\n" 1151 << "LAA: SCEV: " << *AR << "\n" 1152 << "LAA: Added an overflow assumption\n"); 1153 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 1154 } else 1155 return 0; 1156 } 1157 1158 return Stride; 1159 } 1160 1161 Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, 1162 Value *PtrB, const DataLayout &DL, 1163 ScalarEvolution &SE, bool StrictCheck, 1164 bool CheckType) { 1165 assert(PtrA && PtrB && "Expected non-nullptr pointers."); 1166 assert(cast<PointerType>(PtrA->getType()) 1167 ->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type"); 1168 assert(cast<PointerType>(PtrB->getType()) 1169 ->isOpaqueOrPointeeTypeMatches(ElemTyB) && "Wrong PtrB type"); 1170 1171 // Make sure that A and B are different pointers. 1172 if (PtrA == PtrB) 1173 return 0; 1174 1175 // Make sure that the element types are the same if required. 1176 if (CheckType && ElemTyA != ElemTyB) 1177 return None; 1178 1179 unsigned ASA = PtrA->getType()->getPointerAddressSpace(); 1180 unsigned ASB = PtrB->getType()->getPointerAddressSpace(); 1181 1182 // Check that the address spaces match. 1183 if (ASA != ASB) 1184 return None; 1185 unsigned IdxWidth = DL.getIndexSizeInBits(ASA); 1186 1187 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0); 1188 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); 1189 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); 1190 1191 int Val; 1192 if (PtrA1 == PtrB1) { 1193 // Retrieve the address space again as pointer stripping now tracks through 1194 // `addrspacecast`. 1195 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace(); 1196 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace(); 1197 // Check that the address spaces match and that the pointers are valid. 1198 if (ASA != ASB) 1199 return None; 1200 1201 IdxWidth = DL.getIndexSizeInBits(ASA); 1202 OffsetA = OffsetA.sextOrTrunc(IdxWidth); 1203 OffsetB = OffsetB.sextOrTrunc(IdxWidth); 1204 1205 OffsetB -= OffsetA; 1206 Val = OffsetB.getSExtValue(); 1207 } else { 1208 // Otherwise compute the distance with SCEV between the base pointers. 1209 const SCEV *PtrSCEVA = SE.getSCEV(PtrA); 1210 const SCEV *PtrSCEVB = SE.getSCEV(PtrB); 1211 const auto *Diff = 1212 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA)); 1213 if (!Diff) 1214 return None; 1215 Val = Diff->getAPInt().getSExtValue(); 1216 } 1217 int Size = DL.getTypeStoreSize(ElemTyA); 1218 int Dist = Val / Size; 1219 1220 // Ensure that the calculated distance matches the type-based one after all 1221 // the bitcasts removal in the provided pointers. 1222 if (!StrictCheck || Dist * Size == Val) 1223 return Dist; 1224 return None; 1225 } 1226 1227 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, 1228 const DataLayout &DL, ScalarEvolution &SE, 1229 SmallVectorImpl<unsigned> &SortedIndices) { 1230 assert(llvm::all_of( 1231 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) && 1232 "Expected list of pointer operands."); 1233 // Walk over the pointers, and map each of them to an offset relative to 1234 // first pointer in the array. 1235 Value *Ptr0 = VL[0]; 1236 1237 using DistOrdPair = std::pair<int64_t, int>; 1238 auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) { 1239 return L.first < R.first; 1240 }; 1241 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare); 1242 Offsets.emplace(0, 0); 1243 int Cnt = 1; 1244 bool IsConsecutive = true; 1245 for (auto *Ptr : VL.drop_front()) { 1246 Optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE, 1247 /*StrictCheck=*/true); 1248 if (!Diff) 1249 return false; 1250 1251 // Check if the pointer with the same offset is found. 1252 int64_t Offset = *Diff; 1253 auto Res = Offsets.emplace(Offset, Cnt); 1254 if (!Res.second) 1255 return false; 1256 // Consecutive order if the inserted element is the last one. 1257 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end(); 1258 ++Cnt; 1259 } 1260 SortedIndices.clear(); 1261 if (!IsConsecutive) { 1262 // Fill SortedIndices array only if it is non-consecutive. 1263 SortedIndices.resize(VL.size()); 1264 Cnt = 0; 1265 for (const std::pair<int64_t, int> &Pair : Offsets) { 1266 SortedIndices[Cnt] = Pair.second; 1267 ++Cnt; 1268 } 1269 } 1270 return true; 1271 } 1272 1273 /// Returns true if the memory operations \p A and \p B are consecutive. 1274 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, 1275 ScalarEvolution &SE, bool CheckType) { 1276 Value *PtrA = getLoadStorePointerOperand(A); 1277 Value *PtrB = getLoadStorePointerOperand(B); 1278 if (!PtrA || !PtrB) 1279 return false; 1280 Type *ElemTyA = getLoadStoreType(A); 1281 Type *ElemTyB = getLoadStoreType(B); 1282 Optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE, 1283 /*StrictCheck=*/true, CheckType); 1284 return Diff && *Diff == 1; 1285 } 1286 1287 void MemoryDepChecker::addAccess(StoreInst *SI) { 1288 visitPointers(SI->getPointerOperand(), *InnermostLoop, 1289 [this, SI](Value *Ptr) { 1290 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx); 1291 InstMap.push_back(SI); 1292 ++AccessIdx; 1293 }); 1294 } 1295 1296 void MemoryDepChecker::addAccess(LoadInst *LI) { 1297 visitPointers(LI->getPointerOperand(), *InnermostLoop, 1298 [this, LI](Value *Ptr) { 1299 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx); 1300 InstMap.push_back(LI); 1301 ++AccessIdx; 1302 }); 1303 } 1304 1305 MemoryDepChecker::VectorizationSafetyStatus 1306 MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) { 1307 switch (Type) { 1308 case NoDep: 1309 case Forward: 1310 case BackwardVectorizable: 1311 return VectorizationSafetyStatus::Safe; 1312 1313 case Unknown: 1314 return VectorizationSafetyStatus::PossiblySafeWithRtChecks; 1315 case ForwardButPreventsForwarding: 1316 case Backward: 1317 case BackwardVectorizableButPreventsForwarding: 1318 return VectorizationSafetyStatus::Unsafe; 1319 } 1320 llvm_unreachable("unexpected DepType!"); 1321 } 1322 1323 bool MemoryDepChecker::Dependence::isBackward() const { 1324 switch (Type) { 1325 case NoDep: 1326 case Forward: 1327 case ForwardButPreventsForwarding: 1328 case Unknown: 1329 return false; 1330 1331 case BackwardVectorizable: 1332 case Backward: 1333 case BackwardVectorizableButPreventsForwarding: 1334 return true; 1335 } 1336 llvm_unreachable("unexpected DepType!"); 1337 } 1338 1339 bool MemoryDepChecker::Dependence::isPossiblyBackward() const { 1340 return isBackward() || Type == Unknown; 1341 } 1342 1343 bool MemoryDepChecker::Dependence::isForward() const { 1344 switch (Type) { 1345 case Forward: 1346 case ForwardButPreventsForwarding: 1347 return true; 1348 1349 case NoDep: 1350 case Unknown: 1351 case BackwardVectorizable: 1352 case Backward: 1353 case BackwardVectorizableButPreventsForwarding: 1354 return false; 1355 } 1356 llvm_unreachable("unexpected DepType!"); 1357 } 1358 1359 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance, 1360 uint64_t TypeByteSize) { 1361 // If loads occur at a distance that is not a multiple of a feasible vector 1362 // factor store-load forwarding does not take place. 1363 // Positive dependences might cause troubles because vectorizing them might 1364 // prevent store-load forwarding making vectorized code run a lot slower. 1365 // a[i] = a[i-3] ^ a[i-8]; 1366 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and 1367 // hence on your typical architecture store-load forwarding does not take 1368 // place. Vectorizing in such cases does not make sense. 1369 // Store-load forwarding distance. 1370 1371 // After this many iterations store-to-load forwarding conflicts should not 1372 // cause any slowdowns. 1373 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize; 1374 // Maximum vector factor. 1375 uint64_t MaxVFWithoutSLForwardIssues = std::min( 1376 VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes); 1377 1378 // Compute the smallest VF at which the store and load would be misaligned. 1379 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues; 1380 VF *= 2) { 1381 // If the number of vector iteration between the store and the load are 1382 // small we could incur conflicts. 1383 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) { 1384 MaxVFWithoutSLForwardIssues = (VF >> 1); 1385 break; 1386 } 1387 } 1388 1389 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) { 1390 LLVM_DEBUG( 1391 dbgs() << "LAA: Distance " << Distance 1392 << " that could cause a store-load forwarding conflict\n"); 1393 return true; 1394 } 1395 1396 if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes && 1397 MaxVFWithoutSLForwardIssues != 1398 VectorizerParams::MaxVectorWidth * TypeByteSize) 1399 MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues; 1400 return false; 1401 } 1402 1403 void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) { 1404 if (Status < S) 1405 Status = S; 1406 } 1407 1408 /// Given a non-constant (unknown) dependence-distance \p Dist between two 1409 /// memory accesses, that have the same stride whose absolute value is given 1410 /// in \p Stride, and that have the same type size \p TypeByteSize, 1411 /// in a loop whose takenCount is \p BackedgeTakenCount, check if it is 1412 /// possible to prove statically that the dependence distance is larger 1413 /// than the range that the accesses will travel through the execution of 1414 /// the loop. If so, return true; false otherwise. This is useful for 1415 /// example in loops such as the following (PR31098): 1416 /// for (i = 0; i < D; ++i) { 1417 /// = out[i]; 1418 /// out[i+D] = 1419 /// } 1420 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, 1421 const SCEV &BackedgeTakenCount, 1422 const SCEV &Dist, uint64_t Stride, 1423 uint64_t TypeByteSize) { 1424 1425 // If we can prove that 1426 // (**) |Dist| > BackedgeTakenCount * Step 1427 // where Step is the absolute stride of the memory accesses in bytes, 1428 // then there is no dependence. 1429 // 1430 // Rationale: 1431 // We basically want to check if the absolute distance (|Dist/Step|) 1432 // is >= the loop iteration count (or > BackedgeTakenCount). 1433 // This is equivalent to the Strong SIV Test (Practical Dependence Testing, 1434 // Section 4.2.1); Note, that for vectorization it is sufficient to prove 1435 // that the dependence distance is >= VF; This is checked elsewhere. 1436 // But in some cases we can prune unknown dependence distances early, and 1437 // even before selecting the VF, and without a runtime test, by comparing 1438 // the distance against the loop iteration count. Since the vectorized code 1439 // will be executed only if LoopCount >= VF, proving distance >= LoopCount 1440 // also guarantees that distance >= VF. 1441 // 1442 const uint64_t ByteStride = Stride * TypeByteSize; 1443 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride); 1444 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step); 1445 1446 const SCEV *CastedDist = &Dist; 1447 const SCEV *CastedProduct = Product; 1448 uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType()); 1449 uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType()); 1450 1451 // The dependence distance can be positive/negative, so we sign extend Dist; 1452 // The multiplication of the absolute stride in bytes and the 1453 // backedgeTakenCount is non-negative, so we zero extend Product. 1454 if (DistTypeSize > ProductTypeSize) 1455 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType()); 1456 else 1457 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType()); 1458 1459 // Is Dist - (BackedgeTakenCount * Step) > 0 ? 1460 // (If so, then we have proven (**) because |Dist| >= Dist) 1461 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct); 1462 if (SE.isKnownPositive(Minus)) 1463 return true; 1464 1465 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ? 1466 // (If so, then we have proven (**) because |Dist| >= -1*Dist) 1467 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist); 1468 Minus = SE.getMinusSCEV(NegDist, CastedProduct); 1469 if (SE.isKnownPositive(Minus)) 1470 return true; 1471 1472 return false; 1473 } 1474 1475 /// Check the dependence for two accesses with the same stride \p Stride. 1476 /// \p Distance is the positive distance and \p TypeByteSize is type size in 1477 /// bytes. 1478 /// 1479 /// \returns true if they are independent. 1480 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, 1481 uint64_t TypeByteSize) { 1482 assert(Stride > 1 && "The stride must be greater than 1"); 1483 assert(TypeByteSize > 0 && "The type size in byte must be non-zero"); 1484 assert(Distance > 0 && "The distance must be non-zero"); 1485 1486 // Skip if the distance is not multiple of type byte size. 1487 if (Distance % TypeByteSize) 1488 return false; 1489 1490 uint64_t ScaledDist = Distance / TypeByteSize; 1491 1492 // No dependence if the scaled distance is not multiple of the stride. 1493 // E.g. 1494 // for (i = 0; i < 1024 ; i += 4) 1495 // A[i+2] = A[i] + 1; 1496 // 1497 // Two accesses in memory (scaled distance is 2, stride is 4): 1498 // | A[0] | | | | A[4] | | | | 1499 // | | | A[2] | | | | A[6] | | 1500 // 1501 // E.g. 1502 // for (i = 0; i < 1024 ; i += 3) 1503 // A[i+4] = A[i] + 1; 1504 // 1505 // Two accesses in memory (scaled distance is 4, stride is 3): 1506 // | A[0] | | | A[3] | | | A[6] | | | 1507 // | | | | | A[4] | | | A[7] | | 1508 return ScaledDist % Stride; 1509 } 1510 1511 MemoryDepChecker::Dependence::DepType 1512 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, 1513 const MemAccessInfo &B, unsigned BIdx, 1514 const ValueToValueMap &Strides) { 1515 assert (AIdx < BIdx && "Must pass arguments in program order"); 1516 1517 Value *APtr = A.getPointer(); 1518 Value *BPtr = B.getPointer(); 1519 bool AIsWrite = A.getInt(); 1520 bool BIsWrite = B.getInt(); 1521 Type *ATy = APtr->getType()->getPointerElementType(); 1522 Type *BTy = BPtr->getType()->getPointerElementType(); 1523 1524 // Two reads are independent. 1525 if (!AIsWrite && !BIsWrite) 1526 return Dependence::NoDep; 1527 1528 // We cannot check pointers in different address spaces. 1529 if (APtr->getType()->getPointerAddressSpace() != 1530 BPtr->getType()->getPointerAddressSpace()) 1531 return Dependence::Unknown; 1532 1533 int64_t StrideAPtr = 1534 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true); 1535 int64_t StrideBPtr = 1536 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true); 1537 1538 const SCEV *Src = PSE.getSCEV(APtr); 1539 const SCEV *Sink = PSE.getSCEV(BPtr); 1540 1541 // If the induction step is negative we have to invert source and sink of the 1542 // dependence. 1543 if (StrideAPtr < 0) { 1544 std::swap(APtr, BPtr); 1545 std::swap(ATy, BTy); 1546 std::swap(Src, Sink); 1547 std::swap(AIsWrite, BIsWrite); 1548 std::swap(AIdx, BIdx); 1549 std::swap(StrideAPtr, StrideBPtr); 1550 } 1551 1552 const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src); 1553 1554 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink 1555 << "(Induction step: " << StrideAPtr << ")\n"); 1556 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to " 1557 << *InstMap[BIdx] << ": " << *Dist << "\n"); 1558 1559 // Need accesses with constant stride. We don't want to vectorize 1560 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in 1561 // the address space. 1562 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){ 1563 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n"); 1564 return Dependence::Unknown; 1565 } 1566 1567 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); 1568 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy); 1569 bool HasSameSize = 1570 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy); 1571 uint64_t Stride = std::abs(StrideAPtr); 1572 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist); 1573 if (!C) { 1574 if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize && 1575 isSafeDependenceDistance(DL, *(PSE.getSE()), 1576 *(PSE.getBackedgeTakenCount()), *Dist, Stride, 1577 TypeByteSize)) 1578 return Dependence::NoDep; 1579 1580 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n"); 1581 FoundNonConstantDistanceDependence = true; 1582 return Dependence::Unknown; 1583 } 1584 1585 const APInt &Val = C->getAPInt(); 1586 int64_t Distance = Val.getSExtValue(); 1587 1588 // Attempt to prove strided accesses independent. 1589 if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize && 1590 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) { 1591 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); 1592 return Dependence::NoDep; 1593 } 1594 1595 // Negative distances are not plausible dependencies. 1596 if (Val.isNegative()) { 1597 bool IsTrueDataDependence = (AIsWrite && !BIsWrite); 1598 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1599 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) || 1600 !HasSameSize)) { 1601 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n"); 1602 return Dependence::ForwardButPreventsForwarding; 1603 } 1604 1605 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n"); 1606 return Dependence::Forward; 1607 } 1608 1609 // Write to the same location with the same size. 1610 if (Val == 0) { 1611 if (HasSameSize) 1612 return Dependence::Forward; 1613 LLVM_DEBUG( 1614 dbgs() << "LAA: Zero dependence difference but different type sizes\n"); 1615 return Dependence::Unknown; 1616 } 1617 1618 assert(Val.isStrictlyPositive() && "Expect a positive value"); 1619 1620 if (!HasSameSize) { 1621 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with " 1622 "different type sizes\n"); 1623 return Dependence::Unknown; 1624 } 1625 1626 // Bail out early if passed-in parameters make vectorization not feasible. 1627 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ? 1628 VectorizerParams::VectorizationFactor : 1); 1629 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ? 1630 VectorizerParams::VectorizationInterleave : 1); 1631 // The minimum number of iterations for a vectorized/unrolled version. 1632 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U); 1633 1634 // It's not vectorizable if the distance is smaller than the minimum distance 1635 // needed for a vectroized/unrolled version. Vectorizing one iteration in 1636 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs 1637 // TypeByteSize (No need to plus the last gap distance). 1638 // 1639 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 1640 // foo(int *A) { 1641 // int *B = (int *)((char *)A + 14); 1642 // for (i = 0 ; i < 1024 ; i += 2) 1643 // B[i] = A[i] + 1; 1644 // } 1645 // 1646 // Two accesses in memory (stride is 2): 1647 // | A[0] | | A[2] | | A[4] | | A[6] | | 1648 // | B[0] | | B[2] | | B[4] | 1649 // 1650 // Distance needs for vectorizing iterations except the last iteration: 1651 // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4. 1652 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4. 1653 // 1654 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is 1655 // 12, which is less than distance. 1656 // 1657 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4), 1658 // the minimum distance needed is 28, which is greater than distance. It is 1659 // not safe to do vectorization. 1660 uint64_t MinDistanceNeeded = 1661 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize; 1662 if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) { 1663 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance " 1664 << Distance << '\n'); 1665 return Dependence::Backward; 1666 } 1667 1668 // Unsafe if the minimum distance needed is greater than max safe distance. 1669 if (MinDistanceNeeded > MaxSafeDepDistBytes) { 1670 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least " 1671 << MinDistanceNeeded << " size in bytes"); 1672 return Dependence::Backward; 1673 } 1674 1675 // Positive distance bigger than max vectorization factor. 1676 // FIXME: Should use max factor instead of max distance in bytes, which could 1677 // not handle different types. 1678 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 1679 // void foo (int *A, char *B) { 1680 // for (unsigned i = 0; i < 1024; i++) { 1681 // A[i+2] = A[i] + 1; 1682 // B[i+2] = B[i] + 1; 1683 // } 1684 // } 1685 // 1686 // This case is currently unsafe according to the max safe distance. If we 1687 // analyze the two accesses on array B, the max safe dependence distance 1688 // is 2. Then we analyze the accesses on array A, the minimum distance needed 1689 // is 8, which is less than 2 and forbidden vectorization, But actually 1690 // both A and B could be vectorized by 2 iterations. 1691 MaxSafeDepDistBytes = 1692 std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes); 1693 1694 bool IsTrueDataDependence = (!AIsWrite && BIsWrite); 1695 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1696 couldPreventStoreLoadForward(Distance, TypeByteSize)) 1697 return Dependence::BackwardVectorizableButPreventsForwarding; 1698 1699 uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride); 1700 LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue() 1701 << " with max VF = " << MaxVF << '\n'); 1702 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8; 1703 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits); 1704 return Dependence::BackwardVectorizable; 1705 } 1706 1707 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets, 1708 MemAccessInfoList &CheckDeps, 1709 const ValueToValueMap &Strides) { 1710 1711 MaxSafeDepDistBytes = -1; 1712 SmallPtrSet<MemAccessInfo, 8> Visited; 1713 for (MemAccessInfo CurAccess : CheckDeps) { 1714 if (Visited.count(CurAccess)) 1715 continue; 1716 1717 // Get the relevant memory access set. 1718 EquivalenceClasses<MemAccessInfo>::iterator I = 1719 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess)); 1720 1721 // Check accesses within this set. 1722 EquivalenceClasses<MemAccessInfo>::member_iterator AI = 1723 AccessSets.member_begin(I); 1724 EquivalenceClasses<MemAccessInfo>::member_iterator AE = 1725 AccessSets.member_end(); 1726 1727 // Check every access pair. 1728 while (AI != AE) { 1729 Visited.insert(*AI); 1730 bool AIIsWrite = AI->getInt(); 1731 // Check loads only against next equivalent class, but stores also against 1732 // other stores in the same equivalence class - to the same address. 1733 EquivalenceClasses<MemAccessInfo>::member_iterator OI = 1734 (AIIsWrite ? AI : std::next(AI)); 1735 while (OI != AE) { 1736 // Check every accessing instruction pair in program order. 1737 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(), 1738 I1E = Accesses[*AI].end(); I1 != I1E; ++I1) 1739 // Scan all accesses of another equivalence class, but only the next 1740 // accesses of the same equivalent class. 1741 for (std::vector<unsigned>::iterator 1742 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()), 1743 I2E = (OI == AI ? I1E : Accesses[*OI].end()); 1744 I2 != I2E; ++I2) { 1745 auto A = std::make_pair(&*AI, *I1); 1746 auto B = std::make_pair(&*OI, *I2); 1747 1748 assert(*I1 != *I2); 1749 if (*I1 > *I2) 1750 std::swap(A, B); 1751 1752 Dependence::DepType Type = 1753 isDependent(*A.first, A.second, *B.first, B.second, Strides); 1754 mergeInStatus(Dependence::isSafeForVectorization(Type)); 1755 1756 // Gather dependences unless we accumulated MaxDependences 1757 // dependences. In that case return as soon as we find the first 1758 // unsafe dependence. This puts a limit on this quadratic 1759 // algorithm. 1760 if (RecordDependences) { 1761 if (Type != Dependence::NoDep) 1762 Dependences.push_back(Dependence(A.second, B.second, Type)); 1763 1764 if (Dependences.size() >= MaxDependences) { 1765 RecordDependences = false; 1766 Dependences.clear(); 1767 LLVM_DEBUG(dbgs() 1768 << "Too many dependences, stopped recording\n"); 1769 } 1770 } 1771 if (!RecordDependences && !isSafeForVectorization()) 1772 return false; 1773 } 1774 ++OI; 1775 } 1776 AI++; 1777 } 1778 } 1779 1780 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n"); 1781 return isSafeForVectorization(); 1782 } 1783 1784 SmallVector<Instruction *, 4> 1785 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const { 1786 MemAccessInfo Access(Ptr, isWrite); 1787 auto &IndexVector = Accesses.find(Access)->second; 1788 1789 SmallVector<Instruction *, 4> Insts; 1790 transform(IndexVector, 1791 std::back_inserter(Insts), 1792 [&](unsigned Idx) { return this->InstMap[Idx]; }); 1793 return Insts; 1794 } 1795 1796 const char *MemoryDepChecker::Dependence::DepName[] = { 1797 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward", 1798 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"}; 1799 1800 void MemoryDepChecker::Dependence::print( 1801 raw_ostream &OS, unsigned Depth, 1802 const SmallVectorImpl<Instruction *> &Instrs) const { 1803 OS.indent(Depth) << DepName[Type] << ":\n"; 1804 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n"; 1805 OS.indent(Depth + 2) << *Instrs[Destination] << "\n"; 1806 } 1807 1808 bool LoopAccessInfo::canAnalyzeLoop() { 1809 // We need to have a loop header. 1810 LLVM_DEBUG(dbgs() << "LAA: Found a loop in " 1811 << TheLoop->getHeader()->getParent()->getName() << ": " 1812 << TheLoop->getHeader()->getName() << '\n'); 1813 1814 // We can only analyze innermost loops. 1815 if (!TheLoop->isInnermost()) { 1816 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n"); 1817 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop"; 1818 return false; 1819 } 1820 1821 // We must have a single backedge. 1822 if (TheLoop->getNumBackEdges() != 1) { 1823 LLVM_DEBUG( 1824 dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1825 recordAnalysis("CFGNotUnderstood") 1826 << "loop control flow is not understood by analyzer"; 1827 return false; 1828 } 1829 1830 // ScalarEvolution needs to be able to find the exit count. 1831 const SCEV *ExitCount = PSE->getBackedgeTakenCount(); 1832 if (isa<SCEVCouldNotCompute>(ExitCount)) { 1833 recordAnalysis("CantComputeNumberOfIterations") 1834 << "could not determine number of loop iterations"; 1835 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n"); 1836 return false; 1837 } 1838 1839 return true; 1840 } 1841 1842 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI, 1843 const TargetLibraryInfo *TLI, 1844 DominatorTree *DT) { 1845 typedef SmallPtrSet<Value*, 16> ValueSet; 1846 1847 // Holds the Load and Store instructions. 1848 SmallVector<LoadInst *, 16> Loads; 1849 SmallVector<StoreInst *, 16> Stores; 1850 1851 // Holds all the different accesses in the loop. 1852 unsigned NumReads = 0; 1853 unsigned NumReadWrites = 0; 1854 1855 bool HasComplexMemInst = false; 1856 1857 // A runtime check is only legal to insert if there are no convergent calls. 1858 HasConvergentOp = false; 1859 1860 PtrRtChecking->Pointers.clear(); 1861 PtrRtChecking->Need = false; 1862 1863 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 1864 1865 const bool EnableMemAccessVersioningOfLoop = 1866 EnableMemAccessVersioning && 1867 !TheLoop->getHeader()->getParent()->hasOptSize(); 1868 1869 // For each block. 1870 for (BasicBlock *BB : TheLoop->blocks()) { 1871 // Scan the BB and collect legal loads and stores. Also detect any 1872 // convergent instructions. 1873 for (Instruction &I : *BB) { 1874 if (auto *Call = dyn_cast<CallBase>(&I)) { 1875 if (Call->isConvergent()) 1876 HasConvergentOp = true; 1877 } 1878 1879 // With both a non-vectorizable memory instruction and a convergent 1880 // operation, found in this loop, no reason to continue the search. 1881 if (HasComplexMemInst && HasConvergentOp) { 1882 CanVecMem = false; 1883 return; 1884 } 1885 1886 // Avoid hitting recordAnalysis multiple times. 1887 if (HasComplexMemInst) 1888 continue; 1889 1890 // If this is a load, save it. If this instruction can read from memory 1891 // but is not a load, then we quit. Notice that we don't handle function 1892 // calls that read or write. 1893 if (I.mayReadFromMemory()) { 1894 // Many math library functions read the rounding mode. We will only 1895 // vectorize a loop if it contains known function calls that don't set 1896 // the flag. Therefore, it is safe to ignore this read from memory. 1897 auto *Call = dyn_cast<CallInst>(&I); 1898 if (Call && getVectorIntrinsicIDForCall(Call, TLI)) 1899 continue; 1900 1901 // If the function has an explicit vectorized counterpart, we can safely 1902 // assume that it can be vectorized. 1903 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() && 1904 !VFDatabase::getMappings(*Call).empty()) 1905 continue; 1906 1907 auto *Ld = dyn_cast<LoadInst>(&I); 1908 if (!Ld) { 1909 recordAnalysis("CantVectorizeInstruction", Ld) 1910 << "instruction cannot be vectorized"; 1911 HasComplexMemInst = true; 1912 continue; 1913 } 1914 if (!Ld->isSimple() && !IsAnnotatedParallel) { 1915 recordAnalysis("NonSimpleLoad", Ld) 1916 << "read with atomic ordering or volatile read"; 1917 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n"); 1918 HasComplexMemInst = true; 1919 continue; 1920 } 1921 NumLoads++; 1922 Loads.push_back(Ld); 1923 DepChecker->addAccess(Ld); 1924 if (EnableMemAccessVersioningOfLoop) 1925 collectStridedAccess(Ld); 1926 continue; 1927 } 1928 1929 // Save 'store' instructions. Abort if other instructions write to memory. 1930 if (I.mayWriteToMemory()) { 1931 auto *St = dyn_cast<StoreInst>(&I); 1932 if (!St) { 1933 recordAnalysis("CantVectorizeInstruction", St) 1934 << "instruction cannot be vectorized"; 1935 HasComplexMemInst = true; 1936 continue; 1937 } 1938 if (!St->isSimple() && !IsAnnotatedParallel) { 1939 recordAnalysis("NonSimpleStore", St) 1940 << "write with atomic ordering or volatile write"; 1941 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n"); 1942 HasComplexMemInst = true; 1943 continue; 1944 } 1945 NumStores++; 1946 Stores.push_back(St); 1947 DepChecker->addAccess(St); 1948 if (EnableMemAccessVersioningOfLoop) 1949 collectStridedAccess(St); 1950 } 1951 } // Next instr. 1952 } // Next block. 1953 1954 if (HasComplexMemInst) { 1955 CanVecMem = false; 1956 return; 1957 } 1958 1959 // Now we have two lists that hold the loads and the stores. 1960 // Next, we find the pointers that they use. 1961 1962 // Check if we see any stores. If there are no stores, then we don't 1963 // care if the pointers are *restrict*. 1964 if (!Stores.size()) { 1965 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n"); 1966 CanVecMem = true; 1967 return; 1968 } 1969 1970 MemoryDepChecker::DepCandidates DependentAccesses; 1971 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE); 1972 1973 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects 1974 // multiple times on the same object. If the ptr is accessed twice, once 1975 // for read and once for write, it will only appear once (on the write 1976 // list). This is okay, since we are going to check for conflicts between 1977 // writes and between reads and writes, but not between reads and reads. 1978 ValueSet Seen; 1979 1980 // Record uniform store addresses to identify if we have multiple stores 1981 // to the same address. 1982 ValueSet UniformStores; 1983 1984 for (StoreInst *ST : Stores) { 1985 Value *Ptr = ST->getPointerOperand(); 1986 1987 if (isUniform(Ptr)) 1988 HasDependenceInvolvingLoopInvariantAddress |= 1989 !UniformStores.insert(Ptr).second; 1990 1991 // If we did *not* see this pointer before, insert it to the read-write 1992 // list. At this phase it is only a 'write' list. 1993 if (Seen.insert(Ptr).second) { 1994 ++NumReadWrites; 1995 1996 MemoryLocation Loc = MemoryLocation::get(ST); 1997 // The TBAA metadata could have a control dependency on the predication 1998 // condition, so we cannot rely on it when determining whether or not we 1999 // need runtime pointer checks. 2000 if (blockNeedsPredication(ST->getParent(), TheLoop, DT)) 2001 Loc.AATags.TBAA = nullptr; 2002 2003 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop, 2004 [&Accesses, Loc](Value *Ptr) { 2005 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr); 2006 Accesses.addStore(NewLoc); 2007 }); 2008 } 2009 } 2010 2011 if (IsAnnotatedParallel) { 2012 LLVM_DEBUG( 2013 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency " 2014 << "checks.\n"); 2015 CanVecMem = true; 2016 return; 2017 } 2018 2019 for (LoadInst *LD : Loads) { 2020 Value *Ptr = LD->getPointerOperand(); 2021 // If we did *not* see this pointer before, insert it to the 2022 // read list. If we *did* see it before, then it is already in 2023 // the read-write list. This allows us to vectorize expressions 2024 // such as A[i] += x; Because the address of A[i] is a read-write 2025 // pointer. This only works if the index of A[i] is consecutive. 2026 // If the address of i is unknown (for example A[B[i]]) then we may 2027 // read a few words, modify, and write a few words, and some of the 2028 // words may be written to the same address. 2029 bool IsReadOnlyPtr = false; 2030 if (Seen.insert(Ptr).second || 2031 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides)) { 2032 ++NumReads; 2033 IsReadOnlyPtr = true; 2034 } 2035 2036 // See if there is an unsafe dependency between a load to a uniform address and 2037 // store to the same uniform address. 2038 if (UniformStores.count(Ptr)) { 2039 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform " 2040 "load and uniform store to the same address!\n"); 2041 HasDependenceInvolvingLoopInvariantAddress = true; 2042 } 2043 2044 MemoryLocation Loc = MemoryLocation::get(LD); 2045 // The TBAA metadata could have a control dependency on the predication 2046 // condition, so we cannot rely on it when determining whether or not we 2047 // need runtime pointer checks. 2048 if (blockNeedsPredication(LD->getParent(), TheLoop, DT)) 2049 Loc.AATags.TBAA = nullptr; 2050 2051 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop, 2052 [&Accesses, Loc, IsReadOnlyPtr](Value *Ptr) { 2053 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr); 2054 Accesses.addLoad(NewLoc, IsReadOnlyPtr); 2055 }); 2056 } 2057 2058 // If we write (or read-write) to a single destination and there are no 2059 // other reads in this loop then is it safe to vectorize. 2060 if (NumReadWrites == 1 && NumReads == 0) { 2061 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n"); 2062 CanVecMem = true; 2063 return; 2064 } 2065 2066 // Build dependence sets and check whether we need a runtime pointer bounds 2067 // check. 2068 Accesses.buildDependenceSets(); 2069 2070 // Find pointers with computable bounds. We are going to use this information 2071 // to place a runtime bound check. 2072 bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), 2073 TheLoop, SymbolicStrides); 2074 if (!CanDoRTIfNeeded) { 2075 recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds"; 2076 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find " 2077 << "the array bounds.\n"); 2078 CanVecMem = false; 2079 return; 2080 } 2081 2082 LLVM_DEBUG( 2083 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n"); 2084 2085 CanVecMem = true; 2086 if (Accesses.isDependencyCheckNeeded()) { 2087 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n"); 2088 CanVecMem = DepChecker->areDepsSafe( 2089 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides); 2090 MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes(); 2091 2092 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) { 2093 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n"); 2094 2095 // Clear the dependency checks. We assume they are not needed. 2096 Accesses.resetDepChecks(*DepChecker); 2097 2098 PtrRtChecking->reset(); 2099 PtrRtChecking->Need = true; 2100 2101 auto *SE = PSE->getSE(); 2102 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop, 2103 SymbolicStrides, true); 2104 2105 // Check that we found the bounds for the pointer. 2106 if (!CanDoRTIfNeeded) { 2107 recordAnalysis("CantCheckMemDepsAtRunTime") 2108 << "cannot check memory dependencies at runtime"; 2109 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n"); 2110 CanVecMem = false; 2111 return; 2112 } 2113 2114 CanVecMem = true; 2115 } 2116 } 2117 2118 if (HasConvergentOp) { 2119 recordAnalysis("CantInsertRuntimeCheckWithConvergent") 2120 << "cannot add control dependency to convergent operation"; 2121 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check " 2122 "would be needed with a convergent operation\n"); 2123 CanVecMem = false; 2124 return; 2125 } 2126 2127 if (CanVecMem) 2128 LLVM_DEBUG( 2129 dbgs() << "LAA: No unsafe dependent memory operations in loop. We" 2130 << (PtrRtChecking->Need ? "" : " don't") 2131 << " need runtime memory checks.\n"); 2132 else { 2133 recordAnalysis("UnsafeMemDep") 2134 << "unsafe dependent memory operations in loop. Use " 2135 "#pragma loop distribute(enable) to allow loop distribution " 2136 "to attempt to isolate the offending operations into a separate " 2137 "loop"; 2138 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n"); 2139 } 2140 } 2141 2142 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, 2143 DominatorTree *DT) { 2144 assert(TheLoop->contains(BB) && "Unknown block used"); 2145 2146 // Blocks that do not dominate the latch need predication. 2147 BasicBlock* Latch = TheLoop->getLoopLatch(); 2148 return !DT->dominates(BB, Latch); 2149 } 2150 2151 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName, 2152 Instruction *I) { 2153 assert(!Report && "Multiple reports generated"); 2154 2155 Value *CodeRegion = TheLoop->getHeader(); 2156 DebugLoc DL = TheLoop->getStartLoc(); 2157 2158 if (I) { 2159 CodeRegion = I->getParent(); 2160 // If there is no debug location attached to the instruction, revert back to 2161 // using the loop's. 2162 if (I->getDebugLoc()) 2163 DL = I->getDebugLoc(); 2164 } 2165 2166 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL, 2167 CodeRegion); 2168 return *Report; 2169 } 2170 2171 bool LoopAccessInfo::isUniform(Value *V) const { 2172 auto *SE = PSE->getSE(); 2173 // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is 2174 // never considered uniform. 2175 // TODO: Is this really what we want? Even without FP SCEV, we may want some 2176 // trivially loop-invariant FP values to be considered uniform. 2177 if (!SE->isSCEVable(V->getType())) 2178 return false; 2179 return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop)); 2180 } 2181 2182 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) { 2183 Value *Ptr = getLoadStorePointerOperand(MemAccess); 2184 if (!Ptr) 2185 return; 2186 2187 Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop); 2188 if (!Stride) 2189 return; 2190 2191 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for " 2192 "versioning:"); 2193 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n"); 2194 2195 // Avoid adding the "Stride == 1" predicate when we know that 2196 // Stride >= Trip-Count. Such a predicate will effectively optimize a single 2197 // or zero iteration loop, as Trip-Count <= Stride == 1. 2198 // 2199 // TODO: We are currently not making a very informed decision on when it is 2200 // beneficial to apply stride versioning. It might make more sense that the 2201 // users of this analysis (such as the vectorizer) will trigger it, based on 2202 // their specific cost considerations; For example, in cases where stride 2203 // versioning does not help resolving memory accesses/dependences, the 2204 // vectorizer should evaluate the cost of the runtime test, and the benefit 2205 // of various possible stride specializations, considering the alternatives 2206 // of using gather/scatters (if available). 2207 2208 const SCEV *StrideExpr = PSE->getSCEV(Stride); 2209 const SCEV *BETakenCount = PSE->getBackedgeTakenCount(); 2210 2211 // Match the types so we can compare the stride and the BETakenCount. 2212 // The Stride can be positive/negative, so we sign extend Stride; 2213 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount. 2214 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 2215 uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType()); 2216 uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType()); 2217 const SCEV *CastedStride = StrideExpr; 2218 const SCEV *CastedBECount = BETakenCount; 2219 ScalarEvolution *SE = PSE->getSE(); 2220 if (BETypeSize >= StrideTypeSize) 2221 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType()); 2222 else 2223 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType()); 2224 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount); 2225 // Since TripCount == BackEdgeTakenCount + 1, checking: 2226 // "Stride >= TripCount" is equivalent to checking: 2227 // Stride - BETakenCount > 0 2228 if (SE->isKnownPositive(StrideMinusBETaken)) { 2229 LLVM_DEBUG( 2230 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the " 2231 "Stride==1 predicate will imply that the loop executes " 2232 "at most once.\n"); 2233 return; 2234 } 2235 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version."); 2236 2237 SymbolicStrides[Ptr] = Stride; 2238 StrideSet.insert(Stride); 2239 } 2240 2241 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE, 2242 const TargetLibraryInfo *TLI, AAResults *AA, 2243 DominatorTree *DT, LoopInfo *LI) 2244 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)), 2245 PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)), 2246 DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) { 2247 if (canAnalyzeLoop()) 2248 analyzeLoop(AA, LI, TLI, DT); 2249 } 2250 2251 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const { 2252 if (CanVecMem) { 2253 OS.indent(Depth) << "Memory dependences are safe"; 2254 if (MaxSafeDepDistBytes != -1ULL) 2255 OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes 2256 << " bytes"; 2257 if (PtrRtChecking->Need) 2258 OS << " with run-time checks"; 2259 OS << "\n"; 2260 } 2261 2262 if (HasConvergentOp) 2263 OS.indent(Depth) << "Has convergent operation in loop\n"; 2264 2265 if (Report) 2266 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n"; 2267 2268 if (auto *Dependences = DepChecker->getDependences()) { 2269 OS.indent(Depth) << "Dependences:\n"; 2270 for (auto &Dep : *Dependences) { 2271 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions()); 2272 OS << "\n"; 2273 } 2274 } else 2275 OS.indent(Depth) << "Too many dependences, not recorded\n"; 2276 2277 // List the pair of accesses need run-time checks to prove independence. 2278 PtrRtChecking->print(OS, Depth); 2279 OS << "\n"; 2280 2281 OS.indent(Depth) << "Non vectorizable stores to invariant address were " 2282 << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ") 2283 << "found in loop.\n"; 2284 2285 OS.indent(Depth) << "SCEV assumptions:\n"; 2286 PSE->getUnionPredicate().print(OS, Depth); 2287 2288 OS << "\n"; 2289 2290 OS.indent(Depth) << "Expressions re-written:\n"; 2291 PSE->print(OS, Depth); 2292 } 2293 2294 LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) { 2295 initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry()); 2296 } 2297 2298 const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) { 2299 auto &LAI = LoopAccessInfoMap[L]; 2300 2301 if (!LAI) 2302 LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI); 2303 2304 return *LAI.get(); 2305 } 2306 2307 void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const { 2308 LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this); 2309 2310 for (Loop *TopLevelLoop : *LI) 2311 for (Loop *L : depth_first(TopLevelLoop)) { 2312 OS.indent(2) << L->getHeader()->getName() << ":\n"; 2313 auto &LAI = LAA.getInfo(L); 2314 LAI.print(OS, 4); 2315 } 2316 } 2317 2318 bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) { 2319 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2320 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2321 TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2322 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2323 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2324 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2325 2326 return false; 2327 } 2328 2329 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { 2330 AU.addRequiredTransitive<ScalarEvolutionWrapperPass>(); 2331 AU.addRequiredTransitive<AAResultsWrapperPass>(); 2332 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 2333 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 2334 2335 AU.setPreservesAll(); 2336 } 2337 2338 char LoopAccessLegacyAnalysis::ID = 0; 2339 static const char laa_name[] = "Loop Access Analysis"; 2340 #define LAA_NAME "loop-accesses" 2341 2342 INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true) 2343 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2344 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 2345 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2346 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 2347 INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true) 2348 2349 AnalysisKey LoopAccessAnalysis::Key; 2350 2351 LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM, 2352 LoopStandardAnalysisResults &AR) { 2353 return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI); 2354 } 2355 2356 namespace llvm { 2357 2358 Pass *createLAAPass() { 2359 return new LoopAccessLegacyAnalysis(); 2360 } 2361 2362 } // end namespace llvm 2363