//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
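
// A brief illustration of the chain-of-recurrences notation used throughout
// this file (an editorial example, not tied to any particular IR): the affine
// recurrence {0,+,4}<%loop> takes the values 0, 4, 8, 12, ... on successive
// iterations of %loop, while the quadratic recurrence {1,+,2,+,2} takes the
// values 1, 3, 7, 13, ..., i.e. its per-iteration increment itself grows by 2
// each time around the loop.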

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumExitCountsComputed,
          "Number of loop exits with predictable exit counts");
STATISTIC(NumExitCountsNotComputed,
          "Number of loop exits without predictable exit counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

#ifdef EXPENSIVE_CHECKS
bool llvm::VerifySCEV = true;
#else
bool llvm::VerifySCEV = false;
#endif

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

static cl::opt<bool, true> VerifySCEVOpt(
    "verify-scev", cl::Hidden, cl::location(VerifySCEV),
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<unsigned> RangeIterThreshold(
    "scev-range-iter-threshold", cl::Hidden,
    cl::desc("Threshold for switching to iteratively computing SCEV ranges"),
    cl::init(32));

static cl::opt<bool>
    ClassifyExpressions("scalar-evolution-classify-expressions",
                        cl::Hidden, cl::init(true),
                        cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

static cl::opt<unsigned> MaxPhiSCCAnalysisSize(
    "scalar-evolution-max-scc-analysis-depth", cl::Hidden,
    cl::desc("Maximum amount of nodes to process while searching SCEVUnknown "
             "Phi strongly connected components"),
    cl::init(8));

static cl::opt<bool>
    EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden,
                            cl::desc("Handle <= and >= in finite loops"),
                            cl::init(true));

static cl::opt<bool> UseContextForNoWrapFlagInference(
    "scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden,
    cl::desc("Infer nuw/nsw flags using context where suitable"),
    cl::init(true));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scVScale:
    OS << "vscale";
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr:
      OpStr = " + ";
      break;
    case scMulExpr:
      OpStr = " * ";
      break;
    case scUMaxExpr:
      OpStr = " umax ";
      break;
    case scSMaxExpr:
      OpStr = " smax ";
      break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    case scSequentialUMinExpr:
      OpStr = " umin_seq ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown:
    cast<SCEVUnknown>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scVScale:
    return cast<SCEVVScale>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scSequentialUMinExpr:
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

ArrayRef<const SCEV *> SCEV::operands() const {
  switch (getSCEVType()) {
  case scConstant:
  case scVScale:
  case scUnknown:
    return {};
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->operands();
  case scAddRecExpr:
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr:
    return cast<SCEVNAryExpr>(this)->operands();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->operands();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul)
    return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC)
    return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute()
    : SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

const SCEV *ScalarEvolution::getVScale(Type *Ty) {
  FoldingSetNodeID ID;
  ID.AddInteger(scVScale);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEV *S = new (SCEVAllocator) SCEVVScale(ID.Intern(SCEVAllocator), Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Replace the value pointer in case someone is still using this SCEVUnknown.
  setValPtr(New);
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }
    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
// If the max analysis depth was reached, return std::nullopt to indicate that
// we do not know for sure whether the two expressions are equivalent.
static std::optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return std::nullopt;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scVScale: {
    const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(LHS)->getType());
    const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(RHS)->getType());
    return LTy->getBitWidth() - RTy->getBitWidth();
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      assert(DT.dominates(RHead, LHead) &&
             "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    [[fallthrough]];
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr: {
    ArrayRef<const SCEV *> LOps = LHS->operands();
    ArrayRef<const SCEV *> ROps = RHS->operands();

    // Lexicographically compare n-ary-like expressions.
    unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LOps[i],
                                     ROps[i], DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2)
    return; // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i; // no need to rescan it.
        if (i == e-2)
          return; // Done!
      }
    }
  }
}
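
// For instance (an editorial illustration): given the operand list
// (%x, 2, %y, 2), GroupByComplexity sorts the constants before the
// SCEVUnknowns and places the duplicate 2s next to each other, producing
// (2, 2, %x, %y), so that the folders below can merge the duplicates with a
// single linear scan.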

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
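  //
  // As a concrete illustration of the scheme above (an editorial example):
  // for K = 3 we have K! = 6 = 2^1 * 3, so T = 1 and OddFactorial = 3. The
  // product It*(It-1)*(It-2) is formed at W+1 bits, divided by 2^1 via a
  // right shift, truncated back to W bits, and multiplied by the
  // multiplicative inverse of 3 modulo 2^W, which yields BC(It, 3) exactly
  // modulo 2^W.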

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countr_zero();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
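///
/// As a sanity check (an editorial example): evaluating {0,+,1,+,1} at
/// iteration It gives 0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2)
/// = It + It*(It-1)/2 = It*(It+1)/2, which reproduces the sequence
/// 0, 1, 3, 6, 10, ... produced by that chain of recurrences.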
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(operands(), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self
  // cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want a SCEVPtrToIntExpr of an arbitrary
  // expression; we only want a SCEVPtrToIntExpr of an SCEVUnknown, with the
  // rest of the expression integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };
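
  // For example (an editorial illustration): rewriting the pointer-typed
  // expression (%ptr + 8) sinks the cast to the leaf, producing
  // (8 + (ptrtoint %ptr)), so that the only pointer-typed node left in the
  // tree is the SCEVUnknown %ptr.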
  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that it was inserted into the cache during the recursive calls
    // above. If we find it now, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }
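
  // As an editorial illustration of the fold below: truncating the i64
  // recurrence {0,+,4} to i32 yields the i32 recurrence {0,+,4}; note that
  // the result is built with FlagAnyWrap, since truncation does not preserve
  // the original no-wrap flags.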
  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = getMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Op);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}
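
// For example (an editorial illustration): for an i8 recurrence whose step is
// known positive with a maximum of 1, getSignedOverflowLimitForStep returns
// the limit 127 (INT8_MIN - 1 in wrapping arithmetic) with predicate
// ICMP_SLT: any recurrence value strictly below 127 can be incremented by the
// step without signed overflow.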

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
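//
// For instance (an editorial example): given AR = {1 + %n,+,1}, if the
// pre-increment recurrence {%n,+,1} is known to be <nsw>, then sext(AR) can
// be normalized to {1 + sext(%n),+,1}, keeping the step outside of the
// extension.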
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list. Note that
  // SA might have repeated ops, like %a + %a + ..., so only remove one.
  SmallVector<const SCEV *, 4> DiffOps(SA->operands());
  for (auto It = DiffOps.begin(); It != DiffOps.end(); ++It)
    if (*It == Step) {
      DiffOps.erase(It);
      break;
    }

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.getMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
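//
// For example (an editorial illustration): with ConstantStart C = 13 (0b1101)
// and a step known to be divisible by 4 (TZ = 2), D = 13 mod 4 = 1, splitting
// the recurrence as 1 + {12,+,x}. Every value of {12,+,x} is then a multiple
// of 4, so re-adding D = 1 only fills in the known-zero low bits and cannot
// wrap.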
1514 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1515 const APInt &ConstantStart, 1516 const SCEV *Step) { 1517 const unsigned BitWidth = ConstantStart.getBitWidth(); 1518 const uint32_t TZ = SE.getMinTrailingZeros(Step); 1519 if (TZ) 1520 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1521 : ConstantStart; 1522 return APInt(BitWidth, 0); 1523 } 1524 1525 static void insertFoldCacheEntry( 1526 const ScalarEvolution::FoldID &ID, const SCEV *S, 1527 DenseMap<ScalarEvolution::FoldID, const SCEV *> &FoldCache, 1528 DenseMap<const SCEV *, SmallVector<ScalarEvolution::FoldID, 2>> 1529 &FoldCacheUser) { 1530 auto I = FoldCache.insert({ID, S}); 1531 if (!I.second) { 1532 // Remove FoldCacheUser entry for ID when replacing an existing FoldCache 1533 // entry. 1534 auto &UserIDs = FoldCacheUser[I.first->second]; 1535 assert(count(UserIDs, ID) == 1 && "unexpected duplicates in UserIDs"); 1536 for (unsigned I = 0; I != UserIDs.size(); ++I) 1537 if (UserIDs[I] == ID) { 1538 std::swap(UserIDs[I], UserIDs.back()); 1539 break; 1540 } 1541 UserIDs.pop_back(); 1542 I.first->second = S; 1543 } 1544 auto R = FoldCacheUser.insert({S, {}}); 1545 R.first->second.push_back(ID); 1546 } 1547 1548 const SCEV * 1549 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1550 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1551 "This is not an extending conversion!"); 1552 assert(isSCEVable(Ty) && 1553 "This is not a conversion to a SCEVable type!"); 1554 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1555 Ty = getEffectiveSCEVType(Ty); 1556 1557 FoldID ID(scZeroExtend, Op, Ty); 1558 auto Iter = FoldCache.find(ID); 1559 if (Iter != FoldCache.end()) 1560 return Iter->second; 1561 1562 const SCEV *S = getZeroExtendExprImpl(Op, Ty, Depth); 1563 if (!isa<SCEVZeroExtendExpr>(S)) 1564 insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser); 1565 return S; 1566 } 1567 1568 const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty, 1569 unsigned Depth) { 1570 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1571 "This is not an extending conversion!"); 1572 assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); 1573 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1574 1575 // Fold if the operand is constant. 1576 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1577 return getConstant(SC->getAPInt().zext(getTypeSizeInBits(Ty))); 1578 1579 // zext(zext(x)) --> zext(x) 1580 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1581 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1582 1583 // Before doing any expensive analysis, check to see if we've already 1584 // computed a SCEV for this Op and Ty. 1585 FoldingSetNodeID ID; 1586 ID.AddInteger(scZeroExtend); 1587 ID.AddPointer(Op); 1588 ID.AddPointer(Ty); 1589 void *IP = nullptr; 1590 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1591 if (Depth > MaxCastDepth) { 1592 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1593 Op, Ty); 1594 UniqueSCEVs.InsertNode(S, IP); 1595 registerUser(S, Op); 1596 return S; 1597 } 1598 1599 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1600 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1601 // It's possible the bits taken off by the truncate were all zero bits. If 1602 // so, we should be able to simplify this further. 
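// E.g., zext((trunc i32 %x to i8)) to i16 is just (trunc i32 %x to i16) when
// the unsigned range of %x already fits in 8 bits.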
1603 const SCEV *X = ST->getOperand();
1604 ConstantRange CR = getUnsignedRange(X);
1605 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1606 unsigned NewBits = getTypeSizeInBits(Ty);
1607 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1608 CR.zextOrTrunc(NewBits)))
1609 return getTruncateOrZeroExtend(X, Ty, Depth);
1610 }
1611
1612 // If the input value is a chrec scev, and we can prove that the value
1613 // did not overflow the old, smaller value, we can zero extend all of the
1614 // operands (often constants). This allows analysis of something like
1615 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1616 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1617 if (AR->isAffine()) {
1618 const SCEV *Start = AR->getStart();
1619 const SCEV *Step = AR->getStepRecurrence(*this);
1620 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1621 const Loop *L = AR->getLoop();
1622
1623 // If we have special knowledge that this addrec won't overflow,
1624 // we don't need to do any further analysis.
1625 if (AR->hasNoUnsignedWrap()) {
1626 Start =
1627 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
1628 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1629 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1630 }
1631
1632 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1633 // Note that this serves two purposes: It filters out loops that are
1634 // simply not analyzable, and it covers the case where this code is
1635 // being called from within backedge-taken count analysis, such that
1636 // attempting to ask for the backedge-taken count would likely result
1637 // in infinite recursion. In the latter case, the analysis code will
1638 // cope with a conservative value, and it will take care to purge
1639 // that value once it has finished.
1640 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1641 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1642 // Manually compute the final value for AR, checking for overflow.
1643
1644 // Check whether the backedge-taken count can be losslessly cast to
1645 // the addrec's type. The count is always unsigned.
1646 const SCEV *CastedMaxBECount =
1647 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1648 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1649 CastedMaxBECount, MaxBECount->getType(), Depth);
1650 if (MaxBECount == RecastedMaxBECount) {
1651 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1652 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1653 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1654 SCEV::FlagAnyWrap, Depth + 1);
1655 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1656 SCEV::FlagAnyWrap,
1657 Depth + 1),
1658 WideTy, Depth + 1);
1659 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1660 const SCEV *WideMaxBECount =
1661 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1662 const SCEV *OperandExtendedAdd =
1663 getAddExpr(WideStart,
1664 getMulExpr(WideMaxBECount,
1665 getZeroExtendExpr(Step, WideTy, Depth + 1),
1666 SCEV::FlagAnyWrap, Depth + 1),
1667 SCEV::FlagAnyWrap, Depth + 1);
1668 if (ZAdd == OperandExtendedAdd) {
1669 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1670 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1671 // Return the expression with the addrec on the outside.
1672 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1673 Depth + 1);
1674 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1675 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1676 }
1677 // Similar to above, only this time treat the step value as signed.
1678 // This covers loops that count down.
1679 OperandExtendedAdd =
1680 getAddExpr(WideStart,
1681 getMulExpr(WideMaxBECount,
1682 getSignExtendExpr(Step, WideTy, Depth + 1),
1683 SCEV::FlagAnyWrap, Depth + 1),
1684 SCEV::FlagAnyWrap, Depth + 1);
1685 if (ZAdd == OperandExtendedAdd) {
1686 // Cache knowledge of AR NW, which is propagated to this AddRec.
1687 // Negative step causes unsigned wrap, but it still can't self-wrap.
1688 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1689 // Return the expression with the addrec on the outside.
1690 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1691 Depth + 1);
1692 Step = getSignExtendExpr(Step, Ty, Depth + 1);
1693 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1694 }
1695 }
1696 }
1697
1698 // Normally, in the cases where we can prove no-overflow via a
1699 // backedge guarding condition, we can also compute a backedge
1700 // taken count for the loop. The exceptions are assumptions and
1701 // guards present in the loop -- SCEV is not great at exploiting
1702 // these to compute max backedge taken counts, but can still use
1703 // these to prove lack of overflow. Use this fact to avoid
1704 // doing extra work that may not pay off.
1705 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1706 !AC.assumptions().empty()) {
1707
1708 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1709 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1710 if (AR->hasNoUnsignedWrap()) {
1711 // Same as the nuw case above - duplicated here to avoid a compile time
1712 // issue. It's not clear that the order of checks matters, but
1713 // it's one of two possible causes for a change which was
1714 // reverted. Be conservative for the moment.
1715 Start =
1716 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
1717 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1718 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1719 }
1720
1721 // For a negative step, we can extend the operands iff doing so only
1722 // traverses values in the range zext([0,UINT_MAX]).
1723 if (isKnownNegative(Step)) {
1724 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1725 getSignedRangeMin(Step));
1726 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1727 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1728 // Cache knowledge of AR NW, which is propagated to this
1729 // AddRec. Negative step causes unsigned wrap, but it
1730 // still can't self-wrap.
1731 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1732 // Return the expression with the addrec on the outside.
1733 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1734 Depth + 1);
1735 Step = getSignExtendExpr(Step, Ty, Depth + 1);
1736 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1737 }
1738 }
1739 }
1740
1741 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1742 // if D + (C - D + Step * n) could be proven to not unsigned wrap
1743 // where D maximizes the number of trailing zeros of (C - D + Step * n)
1744 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1745 const APInt &C = SC->getAPInt();
1746 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1747 if (D != 0) {
1748 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1749 const SCEV *SResidual =
1750 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1751 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1752 return getAddExpr(SZExtD, SZExtR,
1753 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1754 Depth + 1);
1755 }
1756 }
1757
1758 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1759 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1760 Start =
1761 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
1762 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1763 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1764 }
1765 }
1766
1767 // zext(A % B) --> zext(A) % zext(B)
1768 {
1769 const SCEV *LHS;
1770 const SCEV *RHS;
1771 if (matchURem(Op, LHS, RHS))
1772 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1773 getZeroExtendExpr(RHS, Ty, Depth + 1));
1774 }
1775
1776 // zext(A / B) --> zext(A) / zext(B).
1777 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1778 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1779 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1780
1781 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1782 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1783 if (SA->hasNoUnsignedWrap()) {
1784 // If the addition does not unsign overflow then we can, by definition,
1785 // commute the zero extension with the addition operation.
1786 SmallVector<const SCEV *, 4> Ops;
1787 for (const auto *Op : SA->operands())
1788 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1789 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1790 }
1791
1792 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1793 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1794 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1795 //
1796 // Address arithmetic often contains expressions like
1797 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1798 // This transformation is useful while proving that such expressions are
1799 // equal or differ by a small constant amount; see the LoadStoreVectorizer pass.
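// Completing that instance: 4 + (4 * X) has two trailing zero bits, so
// D = 5 mod 4 = 1 and zext(5 + (4 * X)) folds to zext(1) + zext(4 + (4 * X)).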
1800 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1801 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1802 if (D != 0) { 1803 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1804 const SCEV *SResidual = 1805 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1806 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1807 return getAddExpr(SZExtD, SZExtR, 1808 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1809 Depth + 1); 1810 } 1811 } 1812 } 1813 1814 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1815 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1816 if (SM->hasNoUnsignedWrap()) { 1817 // If the multiply does not unsign overflow then we can, by definition, 1818 // commute the zero extension with the multiply operation. 1819 SmallVector<const SCEV *, 4> Ops; 1820 for (const auto *Op : SM->operands()) 1821 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1822 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1823 } 1824 1825 // zext(2^K * (trunc X to iN)) to iM -> 1826 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1827 // 1828 // Proof: 1829 // 1830 // zext(2^K * (trunc X to iN)) to iM 1831 // = zext((trunc X to iN) << K) to iM 1832 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1833 // (because shl removes the top K bits) 1834 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1835 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1836 // 1837 if (SM->getNumOperands() == 2) 1838 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1839 if (MulLHS->getAPInt().isPowerOf2()) 1840 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1841 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1842 MulLHS->getAPInt().logBase2(); 1843 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1844 return getMulExpr( 1845 getZeroExtendExpr(MulLHS, Ty), 1846 getZeroExtendExpr( 1847 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1848 SCEV::FlagNUW, Depth + 1); 1849 } 1850 } 1851 1852 // zext(umin(x, y)) -> umin(zext(x), zext(y)) 1853 // zext(umax(x, y)) -> umax(zext(x), zext(y)) 1854 if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) { 1855 auto *MinMax = cast<SCEVMinMaxExpr>(Op); 1856 SmallVector<const SCEV *, 4> Operands; 1857 for (auto *Operand : MinMax->operands()) 1858 Operands.push_back(getZeroExtendExpr(Operand, Ty)); 1859 if (isa<SCEVUMinExpr>(MinMax)) 1860 return getUMinExpr(Operands); 1861 return getUMaxExpr(Operands); 1862 } 1863 1864 // zext(umin_seq(x, y)) -> umin_seq(zext(x), zext(y)) 1865 if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) { 1866 assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!"); 1867 SmallVector<const SCEV *, 4> Operands; 1868 for (auto *Operand : MinMax->operands()) 1869 Operands.push_back(getZeroExtendExpr(Operand, Ty)); 1870 return getUMinExpr(Operands, /*Sequential*/ true); 1871 } 1872 1873 // The cast wasn't folded; create an explicit cast node. 1874 // Recompute the insert position, as it may have been invalidated. 
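// (The recursive getZeroExtendExpr calls above may have inserted new nodes
// into the folding set, which can invalidate the insert position IP
// computed earlier.)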
1875 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1876 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1877 Op, Ty); 1878 UniqueSCEVs.InsertNode(S, IP); 1879 registerUser(S, Op); 1880 return S; 1881 } 1882 1883 const SCEV * 1884 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1885 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1886 "This is not an extending conversion!"); 1887 assert(isSCEVable(Ty) && 1888 "This is not a conversion to a SCEVable type!"); 1889 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1890 Ty = getEffectiveSCEVType(Ty); 1891 1892 FoldID ID(scSignExtend, Op, Ty); 1893 auto Iter = FoldCache.find(ID); 1894 if (Iter != FoldCache.end()) 1895 return Iter->second; 1896 1897 const SCEV *S = getSignExtendExprImpl(Op, Ty, Depth); 1898 if (!isa<SCEVSignExtendExpr>(S)) 1899 insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser); 1900 return S; 1901 } 1902 1903 const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty, 1904 unsigned Depth) { 1905 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1906 "This is not an extending conversion!"); 1907 assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); 1908 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1909 Ty = getEffectiveSCEVType(Ty); 1910 1911 // Fold if the operand is constant. 1912 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1913 return getConstant(SC->getAPInt().sext(getTypeSizeInBits(Ty))); 1914 1915 // sext(sext(x)) --> sext(x) 1916 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1917 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1918 1919 // sext(zext(x)) --> zext(x) 1920 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1921 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1922 1923 // Before doing any expensive analysis, check to see if we've already 1924 // computed a SCEV for this Op and Ty. 1925 FoldingSetNodeID ID; 1926 ID.AddInteger(scSignExtend); 1927 ID.AddPointer(Op); 1928 ID.AddPointer(Ty); 1929 void *IP = nullptr; 1930 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1931 // Limit recursion depth. 1932 if (Depth > MaxCastDepth) { 1933 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1934 Op, Ty); 1935 UniqueSCEVs.InsertNode(S, IP); 1936 registerUser(S, Op); 1937 return S; 1938 } 1939 1940 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1941 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1942 // It's possible the bits taken off by the truncate were all sign bits. If 1943 // so, we should be able to simplify this further. 1944 const SCEV *X = ST->getOperand(); 1945 ConstantRange CR = getSignedRange(X); 1946 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1947 unsigned NewBits = getTypeSizeInBits(Ty); 1948 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1949 CR.sextOrTrunc(NewBits))) 1950 return getTruncateOrSignExtend(X, Ty, Depth); 1951 } 1952 1953 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1954 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1955 if (SA->hasNoSignedWrap()) { 1956 // If the addition does not sign overflow then we can, by definition, 1957 // commute the sign extension with the addition operation. 
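// E.g., sext((A + B)<nsw>) folds to (sext(A) + sext(B))<nsw> in the wider
// type, mirroring the zext case above.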
1958 SmallVector<const SCEV *, 4> Ops;
1959 for (const auto *Op : SA->operands())
1960 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1961 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
1962 }
1963
1964 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
1965 // if D + (C - D + x + y + ...) could be proven to not signed wrap
1966 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1967 //
1968 // For instance, this will bring two seemingly different expressions:
1969 // 1 + sext(5 + 20 * %x + 24 * %y) and
1970 // sext(6 + 20 * %x + 24 * %y)
1971 // to the same form:
1972 // 2 + sext(4 + 20 * %x + 24 * %y)
1973 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1974 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1975 if (D != 0) {
1976 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
1977 const SCEV *SResidual =
1978 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1979 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
1980 return getAddExpr(SSExtD, SSExtR,
1981 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1982 Depth + 1);
1983 }
1984 }
1985 }
1986 // If the input value is a chrec scev, and we can prove that the value
1987 // did not overflow the old, smaller value, we can sign extend all of the
1988 // operands (often constants). This allows analysis of something like
1989 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1990 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1991 if (AR->isAffine()) {
1992 const SCEV *Start = AR->getStart();
1993 const SCEV *Step = AR->getStepRecurrence(*this);
1994 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1995 const Loop *L = AR->getLoop();
1996
1997 // If we have special knowledge that this addrec won't overflow,
1998 // we don't need to do any further analysis.
1999 if (AR->hasNoSignedWrap()) {
2000 Start =
2001 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
2002 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2003 return getAddRecExpr(Start, Step, L, SCEV::FlagNSW);
2004 }
2005
2006 // Check whether the backedge-taken count is SCEVCouldNotCompute.
2007 // Note that this serves two purposes: It filters out loops that are
2008 // simply not analyzable, and it covers the case where this code is
2009 // being called from within backedge-taken count analysis, such that
2010 // attempting to ask for the backedge-taken count would likely result
2011 // in infinite recursion. In the latter case, the analysis code will
2012 // cope with a conservative value, and it will take care to purge
2013 // that value once it has finished.
2014 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
2015 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2016 // Manually compute the final value for AR, checking for
2017 // overflow.
2018
2019 // Check whether the backedge-taken count can be losslessly cast to
2020 // the addrec's type. The count is always unsigned.
2021 const SCEV *CastedMaxBECount =
2022 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2023 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2024 CastedMaxBECount, MaxBECount->getType(), Depth);
2025 if (MaxBECount == RecastedMaxBECount) {
2026 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2027 // Check whether Start+Step*MaxBECount has no signed overflow.
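// (The check below compares the sign extension of the narrow sum
// Start + Step*MaxBECount against the same sum computed entirely in the
// doubled-width type; equality means the narrow computation cannot wrap.)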
2028 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2029 SCEV::FlagAnyWrap, Depth + 1);
2030 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2031 SCEV::FlagAnyWrap,
2032 Depth + 1),
2033 WideTy, Depth + 1);
2034 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2035 const SCEV *WideMaxBECount =
2036 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2037 const SCEV *OperandExtendedAdd =
2038 getAddExpr(WideStart,
2039 getMulExpr(WideMaxBECount,
2040 getSignExtendExpr(Step, WideTy, Depth + 1),
2041 SCEV::FlagAnyWrap, Depth + 1),
2042 SCEV::FlagAnyWrap, Depth + 1);
2043 if (SAdd == OperandExtendedAdd) {
2044 // Cache knowledge of AR NSW, which is propagated to this AddRec.
2045 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2046 // Return the expression with the addrec on the outside.
2047 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2048 Depth + 1);
2049 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2050 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2051 }
2052 // Similar to above, only this time treat the step value as unsigned.
2053 // This covers loops that count up with an unsigned step.
2054 OperandExtendedAdd =
2055 getAddExpr(WideStart,
2056 getMulExpr(WideMaxBECount,
2057 getZeroExtendExpr(Step, WideTy, Depth + 1),
2058 SCEV::FlagAnyWrap, Depth + 1),
2059 SCEV::FlagAnyWrap, Depth + 1);
2060 if (SAdd == OperandExtendedAdd) {
2061 // If AR wraps around then
2062 //
2063 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2064 // => SAdd != OperandExtendedAdd
2065 //
2066 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2067 // (SAdd == OperandExtendedAdd => AR is NW)
2068
2069 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2070
2071 // Return the expression with the addrec on the outside.
2072 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2073 Depth + 1);
2074 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
2075 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2076 }
2077 }
2078 }
2079
2080 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2081 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2082 if (AR->hasNoSignedWrap()) {
2083 // Same as the nsw case above - duplicated here to avoid a compile time
2084 // issue. It's not clear that the order of checks matters, but
2085 // it's one of two possible causes for a change which was
2086 // reverted. Be conservative for the moment.
2087 Start =
2088 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
2089 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2090 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2091 }
2092
2093 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2094 // if D + (C - D + Step * n) could be proven to not signed wrap
2095 // where D maximizes the number of trailing zeros of (C - D + Step * n)
2096 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2097 const APInt &C = SC->getAPInt();
2098 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2099 if (D != 0) {
2100 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2101 const SCEV *SResidual =
2102 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2103 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2104 return getAddExpr(SSExtD, SSExtR,
2105 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2106 Depth + 1);
2107 }
2108 }
2109
2110 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2111 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2112 Start =
2113 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
2114 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2115 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2116 }
2117 }
2118
2119 // If the input value is provably positive and we could not simplify
2120 // away the sext, build a zext instead.
2121 if (isKnownNonNegative(Op))
2122 return getZeroExtendExpr(Op, Ty, Depth + 1);
2123
2124 // sext(smin(x, y)) -> smin(sext(x), sext(y))
2125 // sext(smax(x, y)) -> smax(sext(x), sext(y))
2126 if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) {
2127 auto *MinMax = cast<SCEVMinMaxExpr>(Op);
2128 SmallVector<const SCEV *, 4> Operands;
2129 for (auto *Operand : MinMax->operands())
2130 Operands.push_back(getSignExtendExpr(Operand, Ty));
2131 if (isa<SCEVSMinExpr>(MinMax))
2132 return getSMinExpr(Operands);
2133 return getSMaxExpr(Operands);
2134 }
2135
2136 // The cast wasn't folded; create an explicit cast node.
2137 // Recompute the insert position, as it may have been invalidated.
2138 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2139 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2140 Op, Ty);
2141 UniqueSCEVs.InsertNode(S, IP);
2142 registerUser(S, { Op });
2143 return S;
2144 }
2145
2146 const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op,
2147 Type *Ty) {
2148 switch (Kind) {
2149 case scTruncate:
2150 return getTruncateExpr(Op, Ty);
2151 case scZeroExtend:
2152 return getZeroExtendExpr(Op, Ty);
2153 case scSignExtend:
2154 return getSignExtendExpr(Op, Ty);
2155 case scPtrToInt:
2156 return getPtrToIntExpr(Op, Ty);
2157 default:
2158 llvm_unreachable("Not a SCEV cast expression!");
2159 }
2160 }
2161
2162 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2163 /// unspecified bits out to the given type.
2164 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2165 Type *Ty) {
2166 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2167 "This is not an extending conversion!");
2168 assert(isSCEVable(Ty) &&
2169 "This is not a conversion to a SCEVable type!");
2170 Ty = getEffectiveSCEVType(Ty);
2171
2172 // Sign-extend negative constants.
2173 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2174 if (SC->getAPInt().isNegative())
2175 return getSignExtendExpr(Op, Ty);
2176
2177 // Peel off a truncate cast.
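// E.g., anyext((trunc i64 %x to i32)) to i64 can simply reuse %x, since the
// extra bits are unspecified anyway.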
2178 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2179 const SCEV *NewOp = T->getOperand();
2180 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2181 return getAnyExtendExpr(NewOp, Ty);
2182 return getTruncateOrNoop(NewOp, Ty);
2183 }
2184
2185 // Next try a zext cast. If the cast is folded, use it.
2186 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2187 if (!isa<SCEVZeroExtendExpr>(ZExt))
2188 return ZExt;
2189
2190 // Next try a sext cast. If the cast is folded, use it.
2191 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2192 if (!isa<SCEVSignExtendExpr>(SExt))
2193 return SExt;
2194
2195 // Force the cast to be folded into the operands of an addrec.
2196 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2197 SmallVector<const SCEV *, 4> Ops;
2198 for (const SCEV *Op : AR->operands())
2199 Ops.push_back(getAnyExtendExpr(Op, Ty));
2200 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2201 }
2202
2203 // If the expression is obviously signed, use the sext cast value.
2204 if (isa<SCEVSMaxExpr>(Op))
2205 return SExt;
2206
2207 // Absent any other information, use the zext cast value.
2208 return ZExt;
2209 }
2210
2211 /// Process the given Ops list, which is a list of operands to be added under
2212 /// the given scale, and update the given map. This is a helper function for
2213 /// getAddExpr. As an example of what it does, given a sequence of operands
2214 /// that would form an add expression like this:
2215 ///
2216 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2217 ///
2218 /// where A and B are constants, update the map with these values:
2219 ///
2220 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2221 ///
2222 /// and add 13 + A*B*29 to AccumulatedConstant.
2223 /// This will allow getAddExpr to produce this:
2224 ///
2225 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2226 ///
2227 /// This form often exposes folding opportunities that are hidden in
2228 /// the original operand list.
2229 ///
2230 /// Return true iff it appears that any interesting folding opportunities
2231 /// may be exposed. This helps getAddExpr short-circuit extra work in
2232 /// the common case where no interesting opportunities are present, and
2233 /// is also used as a check to avoid infinite recursion.
2234 static bool
2235 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2236 SmallVectorImpl<const SCEV *> &NewOps,
2237 APInt &AccumulatedConstant,
2238 ArrayRef<const SCEV *> Ops, const APInt &Scale,
2239 ScalarEvolution &SE) {
2240 bool Interesting = false;
2241
2242 // Iterate over the add operands. They are sorted, with constants first.
2243 unsigned i = 0;
2244 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2245 ++i;
2246 // Pull a buried constant out to the outside.
2247 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2248 Interesting = true;
2249 AccumulatedConstant += Scale * C->getAPInt();
2250 }
2251
2252 // Next comes everything else. We're especially interested in multiplies
2253 // here, but they're in the middle, so just visit the rest with one loop.
2254 for (; i != Ops.size(); ++i) {
2255 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2256 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2257 APInt NewScale =
2258 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2259 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2260 // A multiplication of a constant with another add; recurse.
2261 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2262 Interesting |= 2263 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2264 Add->operands(), NewScale, SE); 2265 } else { 2266 // A multiplication of a constant with some other value. Update 2267 // the map. 2268 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); 2269 const SCEV *Key = SE.getMulExpr(MulOps); 2270 auto Pair = M.insert({Key, NewScale}); 2271 if (Pair.second) { 2272 NewOps.push_back(Pair.first->first); 2273 } else { 2274 Pair.first->second += NewScale; 2275 // The map already had an entry for this value, which may indicate 2276 // a folding opportunity. 2277 Interesting = true; 2278 } 2279 } 2280 } else { 2281 // An ordinary operand. Update the map. 2282 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2283 M.insert({Ops[i], Scale}); 2284 if (Pair.second) { 2285 NewOps.push_back(Pair.first->first); 2286 } else { 2287 Pair.first->second += Scale; 2288 // The map already had an entry for this value, which may indicate 2289 // a folding opportunity. 2290 Interesting = true; 2291 } 2292 } 2293 } 2294 2295 return Interesting; 2296 } 2297 2298 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, 2299 const SCEV *LHS, const SCEV *RHS, 2300 const Instruction *CtxI) { 2301 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, 2302 SCEV::NoWrapFlags, unsigned); 2303 switch (BinOp) { 2304 default: 2305 llvm_unreachable("Unsupported binary op"); 2306 case Instruction::Add: 2307 Operation = &ScalarEvolution::getAddExpr; 2308 break; 2309 case Instruction::Sub: 2310 Operation = &ScalarEvolution::getMinusSCEV; 2311 break; 2312 case Instruction::Mul: 2313 Operation = &ScalarEvolution::getMulExpr; 2314 break; 2315 } 2316 2317 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = 2318 Signed ? &ScalarEvolution::getSignExtendExpr 2319 : &ScalarEvolution::getZeroExtendExpr; 2320 2321 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) 2322 auto *NarrowTy = cast<IntegerType>(LHS->getType()); 2323 auto *WideTy = 2324 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); 2325 2326 const SCEV *A = (this->*Extension)( 2327 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); 2328 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0); 2329 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0); 2330 const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0); 2331 if (A == B) 2332 return true; 2333 // Can we use context to prove the fact we need? 2334 if (!CtxI) 2335 return false; 2336 // TODO: Support mul. 2337 if (BinOp == Instruction::Mul) 2338 return false; 2339 auto *RHSC = dyn_cast<SCEVConstant>(RHS); 2340 // TODO: Lift this limitation. 2341 if (!RHSC) 2342 return false; 2343 APInt C = RHSC->getAPInt(); 2344 unsigned NumBits = C.getBitWidth(); 2345 bool IsSub = (BinOp == Instruction::Sub); 2346 bool IsNegativeConst = (Signed && C.isNegative()); 2347 // Compute the direction and magnitude by which we need to check overflow. 2348 bool OverflowDown = IsSub ^ IsNegativeConst; 2349 APInt Magnitude = C; 2350 if (IsNegativeConst) { 2351 if (C == APInt::getSignedMinValue(NumBits)) 2352 // TODO: SINT_MIN on inversion gives the same negative value, we don't 2353 // want to deal with that. 2354 return false; 2355 Magnitude = -C; 2356 } 2357 2358 ICmpInst::Predicate Pred = Signed ? 
ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
2359 if (OverflowDown) {
2360 // To avoid overflow down, we need to make sure that MIN + Magnitude <= LHS.
2361 APInt Min = Signed ? APInt::getSignedMinValue(NumBits)
2362 : APInt::getMinValue(NumBits);
2363 APInt Limit = Min + Magnitude;
2364 return isKnownPredicateAt(Pred, getConstant(Limit), LHS, CtxI);
2365 } else {
2366 // To avoid overflow up, we need to make sure that LHS <= MAX - Magnitude.
2367 APInt Max = Signed ? APInt::getSignedMaxValue(NumBits)
2368 : APInt::getMaxValue(NumBits);
2369 APInt Limit = Max - Magnitude;
2370 return isKnownPredicateAt(Pred, LHS, getConstant(Limit), CtxI);
2371 }
2372 }
2373
2374 std::optional<SCEV::NoWrapFlags>
2375 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
2376 const OverflowingBinaryOperator *OBO) {
2377 // If both flags are already set, they cannot be strengthened any further.
2378 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
2379 return std::nullopt;
2380
2381 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
2382
2383 if (OBO->hasNoUnsignedWrap())
2384 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2385 if (OBO->hasNoSignedWrap())
2386 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2387
2388 bool Deduced = false;
2389
2390 if (OBO->getOpcode() != Instruction::Add &&
2391 OBO->getOpcode() != Instruction::Sub &&
2392 OBO->getOpcode() != Instruction::Mul)
2393 return std::nullopt;
2394
2395 const SCEV *LHS = getSCEV(OBO->getOperand(0));
2396 const SCEV *RHS = getSCEV(OBO->getOperand(1));
2397
2398 const Instruction *CtxI =
2399 UseContextForNoWrapFlagInference ? dyn_cast<Instruction>(OBO) : nullptr;
2400 if (!OBO->hasNoUnsignedWrap() &&
2401 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2402 /* Signed */ false, LHS, RHS, CtxI)) {
2403 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2404 Deduced = true;
2405 }
2406
2407 if (!OBO->hasNoSignedWrap() &&
2408 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2409 /* Signed */ true, LHS, RHS, CtxI)) {
2410 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2411 Deduced = true;
2412 }
2413
2414 if (Deduced)
2415 return Flags;
2416 return std::nullopt;
2417 }
2418
2419 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2420 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2421 // can't-overflow flags for the operation if possible.
2422 static SCEV::NoWrapFlags
2423 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2424 const ArrayRef<const SCEV *> Ops,
2425 SCEV::NoWrapFlags Flags) {
2426 using namespace std::placeholders;
2427
2428 using OBO = OverflowingBinaryOperator;
2429
2430 bool CanAnalyze =
2431 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2432 (void)CanAnalyze;
2433 assert(CanAnalyze && "don't call from other places!");
2434
2435 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2436 SCEV::NoWrapFlags SignOrUnsignWrap =
2437 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2438
2439 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
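// (For non-negative values the signed and unsigned interpretations agree,
// so an <nsw> sum of non-negative operands cannot wrap unsigned either.)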
2440 auto IsKnownNonNegative = [&](const SCEV *S) {
2441 return SE->isKnownNonNegative(S);
2442 };
2443
2444 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2445 Flags =
2446 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2447
2448 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2449
2450 if (SignOrUnsignWrap != SignOrUnsignMask &&
2451 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2452 isa<SCEVConstant>(Ops[0])) {
2453
2454 auto Opcode = [&] {
2455 switch (Type) {
2456 case scAddExpr:
2457 return Instruction::Add;
2458 case scMulExpr:
2459 return Instruction::Mul;
2460 default:
2461 llvm_unreachable("Unexpected SCEV op.");
2462 }
2463 }();
2464
2465 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2466
2467 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2468 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2469 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2470 Opcode, C, OBO::NoSignedWrap);
2471 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2472 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2473 }
2474
2475 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2476 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2477 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2478 Opcode, C, OBO::NoUnsignedWrap);
2479 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2480 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2481 }
2482 }
2483
2484 // <0,+,nonnegative><nw> is also nuw
2485 // TODO: Add corresponding nsw case
2486 if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) &&
2487 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 &&
2488 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2489 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2490
2491 // Both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW.
2492 if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) &&
2493 Ops.size() == 2) {
2494 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2495 if (UDiv->getOperand(1) == Ops[1])
2496 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2497 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2498 if (UDiv->getOperand(1) == Ops[0])
2499 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2500 }
2501
2502 return Flags;
2503 }
2504
2505 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2506 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2507 }
2508
2509 /// Get a canonical add expression, or something simpler if possible.
2510 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2511 SCEV::NoWrapFlags OrigFlags,
2512 unsigned Depth) {
2513 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2514 "only nuw or nsw allowed");
2515 assert(!Ops.empty() && "Cannot get empty add!");
2516 if (Ops.size() == 1) return Ops[0];
2517 #ifndef NDEBUG
2518 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2519 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2520 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2521 "SCEVAddExpr operand types don't match!");
2522 unsigned NumPtrs = count_if(
2523 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
2524 assert(NumPtrs <= 1 && "add has at most one pointer operand");
2525 #endif
2526
2527 // Sort by complexity; this groups all similar expression types together.
2528 GroupByComplexity(Ops, &LI, DT);
2529
2530 // If there are any constants, fold them together.
2531 unsigned Idx = 0;
2532 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2533 ++Idx;
2534 assert(Idx < Ops.size());
2535 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2536 // We found two constants, fold them together!
2537 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2538 if (Ops.size() == 2) return Ops[0];
2539 Ops.erase(Ops.begin()+1); // Erase the folded element
2540 LHSC = cast<SCEVConstant>(Ops[0]);
2541 }
2542
2543 // If we are left with a constant zero being added, strip it off.
2544 if (LHSC->getValue()->isZero()) {
2545 Ops.erase(Ops.begin());
2546 --Idx;
2547 }
2548
2549 if (Ops.size() == 1) return Ops[0];
2550 }
2551
2552 // Delay expensive flag strengthening until necessary.
2553 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2554 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2555 };
2556
2557 // Limit recursion depth.
2558 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2559 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2560
2561 if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
2562 // Don't strengthen flags if we have no new information.
2563 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2564 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2565 Add->setNoWrapFlags(ComputeFlags(Ops));
2566 return S;
2567 }
2568
2569 // Okay, check to see if the same value occurs in the operand list more than
2570 // once. If so, merge them together into a multiply expression. Since we
2571 // sorted the list, these values are required to be adjacent.
2572 Type *Ty = Ops[0]->getType();
2573 bool FoundMatch = false;
2574 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2575 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2576 // Scan ahead to count how many equal operands there are.
2577 unsigned Count = 2;
2578 while (i+Count != e && Ops[i+Count] == Ops[i])
2579 ++Count;
2580 // Merge the values into a multiply.
2581 const SCEV *Scale = getConstant(Ty, Count);
2582 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2583 if (Ops.size() == Count)
2584 return Mul;
2585 Ops[i] = Mul;
2586 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2587 --i; e -= Count - 1;
2588 FoundMatch = true;
2589 }
2590 if (FoundMatch)
2591 return getAddExpr(Ops, OrigFlags, Depth + 1);
2592
2593 // Check for truncates. If all the operands are truncated from the same
2594 // type, see if factoring out the truncate would permit the result to be
2595 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(ext(n)*x + ext(m)*y)
2596 // if the contents of the resulting outer trunc fold to something simple.
2597 auto FindTruncSrcType = [&]() -> Type * {
2598 // We're ultimately looking to fold an addrec of truncs and muls of only
2599 // constants and truncs, so if we find any other types of SCEV
2600 // as operands of the addrec then we bail and return nullptr here.
2601 // Otherwise, we return the type of the operand of a trunc that we find.
2602 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2603 return T->getOperand()->getType();
2604 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2605 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2606 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2607 return T->getOperand()->getType();
2608 }
2609 return nullptr;
2610 };
2611 if (auto *SrcType = FindTruncSrcType()) {
2612 SmallVector<const SCEV *, 8> LargeOps;
2613 bool Ok = true;
2614 // Check all the operands to see if they can be represented in the
2615 // source type of the truncate.
2616 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2617 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2618 if (T->getOperand()->getType() != SrcType) {
2619 Ok = false;
2620 break;
2621 }
2622 LargeOps.push_back(T->getOperand());
2623 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2624 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2625 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2626 SmallVector<const SCEV *, 8> LargeMulOps;
2627 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2628 if (const SCEVTruncateExpr *T =
2629 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2630 if (T->getOperand()->getType() != SrcType) {
2631 Ok = false;
2632 break;
2633 }
2634 LargeMulOps.push_back(T->getOperand());
2635 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2636 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2637 } else {
2638 Ok = false;
2639 break;
2640 }
2641 }
2642 if (Ok)
2643 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2644 } else {
2645 Ok = false;
2646 break;
2647 }
2648 }
2649 if (Ok) {
2650 // Evaluate the expression in the larger type.
2651 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2652 // If it folds to something simple, use it. Otherwise, don't.
2653 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2654 return getTruncateExpr(Fold, Ty);
2655 }
2656 }
2657
2658 if (Ops.size() == 2) {
2659 // Check if we have an expression of the form ((X + C1) - C2), where C1 and
2660 // C2 can be folded in a way that allows retaining wrapping flags of (X +
2661 // C1).
2662 const SCEV *A = Ops[0];
2663 const SCEV *B = Ops[1];
2664 auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
2665 auto *C = dyn_cast<SCEVConstant>(A);
2666 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2667 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2668 auto C2 = C->getAPInt();
2669 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;
2670
2671 APInt ConstAdd = C1 + C2;
2672 auto AddFlags = AddExpr->getNoWrapFlags();
2673 // Adding a smaller constant is NUW if the original AddExpr was NUW.
2674 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) &&
2675 ConstAdd.ule(C1)) {
2676 PreservedFlags =
2677 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
2678 }
2679
2680 // Adding a constant with the same sign and no greater magnitude is NSW,
2681 // if the original AddExpr was NSW.
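// E.g., for ((X + 5)<nsw> - 2): C1 = 5, C2 = -2, ConstAdd = 3; 3 has the
// same sign as 5 and |3| <= |5|, so the result (X + 3) keeps <nsw>.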
2682 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) &&
2683 C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
2684 ConstAdd.abs().ule(C1.abs())) {
2685 PreservedFlags =
2686 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
2687 }
2688
2689 if (PreservedFlags != SCEV::FlagAnyWrap) {
2690 SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
2691 NewOps[0] = getConstant(ConstAdd);
2692 return getAddExpr(NewOps, PreservedFlags);
2693 }
2694 }
2695 }
2696
2697 // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y)
2698 if (Ops.size() == 2) {
2699 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]);
2700 if (Mul && Mul->getNumOperands() == 2 &&
2701 Mul->getOperand(0)->isAllOnesValue()) {
2702 const SCEV *X;
2703 const SCEV *Y;
2704 if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {
2705 return getMulExpr(Y, getUDivExpr(X, Y));
2706 }
2707 }
2708 }
2709
2710 // Skip past any other cast SCEVs.
2711 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2712 ++Idx;
2713
2714 // If there are add operands, they would be next.
2715 if (Idx < Ops.size()) {
2716 bool DeletedAdd = false;
2717 // If the original flags and all inlined SCEVAddExprs are NUW, use the
2718 // common NUW flag for the expression after inlining. Other flags cannot be
2719 // preserved, because they may depend on the original order of operations.
2720 SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2721 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2722 if (Ops.size() > AddOpsInlineThreshold ||
2723 Add->getNumOperands() > AddOpsInlineThreshold)
2724 break;
2725 // If we have an add, expand the add operands onto the end of the operands
2726 // list.
2727 Ops.erase(Ops.begin()+Idx);
2728 append_range(Ops, Add->operands());
2729 DeletedAdd = true;
2730 CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2731 }
2732
2733 // If we deleted at least one add, we added operands to the end of the list,
2734 // and they are not necessarily sorted. Recurse to re-sort and re-simplify
2735 // any operands we just acquired.
2736 if (DeletedAdd)
2737 return getAddExpr(Ops, CommonFlags, Depth + 1);
2738 }
2739
2740 // Skip over the add expression until we get to a multiply.
2741 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2742 ++Idx;
2743
2744 // Check to see if there are any folding opportunities present with
2745 // operands multiplied by constant values.
2746 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2747 uint64_t BitWidth = getTypeSizeInBits(Ty);
2748 DenseMap<const SCEV *, APInt> M;
2749 SmallVector<const SCEV *, 8> NewOps;
2750 APInt AccumulatedConstant(BitWidth, 0);
2751 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2752 Ops, APInt(BitWidth, 1), *this)) {
2753 struct APIntCompare {
2754 bool operator()(const APInt &LHS, const APInt &RHS) const {
2755 return LHS.ult(RHS);
2756 }
2757 };
2758
2759 // Some interesting folding opportunity is present, so it's worthwhile to
2760 // re-generate the operands list. Group the operands by constant scale,
2761 // to avoid multiplying by the same constant scale multiple times.
2762 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2763 for (const SCEV *NewOp : NewOps)
2764 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2765 // Re-generate the operands list.
2766 Ops.clear(); 2767 if (AccumulatedConstant != 0) 2768 Ops.push_back(getConstant(AccumulatedConstant)); 2769 for (auto &MulOp : MulOpLists) { 2770 if (MulOp.first == 1) { 2771 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)); 2772 } else if (MulOp.first != 0) { 2773 Ops.push_back(getMulExpr( 2774 getConstant(MulOp.first), 2775 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2776 SCEV::FlagAnyWrap, Depth + 1)); 2777 } 2778 } 2779 if (Ops.empty()) 2780 return getZero(Ty); 2781 if (Ops.size() == 1) 2782 return Ops[0]; 2783 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2784 } 2785 } 2786 2787 // If we are adding something to a multiply expression, make sure the 2788 // something is not already an operand of the multiply. If so, merge it into 2789 // the multiply. 2790 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2791 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2792 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2793 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2794 if (isa<SCEVConstant>(MulOpSCEV)) 2795 continue; 2796 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2797 if (MulOpSCEV == Ops[AddOp]) { 2798 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2799 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2800 if (Mul->getNumOperands() != 2) { 2801 // If the multiply has more than two operands, we must get the 2802 // Y*Z term. 2803 SmallVector<const SCEV *, 4> MulOps( 2804 Mul->operands().take_front(MulOp)); 2805 append_range(MulOps, Mul->operands().drop_front(MulOp + 1)); 2806 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2807 } 2808 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2809 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2810 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2811 SCEV::FlagAnyWrap, Depth + 1); 2812 if (Ops.size() == 2) return OuterMul; 2813 if (AddOp < Idx) { 2814 Ops.erase(Ops.begin()+AddOp); 2815 Ops.erase(Ops.begin()+Idx-1); 2816 } else { 2817 Ops.erase(Ops.begin()+Idx); 2818 Ops.erase(Ops.begin()+AddOp-1); 2819 } 2820 Ops.push_back(OuterMul); 2821 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2822 } 2823 2824 // Check this multiply against other multiplies being added together. 2825 for (unsigned OtherMulIdx = Idx+1; 2826 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2827 ++OtherMulIdx) { 2828 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2829 // If MulOp occurs in OtherMul, we can fold the two multiplies 2830 // together. 
2831 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2832 OMulOp != e; ++OMulOp)
2833 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2834 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2835 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2836 if (Mul->getNumOperands() != 2) {
2837 SmallVector<const SCEV *, 4> MulOps(
2838 Mul->operands().take_front(MulOp));
2839 append_range(MulOps, Mul->operands().drop_front(MulOp+1));
2840 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2841 }
2842 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2843 if (OtherMul->getNumOperands() != 2) {
2844 SmallVector<const SCEV *, 4> MulOps(
2845 OtherMul->operands().take_front(OMulOp));
2846 append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1));
2847 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2848 }
2849 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
2850 const SCEV *InnerMulSum =
2851 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2852 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2853 SCEV::FlagAnyWrap, Depth + 1);
2854 if (Ops.size() == 2) return OuterMul;
2855 Ops.erase(Ops.begin()+Idx);
2856 Ops.erase(Ops.begin()+OtherMulIdx-1);
2857 Ops.push_back(OuterMul);
2858 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2859 }
2860 }
2861 }
2862 }
2863
2864 // If there are any add recurrences in the operands list, see if any other
2865 // added values are loop invariant. If so, we can fold them into the
2866 // recurrence.
2867 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2868 ++Idx;
2869
2870 // Scan over all recurrences, trying to fold loop invariants into them.
2871 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2872 // Scan all of the other operands to this add and add them to the vector if
2873 // they are loop invariant w.r.t. the recurrence.
2874 SmallVector<const SCEV *, 8> LIOps;
2875 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2876 const Loop *AddRecLoop = AddRec->getLoop();
2877 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2878 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2879 LIOps.push_back(Ops[i]);
2880 Ops.erase(Ops.begin()+i);
2881 --i; --e;
2882 }
2883
2884 // If we found some loop invariants, fold them into the recurrence.
2885 if (!LIOps.empty()) {
2886 // Compute nowrap flags for the addition of the loop-invariant ops and
2887 // the addrec. Temporarily push it as an operand for that purpose. These
2888 // flags are valid in the scope of the addrec only.
2889 LIOps.push_back(AddRec);
2890 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
2891 LIOps.pop_back();
2892
2893 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2894 LIOps.push_back(AddRec->getStart());
2895
2896 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2897
2898 // It is not in general safe to propagate flags valid on an add within
2899 // the addrec scope to one outside it. We must prove that the inner
2900 // scope is guaranteed to execute whenever the outer one does in order
2901 // to propagate safely. We know the program is undefined if poison is
2902 // produced on the inner scoped addrec. We also know that *for this use*
2903 // the outer scoped add can't overflow (because of the flags we just
2904 // computed for the inner scoped add) without the program being undefined.
2905 // Proving that entry to the outer scope necessitates entry to the inner
2906 // scope thus proves the program undefined if the flags would be violated
2907 // in the outer scope.
2908 SCEV::NoWrapFlags AddFlags = Flags;
2909 if (AddFlags != SCEV::FlagAnyWrap) {
2910 auto *DefI = getDefiningScopeBound(LIOps);
2911 auto *ReachI = &*AddRecLoop->getHeader()->begin();
2912 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2913 AddFlags = SCEV::FlagAnyWrap;
2914 }
2915 AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1);
2916
2917 // Build the new addrec. Propagate the NUW and NSW flags if both the
2918 // outer add and the inner addrec are guaranteed to have no overflow.
2919 // Always propagate NW.
2920 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2921 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2922
2923 // If all of the other operands were loop invariant, we are done.
2924 if (Ops.size() == 1) return NewRec;
2925
2926 // Otherwise, add the folded AddRec to the non-invariant parts.
2927 for (unsigned i = 0;; ++i)
2928 if (Ops[i] == AddRec) {
2929 Ops[i] = NewRec;
2930 break;
2931 }
2932 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2933 }
2934
2935 // Okay, if there weren't any loop invariants to be folded, check to see if
2936 // there are multiple AddRecs with the same loop induction variable being
2937 // added together. If so, we can fold them.
2938 for (unsigned OtherIdx = Idx+1;
2939 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2940 ++OtherIdx) {
2941 // We expect the AddRecExprs to be sorted in reverse dominance order,
2942 // so that the 1st found AddRecExpr is dominated by all others.
2943 assert(DT.dominates(
2944 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2945 AddRec->getLoop()->getHeader()) &&
2946 "AddRecExprs are not sorted in reverse dominance order?");
2947 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2948 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2949 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2950 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2951 ++OtherIdx) {
2952 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2953 if (OtherAddRec->getLoop() == AddRecLoop) {
2954 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2955 i != e; ++i) {
2956 if (i >= AddRecOps.size()) {
2957 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2958 break;
2959 }
2960 SmallVector<const SCEV *, 2> TwoOps = {
2961 AddRecOps[i], OtherAddRec->getOperand(i)};
2962 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2963 }
2964 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2965 }
2966 }
2967 // Step size has changed, so we cannot guarantee no self-wraparound.
2968 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2969 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2970 }
2971 }
2972
2973 // Otherwise couldn't fold anything into this recurrence. Move on to the
2974 // next one.
2975 }
2976
2977 // Okay, it looks like we really DO need an add expr. Check to see if we
2978 // already have one, otherwise create a new one.
2979 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2980 }
2981
2982 const SCEV *
2983 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2984 SCEV::NoWrapFlags Flags) {
2985 FoldingSetNodeID ID;
2986 ID.AddInteger(scAddExpr);
2987 for (const SCEV *Op : Ops)
2988 ID.AddPointer(Op);
2989 void *IP = nullptr;
2990 SCEVAddExpr *S =
2991 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2992 if (!S) {
2993 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2994 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2995 S = new (SCEVAllocator)
2996 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2997 UniqueSCEVs.InsertNode(S, IP);
2998 registerUser(S, Ops);
2999 }
3000 S->setNoWrapFlags(Flags);
3001 return S;
3002 }
3003
3004 const SCEV *
3005 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
3006 const Loop *L, SCEV::NoWrapFlags Flags) {
3007 FoldingSetNodeID ID;
3008 ID.AddInteger(scAddRecExpr);
3009 for (const SCEV *Op : Ops)
3010 ID.AddPointer(Op);
3011 ID.AddPointer(L);
3012 void *IP = nullptr;
3013 SCEVAddRecExpr *S =
3014 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
3015 if (!S) {
3016 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3017 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3018 S = new (SCEVAllocator)
3019 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
3020 UniqueSCEVs.InsertNode(S, IP);
3021 LoopUsers[L].push_back(S);
3022 registerUser(S, Ops);
3023 }
3024 setNoWrapFlags(S, Flags);
3025 return S;
3026 }
3027
3028 const SCEV *
3029 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
3030 SCEV::NoWrapFlags Flags) {
3031 FoldingSetNodeID ID;
3032 ID.AddInteger(scMulExpr);
3033 for (const SCEV *Op : Ops)
3034 ID.AddPointer(Op);
3035 void *IP = nullptr;
3036 SCEVMulExpr *S =
3037 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
3038 if (!S) {
3039 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3040 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3041 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
3042 O, Ops.size());
3043 UniqueSCEVs.InsertNode(S, IP);
3044 registerUser(S, Ops);
3045 }
3046 S->setNoWrapFlags(Flags);
3047 return S;
3048 }
3049
3050 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
3051 uint64_t k = i*j;
3052 if (j > 1 && k / j != i) Overflow = true;
3053 return k;
3054 }
3055
3056 /// Compute the result of "n choose k", the binomial coefficient. If an
3057 /// intermediate computation overflows, Overflow will be set and the return will
3058 /// be garbage. Overflow is not cleared on absence of overflow.
3059 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
3060 // We use the multiplicative formula:
3061 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
3062 // At each iteration, we multiply by the next term of the numerator and
3063 // divide by the next term of the denominator, in increasing order. This
3064 // division always produces an integral result, and helps reduce the chance
3065 // of overflow in the intermediate computations. However, we can still
3066 // overflow even when the final result would fit.
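// Worked example (added for illustration, not from the original comment):
// Choose(5, 2) runs two iterations: r = 5/1 = 5, then r = (5*4)/2 = 10,
// which is C(5, 2). The symmetry reduction below would first turn a call
// like Choose(5, 3) into the cheaper Choose(5, 2).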
3067
3068 if (n == 0 || n == k) return 1;
3069 if (k > n) return 0;
3070
3071 if (k > n/2)
3072 k = n-k;
3073
3074 uint64_t r = 1;
3075 for (uint64_t i = 1; i <= k; ++i) {
3076 r = umul_ov(r, n-(i-1), Overflow);
3077 r /= i;
3078 }
3079 return r;
3080 }
3081
3082 /// Determine if any of the operands in this SCEV are a constant or if
3083 /// any of the add or multiply expressions in this SCEV contain a constant.
3084 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
3085 struct FindConstantInAddMulChain {
3086 bool FoundConstant = false;
3087
3088 bool follow(const SCEV *S) {
3089 FoundConstant |= isa<SCEVConstant>(S);
3090 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3091 }
3092
3093 bool isDone() const {
3094 return FoundConstant;
3095 }
3096 };
3097
3098 FindConstantInAddMulChain F;
3099 SCEVTraversal<FindConstantInAddMulChain> ST(F);
3100 ST.visitAll(StartExpr);
3101 return F.FoundConstant;
3102 }
3103
3104 /// Get a canonical multiply expression, or something simpler if possible.
3105 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
3106 SCEV::NoWrapFlags OrigFlags,
3107 unsigned Depth) {
3108 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
3109 "only nuw or nsw allowed");
3110 assert(!Ops.empty() && "Cannot get empty mul!");
3111 if (Ops.size() == 1) return Ops[0];
3112 #ifndef NDEBUG
3113 Type *ETy = Ops[0]->getType();
3114 assert(!ETy->isPointerTy());
3115 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3116 assert(Ops[i]->getType() == ETy &&
3117 "SCEVMulExpr operand types don't match!");
3118 #endif
3119
3120 // Sort by complexity; this groups all similar expression types together.
3121 GroupByComplexity(Ops, &LI, DT);
3122
3123 // If there are any constants, fold them together.
3124 unsigned Idx = 0;
3125 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3126 ++Idx;
3127 assert(Idx < Ops.size());
3128 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3129 // We found two constants, fold them together!
3130 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3131 if (Ops.size() == 2) return Ops[0];
3132 Ops.erase(Ops.begin()+1); // Erase the folded element
3133 LHSC = cast<SCEVConstant>(Ops[0]);
3134 }
3135
3136 // If we have a multiply of zero, it will always be zero.
3137 if (LHSC->getValue()->isZero())
3138 return LHSC;
3139
3140 // If we are left with a constant one being multiplied, strip it off.
3141 if (LHSC->getValue()->isOne()) {
3142 Ops.erase(Ops.begin());
3143 --Idx;
3144 }
3145
3146 if (Ops.size() == 1)
3147 return Ops[0];
3148 }
3149
3150 // Delay expensive flag strengthening until necessary.
3151 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3152 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3153 };
3154
3155 // Limit the recursion depth.
3156 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3157 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3158
3159 if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
3160 // Don't strengthen flags if we have no new information.
3161 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3162 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3163 Mul->setNoWrapFlags(ComputeFlags(Ops));
3164 return S;
3165 }
3166
3167 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3168 if (Ops.size() == 2) {
3169 // C1*(C2+V) -> C1*C2 + C1*V
3170 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3171 // If any of Add's ops are Adds or Muls with a constant, apply this
3172 // transformation as well.
3173 //
3174 // TODO: There are some cases where this transformation is not
3175 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
3176 // this transformation should be narrowed down.
3177 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) {
3178 const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0),
3179 SCEV::FlagAnyWrap, Depth + 1);
3180 const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1),
3181 SCEV::FlagAnyWrap, Depth + 1);
3182 return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
3183 }
3184
3185 if (Ops[0]->isAllOnesValue()) {
3186 // If we have a mul by -1 of an add, try distributing the -1 among the
3187 // add operands.
3188 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3189 SmallVector<const SCEV *, 4> NewOps;
3190 bool AnyFolded = false;
3191 for (const SCEV *AddOp : Add->operands()) {
3192 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3193 Depth + 1);
3194 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3195 NewOps.push_back(Mul);
3196 }
3197 if (AnyFolded)
3198 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3199 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3200 // Negation preserves a recurrence's no self-wrap property.
3201 SmallVector<const SCEV *, 4> Operands;
3202 for (const SCEV *AddRecOp : AddRec->operands())
3203 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3204 Depth + 1));
3205 // Let M be the minimum representable signed value. An nsw AddRec
3206 // multiplied by -1 can have signed overflow if and only if it takes a
3207 // value of M: M * (-1) would stay M and (M + 1) * (-1) would be the
3208 // maximum signed value. In all other cases signed overflow is
3209 // impossible.
3210 auto FlagsMask = SCEV::FlagNW;
3211 if (hasFlags(AddRec->getNoWrapFlags(), SCEV::FlagNSW)) {
3212 auto MinInt =
3213 APInt::getSignedMinValue(getTypeSizeInBits(AddRec->getType()));
3214 if (getSignedRangeMin(AddRec) != MinInt)
3215 FlagsMask = setFlags(FlagsMask, SCEV::FlagNSW);
3216 }
3217 return getAddRecExpr(Operands, AddRec->getLoop(),
3218 AddRec->getNoWrapFlags(FlagsMask));
3219 }
3220 }
3221 }
3222 }
3223
3224 // Skip over the add expressions until we get to a multiply.
3225 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3226 ++Idx;
3227
3228 // If there are mul operands, inline them all into this expression.
3229 if (Idx < Ops.size()) {
3230 bool DeletedMul = false;
3231 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3232 if (Ops.size() > MulOpsInlineThreshold)
3233 break;
3234 // If we have a mul, expand the mul operands onto the end of the
3235 // operands list.
3236 Ops.erase(Ops.begin()+Idx);
3237 append_range(Ops, Mul->operands());
3238 DeletedMul = true;
3239 }
3240
3241 // If we deleted at least one mul, we added operands to the end of the
3242 // list, and they are not necessarily sorted. Recurse to re-sort and
3243 // resimplify any operands we just acquired.
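// Illustrative instance (not from the original comment): ((2 * X) * (3 * Y))
// arrives here as the operand list [2*X, 3*Y]; inlining turns it into
// [2, X, 3, Y], and the recursive call below re-sorts the list and folds the
// constants, yielding [6, X, Y].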
3244 if (DeletedMul)
3245 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3246 }
3247
3248 // If there are any add recurrences in the operands list, see if any other
3249 // added values are loop invariant. If so, we can fold them into the
3250 // recurrence.
3251 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3252 ++Idx;
3253
3254 // Scan over all recurrences, trying to fold loop invariants into them.
3255 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3256 // Scan all of the other operands to this mul and add them to the vector
3257 // if they are loop invariant w.r.t. the recurrence.
3258 SmallVector<const SCEV *, 8> LIOps;
3259 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
3260 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3261 if (isAvailableAtLoopEntry(Ops[i], AddRec->getLoop())) {
3262 LIOps.push_back(Ops[i]);
3263 Ops.erase(Ops.begin()+i);
3264 --i; --e;
3265 }
3266
3267 // If we found some loop invariants, fold them into the recurrence.
3268 if (!LIOps.empty()) {
3269 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
3270 SmallVector<const SCEV *, 4> NewOps;
3271 NewOps.reserve(AddRec->getNumOperands());
3272 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
3273
3274 // If both the mul and addrec are nuw, we can preserve nuw.
3275 // If both the mul and addrec are nsw, we can only preserve nsw if either
3276 // a) they are also nuw, or
3277 // b) all multiplications of addrec operands with scale are nsw.
3278 SCEV::NoWrapFlags Flags =
3279 AddRec->getNoWrapFlags(ComputeFlags({Scale, AddRec}));
3280
3281 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3282 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
3283 SCEV::FlagAnyWrap, Depth + 1));
3284
3285 if (hasFlags(Flags, SCEV::FlagNSW) && !hasFlags(Flags, SCEV::FlagNUW)) {
3286 ConstantRange NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
3287 Instruction::Mul, getSignedRange(Scale),
3288 OverflowingBinaryOperator::NoSignedWrap);
3289 if (!NSWRegion.contains(getSignedRange(AddRec->getOperand(i))))
3290 Flags = clearFlags(Flags, SCEV::FlagNSW);
3291 }
3292 }
3293
3294 const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(), Flags);
3295
3296 // If all of the other operands were loop invariant, we are done.
3297 if (Ops.size() == 1) return NewRec;
3298
3299 // Otherwise, multiply the folded AddRec by the non-invariant parts.
3300 for (unsigned i = 0;; ++i)
3301 if (Ops[i] == AddRec) {
3302 Ops[i] = NewRec;
3303 break;
3304 }
3305 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3306 }
3307
3308 // Okay, if there weren't any loop invariants to be folded, check to see
3309 // if there are multiple AddRecs with the same loop induction variable
3310 // being multiplied together. If so, we can fold them.
3311
3312 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3313 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3314 // choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
3315 // ]]],+,...up to x=2n}.
3316 // Note that the arguments to choose() are always integers with values
3317 // known at compile time, never SCEV objects.
3318 //
3319 // The implementation avoids pointless extra computations when the two
3320 // addrecs are of different length (mathematically, it's equivalent to
3321 // an infinite stream of zeros on the right).
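// A concrete instance of the formula (added for illustration): with
// {0,+,1}<L> = n, the product {0,+,1}<L> * {0,+,1}<L> = n^2
// = 0 + 1*C(n,1) + 2*C(n,2), i.e. the quadratic recurrence {0,+,1,+,2}<L>.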
3322 bool OpsModified = false;
3323 for (unsigned OtherIdx = Idx+1;
3324 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3325 ++OtherIdx) {
3326 const SCEVAddRecExpr *OtherAddRec =
3327 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3328 if (!OtherAddRec || OtherAddRec->getLoop() != AddRec->getLoop())
3329 continue;
3330
3331 // Limit the max number of arguments to avoid creation of unreasonably big
3332 // SCEVAddRecs with very complex operands.
3333 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3334 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3335 continue;
3336
3337 bool Overflow = false;
3338 Type *Ty = AddRec->getType();
3339 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3340 SmallVector<const SCEV*, 7> AddRecOps;
3341 for (int x = 0, xe = AddRec->getNumOperands() +
3342 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3343 SmallVector <const SCEV *, 7> SumOps;
3344 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3345 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3346 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3347 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3348 z < ze && !Overflow; ++z) {
3349 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3350 uint64_t Coeff;
3351 if (LargerThan64Bits)
3352 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3353 else
3354 Coeff = Coeff1*Coeff2;
3355 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3356 const SCEV *Term1 = AddRec->getOperand(y-z);
3357 const SCEV *Term2 = OtherAddRec->getOperand(z);
3358 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3359 SCEV::FlagAnyWrap, Depth + 1));
3360 }
3361 }
3362 if (SumOps.empty())
3363 SumOps.push_back(getZero(Ty));
3364 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3365 }
3366 if (!Overflow) {
3367 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
3368 SCEV::FlagAnyWrap);
3369 if (Ops.size() == 2) return NewAddRec;
3370 Ops[Idx] = NewAddRec;
3371 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3372 OpsModified = true;
3373 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3374 if (!AddRec)
3375 break;
3376 }
3377 }
3378 if (OpsModified)
3379 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3380
3381 // Otherwise couldn't fold anything into this recurrence. Move on to the
3382 // next one.
3383 }
3384
3385 // Okay, it looks like we really DO need a mul expr. Check to see if we
3386 // already have one, otherwise create a new one.
3387 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3388 }
3389
3390 /// Build an unsigned remainder expression based on unsigned division.
3391 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3392 const SCEV *RHS) {
3393 assert(getEffectiveSCEVType(LHS->getType()) ==
3394 getEffectiveSCEVType(RHS->getType()) &&
3395 "SCEVURemExpr operand types don't match!");
3396
3397 // Short-circuit easy cases.
3398 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3399 // If the constant is one, the result is trivial.
3400 if (RHSC->getValue()->isOne())
3401 return getZero(LHS->getType()); // X urem 1 --> 0
3402
3403 // If the constant is a power of two, fold into a zext(trunc(LHS)).
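// E.g. (illustrative): for i32 %x, %x urem 8 keeps only the low three bits,
// so it folds to (zext i3 (trunc i32 %x to i3) to i32).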
3404 if (RHSC->getAPInt().isPowerOf2()) {
3405 Type *FullTy = LHS->getType();
3406 Type *TruncTy =
3407 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3408 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3409 }
3410 }
3411
3412 // Fall back to computing %x urem %y as %x -<nuw> ((%x udiv %y) *<nuw> %y)
3413 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3414 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3415 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3416 }
3417
3418 /// Get a canonical unsigned division expression, or something simpler if
3419 /// possible.
3420 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3421 const SCEV *RHS) {
3422 assert(!LHS->getType()->isPointerTy() &&
3423 "SCEVUDivExpr operand can't be pointer!");
3424 assert(LHS->getType() == RHS->getType() &&
3425 "SCEVUDivExpr operand types don't match!");
3426
3427 FoldingSetNodeID ID;
3428 ID.AddInteger(scUDivExpr);
3429 ID.AddPointer(LHS);
3430 ID.AddPointer(RHS);
3431 void *IP = nullptr;
3432 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3433 return S;
3434
3435 // 0 udiv Y == 0
3436 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3437 if (LHSC->getValue()->isZero())
3438 return LHS;
3439
3440 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3441 if (RHSC->getValue()->isOne())
3442 return LHS; // X udiv 1 --> X
3443 // If the denominator is zero, the result of the udiv is undefined. Don't
3444 // try to analyze it, because the resolution chosen here may differ from
3445 // the resolution chosen in other parts of the compiler.
3446 if (!RHSC->getValue()->isZero()) {
3447 // Determine if the division can be folded into the operands of
3448 // the LHS expression.
3449 // TODO: Generalize this to non-constants by using known-bits information.
3450 Type *Ty = LHS->getType();
3451 unsigned LZ = RHSC->getAPInt().countl_zero();
3452 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3453 // For non-power-of-two values, effectively round the value up to the
3454 // nearest power of two.
3455 if (!RHSC->getAPInt().isPowerOf2())
3456 ++MaxShiftAmt;
3457 IntegerType *ExtTy =
3458 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3459 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3460 if (const SCEVConstant *Step =
3461 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3462 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3463 const APInt &StepInt = Step->getAPInt();
3464 const APInt &DivInt = RHSC->getAPInt();
3465 if (!StepInt.urem(DivInt) &&
3466 getZeroExtendExpr(AR, ExtTy) ==
3467 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3468 getZeroExtendExpr(Step, ExtTy),
3469 AR->getLoop(), SCEV::FlagAnyWrap)) {
3470 SmallVector<const SCEV *, 4> Operands;
3471 for (const SCEV *Op : AR->operands())
3472 Operands.push_back(getUDivExpr(Op, RHS));
3473 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3474 }
3475 // Get a canonical UDivExpr for a recurrence.
3476 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3477 // We can currently only fold X%N if X is constant.
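// Illustrative instance: {5,+,2}/4 becomes {4,+,2}/4. Assuming the addrec
// does not wrap, both produce 1, 1, 2, 2, 3, 3, ..., but the adjusted
// start is a multiple of the step, so the result unifies better with
// other expressions.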
3478 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3479 if (StartC && !DivInt.urem(StepInt) && 3480 getZeroExtendExpr(AR, ExtTy) == 3481 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3482 getZeroExtendExpr(Step, ExtTy), 3483 AR->getLoop(), SCEV::FlagAnyWrap)) { 3484 const APInt &StartInt = StartC->getAPInt(); 3485 const APInt &StartRem = StartInt.urem(StepInt); 3486 if (StartRem != 0) { 3487 const SCEV *NewLHS = 3488 getAddRecExpr(getConstant(StartInt - StartRem), Step, 3489 AR->getLoop(), SCEV::FlagNW); 3490 if (LHS != NewLHS) { 3491 LHS = NewLHS; 3492 3493 // Reset the ID to include the new LHS, and check if it is 3494 // already cached. 3495 ID.clear(); 3496 ID.AddInteger(scUDivExpr); 3497 ID.AddPointer(LHS); 3498 ID.AddPointer(RHS); 3499 IP = nullptr; 3500 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 3501 return S; 3502 } 3503 } 3504 } 3505 } 3506 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3507 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3508 SmallVector<const SCEV *, 4> Operands; 3509 for (const SCEV *Op : M->operands()) 3510 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3511 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3512 // Find an operand that's safely divisible. 3513 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3514 const SCEV *Op = M->getOperand(i); 3515 const SCEV *Div = getUDivExpr(Op, RHSC); 3516 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3517 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3518 Operands[i] = Div; 3519 return getMulExpr(Operands); 3520 } 3521 } 3522 } 3523 3524 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3525 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3526 if (auto *DivisorConstant = 3527 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3528 bool Overflow = false; 3529 APInt NewRHS = 3530 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3531 if (Overflow) { 3532 return getConstant(RHSC->getType(), 0, false); 3533 } 3534 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3535 } 3536 } 3537 3538 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3539 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3540 SmallVector<const SCEV *, 4> Operands; 3541 for (const SCEV *Op : A->operands()) 3542 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3543 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3544 Operands.clear(); 3545 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3546 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3547 if (isa<SCEVUDivExpr>(Op) || 3548 getMulExpr(Op, RHS) != A->getOperand(i)) 3549 break; 3550 Operands.push_back(Op); 3551 } 3552 if (Operands.size() == A->getNumOperands()) 3553 return getAddExpr(Operands); 3554 } 3555 } 3556 3557 // Fold if both operands are constant. 3558 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) 3559 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt())); 3560 } 3561 } 3562 3563 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3564 // changes). Make sure we get a new one. 
3565 IP = nullptr; 3566 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3567 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3568 LHS, RHS); 3569 UniqueSCEVs.InsertNode(S, IP); 3570 registerUser(S, {LHS, RHS}); 3571 return S; 3572 } 3573 3574 APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3575 APInt A = C1->getAPInt().abs(); 3576 APInt B = C2->getAPInt().abs(); 3577 uint32_t ABW = A.getBitWidth(); 3578 uint32_t BBW = B.getBitWidth(); 3579 3580 if (ABW > BBW) 3581 B = B.zext(ABW); 3582 else if (ABW < BBW) 3583 A = A.zext(BBW); 3584 3585 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3586 } 3587 3588 /// Get a canonical unsigned division expression, or something simpler if 3589 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3590 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3591 /// it's not exact because the udiv may be clearing bits. 3592 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3593 const SCEV *RHS) { 3594 // TODO: we could try to find factors in all sorts of things, but for now we 3595 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3596 // end of this file for inspiration. 3597 3598 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3599 if (!Mul || !Mul->hasNoUnsignedWrap()) 3600 return getUDivExpr(LHS, RHS); 3601 3602 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3603 // If the mulexpr multiplies by a constant, then that constant must be the 3604 // first element of the mulexpr. 3605 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3606 if (LHSCst == RHSCst) { 3607 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands())); 3608 return getMulExpr(Operands); 3609 } 3610 3611 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3612 // that there's a factor provided by one of the other terms. We need to 3613 // check. 3614 APInt Factor = gcd(LHSCst, RHSCst); 3615 if (!Factor.isIntN(1)) { 3616 LHSCst = 3617 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3618 RHSCst = 3619 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3620 SmallVector<const SCEV *, 2> Operands; 3621 Operands.push_back(LHSCst); 3622 append_range(Operands, Mul->operands().drop_front()); 3623 LHS = getMulExpr(Operands); 3624 RHS = RHSCst; 3625 Mul = dyn_cast<SCEVMulExpr>(LHS); 3626 if (!Mul) 3627 return getUDivExactExpr(LHS, RHS); 3628 } 3629 } 3630 } 3631 3632 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3633 if (Mul->getOperand(i) == RHS) { 3634 SmallVector<const SCEV *, 2> Operands; 3635 append_range(Operands, Mul->operands().take_front(i)); 3636 append_range(Operands, Mul->operands().drop_front(i + 1)); 3637 return getMulExpr(Operands); 3638 } 3639 } 3640 3641 return getUDivExpr(LHS, RHS); 3642 } 3643 3644 /// Get an add recurrence expression for the specified loop. Simplify the 3645 /// expression as much as possible. 
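/// For instance (illustrative): if Step is itself the recurrence {A,+,B}<L>,
/// the pair (Start, Step) folds below into the single higher-order
/// recurrence {Start,+,A,+,B}<L>.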
3646 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3647 const Loop *L,
3648 SCEV::NoWrapFlags Flags) {
3649 SmallVector<const SCEV *, 4> Operands;
3650 Operands.push_back(Start);
3651 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3652 if (StepChrec->getLoop() == L) {
3653 append_range(Operands, StepChrec->operands());
3654 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3655 }
3656
3657 Operands.push_back(Step);
3658 return getAddRecExpr(Operands, L, Flags);
3659 }
3660
3661 /// Get an add recurrence expression for the specified loop. Simplify the
3662 /// expression as much as possible.
3663 const SCEV *
3664 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3665 const Loop *L, SCEV::NoWrapFlags Flags) {
3666 if (Operands.size() == 1) return Operands[0];
3667 #ifndef NDEBUG
3668 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3669 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3670 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3671 "SCEVAddRecExpr operand types don't match!");
3672 assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3673 }
3674 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3675 assert(isAvailableAtLoopEntry(Operands[i], L) &&
3676 "SCEVAddRecExpr operand is not available at loop entry!");
3677 #endif
3678
3679 if (Operands.back()->isZero()) {
3680 Operands.pop_back();
3681 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3682 }
3683
3684 // It's tempting to call getConstantMaxBackedgeTakenCount here and
3685 // use that information to infer NUW and NSW flags. However, computing a
3686 // BE count requires calling getAddRecExpr, so we may not yet have a
3687 // meaningful BE count at this point (and if we don't, we'd be stuck
3688 // with a SCEVCouldNotCompute as the cached BE count).
3689
3690 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3691
3692 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3693 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3694 const Loop *NestedLoop = NestedAR->getLoop();
3695 if (L->contains(NestedLoop)
3696 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3697 : (!NestedLoop->contains(L) &&
3698 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3699 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3700 Operands[0] = NestedAR->getStart();
3701 // AddRecs require their operands be loop-invariant with respect to their
3702 // loops. Don't perform this transformation if it would break this
3703 // requirement.
3704 bool AllInvariant = all_of(
3705 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3706
3707 if (AllInvariant) {
3708 // Create a recurrence for the outer loop with the same step size.
3709 //
3710 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3711 // inner recurrence has the same property.
3712 SCEV::NoWrapFlags OuterFlags =
3713 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3714
3715 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3716 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3717 return isLoopInvariant(Op, NestedLoop);
3718 });
3719
3720 if (AllInvariant) {
3721 // Ok, both add recurrences are valid after the transformation.
3722 // 3723 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3724 // the outer recurrence has the same property. 3725 SCEV::NoWrapFlags InnerFlags = 3726 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3727 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3728 } 3729 } 3730 // Reset Operands to its original state. 3731 Operands[0] = NestedAR; 3732 } 3733 } 3734 3735 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3736 // already have one, otherwise create a new one. 3737 return getOrCreateAddRecExpr(Operands, L, Flags); 3738 } 3739 3740 const SCEV * 3741 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3742 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3743 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3744 // getSCEV(Base)->getType() has the same address space as Base->getType() 3745 // because SCEV::getType() preserves the address space. 3746 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); 3747 const bool AssumeInBoundsFlags = [&]() { 3748 if (!GEP->isInBounds()) 3749 return false; 3750 3751 // We'd like to propagate flags from the IR to the corresponding SCEV nodes, 3752 // but to do that, we have to ensure that said flag is valid in the entire 3753 // defined scope of the SCEV. 3754 auto *GEPI = dyn_cast<Instruction>(GEP); 3755 // TODO: non-instructions have global scope. We might be able to prove 3756 // some global scope cases 3757 return GEPI && isSCEVExprNeverPoison(GEPI); 3758 }(); 3759 3760 SCEV::NoWrapFlags OffsetWrap = 3761 AssumeInBoundsFlags ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3762 3763 Type *CurTy = GEP->getType(); 3764 bool FirstIter = true; 3765 SmallVector<const SCEV *, 4> Offsets; 3766 for (const SCEV *IndexExpr : IndexExprs) { 3767 // Compute the (potentially symbolic) offset in bytes for this index. 3768 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3769 // For a struct, add the member offset. 3770 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3771 unsigned FieldNo = Index->getZExtValue(); 3772 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3773 Offsets.push_back(FieldOffset); 3774 3775 // Update CurTy to the type of the field at Index. 3776 CurTy = STy->getTypeAtIndex(Index); 3777 } else { 3778 // Update CurTy to its element type. 3779 if (FirstIter) { 3780 assert(isa<PointerType>(CurTy) && 3781 "The first index of a GEP indexes a pointer"); 3782 CurTy = GEP->getSourceElementType(); 3783 FirstIter = false; 3784 } else { 3785 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3786 } 3787 // For an array, add the element offset, explicitly scaled. 3788 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3789 // Getelementptr indices are signed. 3790 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3791 3792 // Multiply the index by the element size to compute the element offset. 3793 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3794 Offsets.push_back(LocalOffset); 3795 } 3796 } 3797 3798 // Handle degenerate case of GEP without offsets. 3799 if (Offsets.empty()) 3800 return BaseExpr; 3801 3802 // Add the offsets together, assuming nsw if inbounds. 3803 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3804 // Add the base address and the offset. We cannot use the nsw flag, as the 3805 // base address is unsigned. However, if we know that the offset is 3806 // non-negative, we can use nuw. 
3807 SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset) 3808 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3809 auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap); 3810 assert(BaseExpr->getType() == GEPExpr->getType() && 3811 "GEP should not change type mid-flight."); 3812 return GEPExpr; 3813 } 3814 3815 SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3816 ArrayRef<const SCEV *> Ops) { 3817 FoldingSetNodeID ID; 3818 ID.AddInteger(SCEVType); 3819 for (const SCEV *Op : Ops) 3820 ID.AddPointer(Op); 3821 void *IP = nullptr; 3822 return UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 3823 } 3824 3825 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3826 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3827 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3828 } 3829 3830 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3831 SmallVectorImpl<const SCEV *> &Ops) { 3832 assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!"); 3833 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3834 if (Ops.size() == 1) return Ops[0]; 3835 #ifndef NDEBUG 3836 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3837 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 3838 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3839 "Operand types don't match!"); 3840 assert(Ops[0]->getType()->isPointerTy() == 3841 Ops[i]->getType()->isPointerTy() && 3842 "min/max should be consistently pointerish"); 3843 } 3844 #endif 3845 3846 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3847 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3848 3849 // Sort by complexity, this groups all similar expression types together. 3850 GroupByComplexity(Ops, &LI, DT); 3851 3852 // Check if we have created the same expression before. 3853 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) { 3854 return S; 3855 } 3856 3857 // If there are any constants, fold them together. 3858 unsigned Idx = 0; 3859 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3860 ++Idx; 3861 assert(Idx < Ops.size()); 3862 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3863 switch (Kind) { 3864 case scSMaxExpr: 3865 return APIntOps::smax(LHS, RHS); 3866 case scSMinExpr: 3867 return APIntOps::smin(LHS, RHS); 3868 case scUMaxExpr: 3869 return APIntOps::umax(LHS, RHS); 3870 case scUMinExpr: 3871 return APIntOps::umin(LHS, RHS); 3872 default: 3873 llvm_unreachable("Unknown SCEV min/max opcode"); 3874 } 3875 }; 3876 3877 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3878 // We found two constants, fold them together! 3879 ConstantInt *Fold = ConstantInt::get( 3880 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3881 Ops[0] = getConstant(Fold); 3882 Ops.erase(Ops.begin()+1); // Erase the folded element 3883 if (Ops.size() == 1) return Ops[0]; 3884 LHSC = cast<SCEVConstant>(Ops[0]); 3885 } 3886 3887 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3888 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3889 3890 if (IsMax ? IsMinV : IsMaxV) { 3891 // If we are left with a constant minimum(/maximum)-int, strip it off. 3892 Ops.erase(Ops.begin()); 3893 --Idx; 3894 } else if (IsMax ? IsMaxV : IsMinV) { 3895 // If we have a max(/min) with a constant maximum(/minimum)-int, 3896 // it will always be the extremum. 
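// E.g. (illustrative): smax(SIGNED_MAX, X) is always SIGNED_MAX, and
// umin(0, X) is always 0.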
3897 return LHSC; 3898 } 3899 3900 if (Ops.size() == 1) return Ops[0]; 3901 } 3902 3903 // Find the first operation of the same kind 3904 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3905 ++Idx; 3906 3907 // Check to see if one of the operands is of the same kind. If so, expand its 3908 // operands onto our operand list, and recurse to simplify. 3909 if (Idx < Ops.size()) { 3910 bool DeletedAny = false; 3911 while (Ops[Idx]->getSCEVType() == Kind) { 3912 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3913 Ops.erase(Ops.begin()+Idx); 3914 append_range(Ops, SMME->operands()); 3915 DeletedAny = true; 3916 } 3917 3918 if (DeletedAny) 3919 return getMinMaxExpr(Kind, Ops); 3920 } 3921 3922 // Okay, check to see if the same value occurs in the operand list twice. If 3923 // so, delete one. Since we sorted the list, these values are required to 3924 // be adjacent. 3925 llvm::CmpInst::Predicate GEPred = 3926 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3927 llvm::CmpInst::Predicate LEPred = 3928 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3929 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3930 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3931 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3932 if (Ops[i] == Ops[i + 1] || 3933 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3934 // X op Y op Y --> X op Y 3935 // X op Y --> X, if we know X, Y are ordered appropriately 3936 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3937 --i; 3938 --e; 3939 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3940 Ops[i + 1])) { 3941 // X op Y --> Y, if we know X, Y are ordered appropriately 3942 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3943 --i; 3944 --e; 3945 } 3946 } 3947 3948 if (Ops.size() == 1) return Ops[0]; 3949 3950 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3951 3952 // Okay, it looks like we really DO need an expr. Check to see if we 3953 // already have one, otherwise create a new one. 3954 FoldingSetNodeID ID; 3955 ID.AddInteger(Kind); 3956 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3957 ID.AddPointer(Ops[i]); 3958 void *IP = nullptr; 3959 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 3960 if (ExistingSCEV) 3961 return ExistingSCEV; 3962 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3963 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3964 SCEV *S = new (SCEVAllocator) 3965 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); 3966 3967 UniqueSCEVs.InsertNode(S, IP); 3968 registerUser(S, Ops); 3969 return S; 3970 } 3971 3972 namespace { 3973 3974 class SCEVSequentialMinMaxDeduplicatingVisitor final 3975 : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, 3976 std::optional<const SCEV *>> { 3977 using RetVal = std::optional<const SCEV *>; 3978 using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>; 3979 3980 ScalarEvolution &SE; 3981 const SCEVTypes RootKind; // Must be a sequential min/max expression. 3982 const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind. 3983 SmallPtrSet<const SCEV *, 16> SeenOps; 3984 3985 bool canRecurseInto(SCEVTypes Kind) const { 3986 // We can only recurse into the SCEV expression of the same effective type 3987 // as the type of our root SCEV expression. 
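// E.g. (illustrative): for a umin_seq root we look through nested umin_seq
// and plain umin expressions, but not through smax or any other kind.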
3988 return RootKind == Kind || NonSequentialRootKind == Kind; 3989 }; 3990 3991 RetVal visitAnyMinMaxExpr(const SCEV *S) { 3992 assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) && 3993 "Only for min/max expressions."); 3994 SCEVTypes Kind = S->getSCEVType(); 3995 3996 if (!canRecurseInto(Kind)) 3997 return S; 3998 3999 auto *NAry = cast<SCEVNAryExpr>(S); 4000 SmallVector<const SCEV *> NewOps; 4001 bool Changed = visit(Kind, NAry->operands(), NewOps); 4002 4003 if (!Changed) 4004 return S; 4005 if (NewOps.empty()) 4006 return std::nullopt; 4007 4008 return isa<SCEVSequentialMinMaxExpr>(S) 4009 ? SE.getSequentialMinMaxExpr(Kind, NewOps) 4010 : SE.getMinMaxExpr(Kind, NewOps); 4011 } 4012 4013 RetVal visit(const SCEV *S) { 4014 // Has the whole operand been seen already? 4015 if (!SeenOps.insert(S).second) 4016 return std::nullopt; 4017 return Base::visit(S); 4018 } 4019 4020 public: 4021 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE, 4022 SCEVTypes RootKind) 4023 : SE(SE), RootKind(RootKind), 4024 NonSequentialRootKind( 4025 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( 4026 RootKind)) {} 4027 4028 bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps, 4029 SmallVectorImpl<const SCEV *> &NewOps) { 4030 bool Changed = false; 4031 SmallVector<const SCEV *> Ops; 4032 Ops.reserve(OrigOps.size()); 4033 4034 for (const SCEV *Op : OrigOps) { 4035 RetVal NewOp = visit(Op); 4036 if (NewOp != Op) 4037 Changed = true; 4038 if (NewOp) 4039 Ops.emplace_back(*NewOp); 4040 } 4041 4042 if (Changed) 4043 NewOps = std::move(Ops); 4044 return Changed; 4045 } 4046 4047 RetVal visitConstant(const SCEVConstant *Constant) { return Constant; } 4048 4049 RetVal visitVScale(const SCEVVScale *VScale) { return VScale; } 4050 4051 RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; } 4052 4053 RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; } 4054 4055 RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; } 4056 4057 RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; } 4058 4059 RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; } 4060 4061 RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; } 4062 4063 RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; } 4064 4065 RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } 4066 4067 RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) { 4068 return visitAnyMinMaxExpr(Expr); 4069 } 4070 4071 RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) { 4072 return visitAnyMinMaxExpr(Expr); 4073 } 4074 4075 RetVal visitSMinExpr(const SCEVSMinExpr *Expr) { 4076 return visitAnyMinMaxExpr(Expr); 4077 } 4078 4079 RetVal visitUMinExpr(const SCEVUMinExpr *Expr) { 4080 return visitAnyMinMaxExpr(Expr); 4081 } 4082 4083 RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) { 4084 return visitAnyMinMaxExpr(Expr); 4085 } 4086 4087 RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; } 4088 4089 RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; } 4090 }; 4091 4092 } // namespace 4093 4094 static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind) { 4095 switch (Kind) { 4096 case scConstant: 4097 case scVScale: 4098 case scTruncate: 4099 case scZeroExtend: 4100 case scSignExtend: 4101 case scPtrToInt: 4102 case scAddExpr: 4103 case scMulExpr: 4104 case scUDivExpr: 4105 case scAddRecExpr: 4106 case scUMaxExpr: 4107 case scSMaxExpr: 4108 case scUMinExpr: 4109 
case scSMinExpr: 4110 case scUnknown: 4111 // If any operand is poison, the whole expression is poison. 4112 return true; 4113 case scSequentialUMinExpr: 4114 // FIXME: if the *first* operand is poison, the whole expression is poison. 4115 return false; // Pessimistically, say that it does not propagate poison. 4116 case scCouldNotCompute: 4117 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 4118 } 4119 llvm_unreachable("Unknown SCEV kind!"); 4120 } 4121 4122 namespace { 4123 // The only way poison may be introduced in a SCEV expression is from a 4124 // poison SCEVUnknown (ConstantExprs are also represented as SCEVUnknown, 4125 // not SCEVConstant). Notably, nowrap flags in SCEV nodes can *not* 4126 // introduce poison -- they encode guaranteed, non-speculated knowledge. 4127 // 4128 // Additionally, all SCEV nodes propagate poison from inputs to outputs, 4129 // with the notable exception of umin_seq, where only poison from the first 4130 // operand is (unconditionally) propagated. 4131 struct SCEVPoisonCollector { 4132 bool LookThroughMaybePoisonBlocking; 4133 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison; 4134 SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking) 4135 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {} 4136 4137 bool follow(const SCEV *S) { 4138 if (!LookThroughMaybePoisonBlocking && 4139 !scevUnconditionallyPropagatesPoisonFromOperands(S->getSCEVType())) 4140 return false; 4141 4142 if (auto *SU = dyn_cast<SCEVUnknown>(S)) { 4143 if (!isGuaranteedNotToBePoison(SU->getValue())) 4144 MaybePoison.insert(SU); 4145 } 4146 return true; 4147 } 4148 bool isDone() const { return false; } 4149 }; 4150 } // namespace 4151 4152 /// Return true if V is poison given that AssumedPoison is already poison. 4153 static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) { 4154 // First collect all SCEVs that might result in AssumedPoison to be poison. 4155 // We need to look through potentially poison-blocking operations here, 4156 // because we want to find all SCEVs that *might* result in poison, not only 4157 // those that are *required* to. 4158 SCEVPoisonCollector PC1(/* LookThroughMaybePoisonBlocking */ true); 4159 visitAll(AssumedPoison, PC1); 4160 4161 // AssumedPoison is never poison. As the assumption is false, the implication 4162 // is true. Don't bother walking the other SCEV in this case. 4163 if (PC1.MaybePoison.empty()) 4164 return true; 4165 4166 // Collect all SCEVs in S that, if poison, *will* result in S being poison 4167 // as well. We cannot look through potentially poison-blocking operations 4168 // here, as their arguments only *may* make the result poison. 4169 SCEVPoisonCollector PC2(/* LookThroughMaybePoisonBlocking */ false); 4170 visitAll(S, PC2); 4171 4172 // Make sure that no matter which SCEV in PC1.MaybePoison is actually poison, 4173 // it will also make S poison by being part of PC2.MaybePoison. 
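// E.g. (illustrative): impliesPoison(%x, %x + %y) holds, because a poison
// %x unconditionally poisons the add; impliesPoison(%x + %y, %x) does not,
// because %y may be the poison source without affecting %x.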
4174 return all_of(PC1.MaybePoison, [&](const SCEVUnknown *S) {
4175 return PC2.MaybePoison.contains(S);
4176 });
4177 }
4178
4179 void ScalarEvolution::getPoisonGeneratingValues(
4180 SmallPtrSetImpl<const Value *> &Result, const SCEV *S) {
4181 SCEVPoisonCollector PC(/* LookThroughMaybePoisonBlocking */ false);
4182 visitAll(S, PC);
4183 for (const SCEVUnknown *SU : PC.MaybePoison)
4184 Result.insert(SU->getValue());
4185 }
4186
4187 bool ScalarEvolution::canReuseInstruction(
4188 const SCEV *S, Instruction *I,
4189 SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
4190 // If the instruction cannot be poison, it's always safe to reuse.
4191 if (programUndefinedIfPoison(I))
4192 return true;
4193
4194 // Otherwise, it is possible that I is more poisonous than S. Collect the
4195 // poison-contributors of S, and then check whether I has any additional
4196 // poison-contributors. Poison that is contributed through poison-generating
4197 // flags is handled by dropping those flags instead.
4198 SmallPtrSet<const Value *, 8> PoisonVals;
4199 getPoisonGeneratingValues(PoisonVals, S);
4200
4201 SmallVector<Value *> Worklist;
4202 SmallPtrSet<Value *, 8> Visited;
4203 Worklist.push_back(I);
4204 while (!Worklist.empty()) {
4205 Value *V = Worklist.pop_back_val();
4206 if (!Visited.insert(V).second)
4207 continue;
4208
4209 // Avoid walking large instruction graphs.
4210 if (Visited.size() > 16)
4211 return false;
4212
4213 // Either the value can't be poison, or S would also be poison if it
4214 // is.
4215 if (PoisonVals.contains(V) || isGuaranteedNotToBePoison(V))
4216 continue;
4217
4218 auto *I = dyn_cast<Instruction>(V);
4219 if (!I)
4220 return false;
4221
4222 // Disjoint-or instructions are interpreted as adds by SCEV. However, we
4223 // can't replace an arbitrary add with a disjoint-or, even if we drop the
4224 // flag. We would need to convert the or into an add.
4225 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
4226 if (PDI->isDisjoint())
4227 return false;
4228
4229 // FIXME: Ignore vscale, even though it technically could be poison. Do this
4230 // because SCEV currently assumes it can't be poison. Remove this special
4231 // case once we properly model when vscale can be poison.
4232 if (auto *II = dyn_cast<IntrinsicInst>(I);
4233 II && II->getIntrinsicID() == Intrinsic::vscale)
4234 continue;
4235
4236 if (canCreatePoison(cast<Operator>(I), /*ConsiderFlagsAndMetadata*/ false))
4237 return false;
4238
4239 // If the instruction can't create poison, we can recurse to its operands.
4240 if (I->hasPoisonGeneratingFlagsOrMetadata()) 4241 DropPoisonGeneratingInsts.push_back(I); 4242 4243 for (Value *Op : I->operands()) 4244 Worklist.push_back(Op); 4245 } 4246 return true; 4247 } 4248 4249 const SCEV * 4250 ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind, 4251 SmallVectorImpl<const SCEV *> &Ops) { 4252 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) && 4253 "Not a SCEVSequentialMinMaxExpr!"); 4254 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 4255 if (Ops.size() == 1) 4256 return Ops[0]; 4257 #ifndef NDEBUG 4258 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 4259 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 4260 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 4261 "Operand types don't match!"); 4262 assert(Ops[0]->getType()->isPointerTy() == 4263 Ops[i]->getType()->isPointerTy() && 4264 "min/max should be consistently pointerish"); 4265 } 4266 #endif 4267 4268 // Note that SCEVSequentialMinMaxExpr is *NOT* commutative, 4269 // so we can *NOT* do any kind of sorting of the expressions! 4270 4271 // Check if we have created the same expression before. 4272 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) 4273 return S; 4274 4275 // FIXME: there are *some* simplifications that we can do here. 4276 4277 // Keep only the first instance of an operand. 4278 { 4279 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind); 4280 bool Changed = Deduplicator.visit(Kind, Ops, Ops); 4281 if (Changed) 4282 return getSequentialMinMaxExpr(Kind, Ops); 4283 } 4284 4285 // Check to see if one of the operands is of the same kind. If so, expand its 4286 // operands onto our operand list, and recurse to simplify. 4287 { 4288 unsigned Idx = 0; 4289 bool DeletedAny = false; 4290 while (Idx < Ops.size()) { 4291 if (Ops[Idx]->getSCEVType() != Kind) { 4292 ++Idx; 4293 continue; 4294 } 4295 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]); 4296 Ops.erase(Ops.begin() + Idx); 4297 Ops.insert(Ops.begin() + Idx, SMME->operands().begin(), 4298 SMME->operands().end()); 4299 DeletedAny = true; 4300 } 4301 4302 if (DeletedAny) 4303 return getSequentialMinMaxExpr(Kind, Ops); 4304 } 4305 4306 const SCEV *SaturationPoint; 4307 ICmpInst::Predicate Pred; 4308 switch (Kind) { 4309 case scSequentialUMinExpr: 4310 SaturationPoint = getZero(Ops[0]->getType()); 4311 Pred = ICmpInst::ICMP_ULE; 4312 break; 4313 default: 4314 llvm_unreachable("Not a sequential min/max type."); 4315 } 4316 4317 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 4318 // We can replace %x umin_seq %y with %x umin %y if either: 4319 // * %y being poison implies %x is also poison. 4320 // * %x cannot be the saturating value (e.g. zero for umin). 4321 if (::impliesPoison(Ops[i], Ops[i - 1]) || 4322 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1], 4323 SaturationPoint)) { 4324 SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]}; 4325 Ops[i - 1] = getMinMaxExpr( 4326 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind), 4327 SeqOps); 4328 Ops.erase(Ops.begin() + i); 4329 return getSequentialMinMaxExpr(Kind, Ops); 4330 } 4331 // Fold %x umin_seq %y to %x if %x ule %y. 4332 // TODO: We might be able to prove the predicate for a later operand. 4333 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) { 4334 Ops.erase(Ops.begin() + i); 4335 return getSequentialMinMaxExpr(Kind, Ops); 4336 } 4337 } 4338 4339 // Okay, it looks like we really DO need an expr. 
Check to see if we 4340 // already have one, otherwise create a new one. 4341 FoldingSetNodeID ID; 4342 ID.AddInteger(Kind); 4343 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 4344 ID.AddPointer(Ops[i]); 4345 void *IP = nullptr; 4346 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 4347 if (ExistingSCEV) 4348 return ExistingSCEV; 4349 4350 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 4351 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 4352 SCEV *S = new (SCEVAllocator) 4353 SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); 4354 4355 UniqueSCEVs.InsertNode(S, IP); 4356 registerUser(S, Ops); 4357 return S; 4358 } 4359 4360 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { 4361 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 4362 return getSMaxExpr(Ops); 4363 } 4364 4365 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 4366 return getMinMaxExpr(scSMaxExpr, Ops); 4367 } 4368 4369 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { 4370 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 4371 return getUMaxExpr(Ops); 4372 } 4373 4374 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 4375 return getMinMaxExpr(scUMaxExpr, Ops); 4376 } 4377 4378 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 4379 const SCEV *RHS) { 4380 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4381 return getSMinExpr(Ops); 4382 } 4383 4384 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 4385 return getMinMaxExpr(scSMinExpr, Ops); 4386 } 4387 4388 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS, 4389 bool Sequential) { 4390 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4391 return getUMinExpr(Ops, Sequential); 4392 } 4393 4394 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops, 4395 bool Sequential) { 4396 return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops) 4397 : getMinMaxExpr(scUMinExpr, Ops); 4398 } 4399 4400 const SCEV * 4401 ScalarEvolution::getSizeOfExpr(Type *IntTy, TypeSize Size) { 4402 const SCEV *Res = getConstant(IntTy, Size.getKnownMinValue()); 4403 if (Size.isScalable()) 4404 Res = getMulExpr(Res, getVScale(IntTy)); 4405 return Res; 4406 } 4407 4408 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 4409 return getSizeOfExpr(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 4410 } 4411 4412 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) { 4413 return getSizeOfExpr(IntTy, getDataLayout().getTypeStoreSize(StoreTy)); 4414 } 4415 4416 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 4417 StructType *STy, 4418 unsigned FieldNo) { 4419 // We can bypass creating a target-independent constant expression and then 4420 // folding it back into a ConstantInt. This is just a compile-time 4421 // optimization. 4422 const StructLayout *SL = getDataLayout().getStructLayout(STy); 4423 assert(!SL->getSizeInBits().isScalable() && 4424 "Cannot get offset for structure containing scalable vector types"); 4425 return getConstant(IntTy, SL->getElementOffset(FieldNo)); 4426 } 4427 4428 const SCEV *ScalarEvolution::getUnknown(Value *V) { 4429 // Don't attempt to do anything other than create a SCEVUnknown object 4430 // here. 
createSCEV only calls getUnknown after checking for all other
4431 // interesting possibilities, and any other code that calls getUnknown
4432 // is doing so in order to hide a value from SCEV canonicalization.
4433
4434 FoldingSetNodeID ID;
4435 ID.AddInteger(scUnknown);
4436 ID.AddPointer(V);
4437 void *IP = nullptr;
4438 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
4439 assert(cast<SCEVUnknown>(S)->getValue() == V &&
4440 "Stale SCEVUnknown in uniquing map!");
4441 return S;
4442 }
4443 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
4444 FirstUnknown);
4445 FirstUnknown = cast<SCEVUnknown>(S);
4446 UniqueSCEVs.InsertNode(S, IP);
4447 return S;
4448 }
4449
4450 //===----------------------------------------------------------------------===//
4451 // Basic SCEV Analysis and PHI Idiom Recognition Code
4452 //
4453
4454 /// Test if values of the given type are analyzable within the SCEV
4455 /// framework. This primarily includes integer types, and it can optionally
4456 /// include pointer types if the ScalarEvolution class has access to
4457 /// target-specific information.
4458 bool ScalarEvolution::isSCEVable(Type *Ty) const {
4459 // Integers and pointers are always SCEVable.
4460 return Ty->isIntOrPtrTy();
4461 }
4462
4463 /// Return the size in bits of the specified type, for which isSCEVable must
4464 /// return true.
4465 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
4466 assert(isSCEVable(Ty) && "Type is not SCEVable!");
4467 if (Ty->isPointerTy())
4468 return getDataLayout().getIndexTypeSizeInBits(Ty);
4469 return getDataLayout().getTypeSizeInBits(Ty);
4470 }
4471
4472 /// Return a type with the same bitwidth as the given type and which represents
4473 /// how SCEV will treat the given type, for which isSCEVable must return
4474 /// true. For pointer types, this is the pointer-index-sized integer type.
4475 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
4476 assert(isSCEVable(Ty) && "Type is not SCEVable!");
4477
4478 if (Ty->isIntegerTy())
4479 return Ty;
4480
4481 // The only other supported type is pointer.
4482 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
4483 return getDataLayout().getIndexType(Ty);
4484 }
4485
4486 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
4487 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
4488 }
4489
4490 bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
4491 const SCEV *B) {
4492 // For a valid use point to exist, the defining scope of one operand
4493 // must dominate the other.
4494 bool PreciseA, PreciseB;
4495 auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
4496 auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
4497 if (!PreciseA || !PreciseB)
4498 // Can't tell.
4499 return false; 4500 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) || 4501 DT.dominates(ScopeB, ScopeA); 4502 } 4503 4504 const SCEV *ScalarEvolution::getCouldNotCompute() { 4505 return CouldNotCompute.get(); 4506 } 4507 4508 bool ScalarEvolution::checkValidity(const SCEV *S) const { 4509 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 4510 auto *SU = dyn_cast<SCEVUnknown>(S); 4511 return SU && SU->getValue() == nullptr; 4512 }); 4513 4514 return !ContainsNulls; 4515 } 4516 4517 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 4518 HasRecMapType::iterator I = HasRecMap.find(S); 4519 if (I != HasRecMap.end()) 4520 return I->second; 4521 4522 bool FoundAddRec = 4523 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 4524 HasRecMap.insert({S, FoundAddRec}); 4525 return FoundAddRec; 4526 } 4527 4528 /// Return the ValueOffsetPair set for \p S. \p S can be represented 4529 /// by the value and offset from any ValueOffsetPair in the set. 4530 ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) { 4531 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 4532 if (SI == ExprValueMap.end()) 4533 return std::nullopt; 4534 return SI->second.getArrayRef(); 4535 } 4536 4537 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 4538 /// cannot be used separately. eraseValueFromMap should be used to remove 4539 /// V from ValueExprMap and ExprValueMap at the same time. 4540 void ScalarEvolution::eraseValueFromMap(Value *V) { 4541 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 4542 if (I != ValueExprMap.end()) { 4543 auto EVIt = ExprValueMap.find(I->second); 4544 bool Removed = EVIt->second.remove(V); 4545 (void) Removed; 4546 assert(Removed && "Value not in ExprValueMap?"); 4547 ValueExprMap.erase(I); 4548 } 4549 } 4550 4551 void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) { 4552 // A recursive query may have already computed the SCEV. It should be 4553 // equivalent, but may not necessarily be exactly the same, e.g. due to lazily 4554 // inferred nowrap flags. 4555 auto It = ValueExprMap.find_as(V); 4556 if (It == ValueExprMap.end()) { 4557 ValueExprMap.insert({SCEVCallbackVH(V, this), S}); 4558 ExprValueMap[S].insert(V); 4559 } 4560 } 4561 4562 /// Return an existing SCEV if it exists, otherwise analyze the expression and 4563 /// create a new one. 
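///
/// A minimal usage sketch (illustrative only, not part of the original file;
/// it assumes a client pass already holds a ScalarEvolution &SE and a
/// SCEVable Value *V):
///
///   if (SE.isSCEVable(V->getType()))
///     if (const auto *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(V)))
///       // AR->getStart() and AR->getStepRecurrence(SE) now describe V as
///       // an induction of the enclosing loop.
///       (void)AR;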
4564 const SCEV *ScalarEvolution::getSCEV(Value *V) { 4565 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4566 4567 if (const SCEV *S = getExistingSCEV(V)) 4568 return S; 4569 return createSCEVIter(V); 4570 } 4571 4572 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 4573 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4574 4575 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 4576 if (I != ValueExprMap.end()) { 4577 const SCEV *S = I->second; 4578 assert(checkValidity(S) && 4579 "existing SCEV has not been properly invalidated"); 4580 return S; 4581 } 4582 return nullptr; 4583 } 4584 4585 /// Return a SCEV corresponding to -V = -1*V 4586 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 4587 SCEV::NoWrapFlags Flags) { 4588 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4589 return getConstant( 4590 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 4591 4592 Type *Ty = V->getType(); 4593 Ty = getEffectiveSCEVType(Ty); 4594 return getMulExpr(V, getMinusOne(Ty), Flags); 4595 } 4596 4597 /// If Expr computes ~A, return A else return nullptr 4598 static const SCEV *MatchNotExpr(const SCEV *Expr) { 4599 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 4600 if (!Add || Add->getNumOperands() != 2 || 4601 !Add->getOperand(0)->isAllOnesValue()) 4602 return nullptr; 4603 4604 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 4605 if (!AddRHS || AddRHS->getNumOperands() != 2 || 4606 !AddRHS->getOperand(0)->isAllOnesValue()) 4607 return nullptr; 4608 4609 return AddRHS->getOperand(1); 4610 } 4611 4612 /// Return a SCEV corresponding to ~V = -1-V 4613 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 4614 assert(!V->getType()->isPointerTy() && "Can't negate pointer"); 4615 4616 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4617 return getConstant( 4618 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 4619 4620 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 4621 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 4622 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 4623 SmallVector<const SCEV *, 2> MatchedOperands; 4624 for (const SCEV *Operand : MME->operands()) { 4625 const SCEV *Matched = MatchNotExpr(Operand); 4626 if (!Matched) 4627 return (const SCEV *)nullptr; 4628 MatchedOperands.push_back(Matched); 4629 } 4630 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 4631 MatchedOperands); 4632 }; 4633 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4634 return Replaced; 4635 } 4636 4637 Type *Ty = V->getType(); 4638 Ty = getEffectiveSCEVType(Ty); 4639 return getMinusSCEV(getMinusOne(Ty), V); 4640 } 4641 4642 const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) { 4643 assert(P->getType()->isPointerTy()); 4644 4645 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) { 4646 // The base of an AddRec is the first operand. 4647 SmallVector<const SCEV *> Ops{AddRec->operands()}; 4648 Ops[0] = removePointerBase(Ops[0]); 4649 // Don't try to transfer nowrap flags for now. We could in some cases 4650 // (for example, if pointer operand of the AddRec is a SCEVUnknown). 4651 return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); 4652 } 4653 if (auto *Add = dyn_cast<SCEVAddExpr>(P)) { 4654 // The base of an Add is the pointer operand. 
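// (Illustrative example, not from the original comment: for P = (8 + %p + %x)
// with pointer base %p, the loop below locates %p, the recursive call maps it
// to zero, and the rebuilt add is (8 + %x).)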
4655 SmallVector<const SCEV *> Ops{Add->operands()}; 4656 const SCEV **PtrOp = nullptr; 4657 for (const SCEV *&AddOp : Ops) { 4658 if (AddOp->getType()->isPointerTy()) { 4659 assert(!PtrOp && "Cannot have multiple pointer ops"); 4660 PtrOp = &AddOp; 4661 } 4662 } 4663 *PtrOp = removePointerBase(*PtrOp); 4664 // Don't try to transfer nowrap flags for now. We could in some cases 4665 // (for example, if the pointer operand of the Add is a SCEVUnknown). 4666 return getAddExpr(Ops); 4667 } 4668 // Any other expression must be a pointer base. 4669 return getZero(P->getType()); 4670 } 4671 4672 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 4673 SCEV::NoWrapFlags Flags, 4674 unsigned Depth) { 4675 // Fast path: X - X --> 0. 4676 if (LHS == RHS) 4677 return getZero(LHS->getType()); 4678 4679 // If we subtract two pointers with different pointer bases, bail. 4680 // Eventually, we're going to add an assertion to getMulExpr that we 4681 // can't multiply by a pointer. 4682 if (RHS->getType()->isPointerTy()) { 4683 if (!LHS->getType()->isPointerTy() || 4684 getPointerBase(LHS) != getPointerBase(RHS)) 4685 return getCouldNotCompute(); 4686 LHS = removePointerBase(LHS); 4687 RHS = removePointerBase(RHS); 4688 } 4689 4690 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 4691 // makes it so that we cannot make much use of NUW. 4692 auto AddFlags = SCEV::FlagAnyWrap; 4693 const bool RHSIsNotMinSigned = 4694 !getSignedRangeMin(RHS).isMinSignedValue(); 4695 if (hasFlags(Flags, SCEV::FlagNSW)) { 4696 // Let M be the minimum representable signed value. Then (-1)*RHS 4697 // signed-wraps if and only if RHS is M. That can happen even for 4698 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 4699 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 4700 // (-1)*RHS, we need to prove that RHS != M. 4701 // 4702 // If LHS is non-negative and we know that LHS - RHS does not 4703 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 4704 // either by proving that RHS > M or that LHS >= 0. 4705 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 4706 AddFlags = SCEV::FlagNSW; 4707 } 4708 } 4709 4710 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 4711 // RHS is NSW and LHS >= 0. 4712 // 4713 // The difficulty here is that the NSW flag may have been proven 4714 // relative to a loop that is to be found in a recurrence in LHS and 4715 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 4716 // larger scope than intended. 4717 auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap;
4718
4719 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4720 }
4721
4722 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4723 unsigned Depth) {
4724 Type *SrcTy = V->getType();
4725 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4726 "Cannot truncate or zero extend with non-integer arguments!");
4727 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4728 return V; // No conversion
4729 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4730 return getTruncateExpr(V, Ty, Depth);
4731 return getZeroExtendExpr(V, Ty, Depth);
4732 }
4733
4734 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4735 unsigned Depth) {
4736 Type *SrcTy = V->getType();
4737 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4738 "Cannot truncate or sign extend with non-integer arguments!");
4739 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4740 return V; // No conversion
4741 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4742 return getTruncateExpr(V, Ty, Depth);
4743 return getSignExtendExpr(V, Ty, Depth);
4744 }
4745
4746 const SCEV *
4747 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4748 Type *SrcTy = V->getType();
4749 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4750 "Cannot noop or zero extend with non-integer arguments!");
4751 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4752 "getNoopOrZeroExtend cannot truncate!");
4753 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4754 return V; // No conversion
4755 return getZeroExtendExpr(V, Ty);
4756 }
4757
4758 const SCEV *
4759 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4760 Type *SrcTy = V->getType();
4761 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4762 "Cannot noop or sign extend with non-integer arguments!");
4763 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4764 "getNoopOrSignExtend cannot truncate!");
4765 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4766 return V; // No conversion
4767 return getSignExtendExpr(V, Ty);
4768 }
4769
4770 const SCEV *
4771 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4772 Type *SrcTy = V->getType();
4773 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4774 "Cannot noop or any extend with non-integer arguments!");
4775 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4776 "getNoopOrAnyExtend cannot truncate!");
4777 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4778 return V; // No conversion
4779 return getAnyExtendExpr(V, Ty);
4780 }
4781
4782 const SCEV *
4783 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4784 Type *SrcTy = V->getType();
4785 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4786 "Cannot truncate or noop with non-integer arguments!");
4787 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4788 "getTruncateOrNoop cannot extend!");
4789 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4790 return V; // No conversion
4791 return getTruncateExpr(V, Ty);
4792 }
4793
4794 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4795 const SCEV *RHS) {
4796 const SCEV *PromotedLHS = LHS;
4797 const SCEV *PromotedRHS = RHS;
4798
4799 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4800 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4801 else
4802 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4803
4804 return
getUMaxExpr(PromotedLHS, PromotedRHS); 4805 } 4806 4807 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4808 const SCEV *RHS, 4809 bool Sequential) { 4810 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4811 return getUMinFromMismatchedTypes(Ops, Sequential); 4812 } 4813 4814 const SCEV * 4815 ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops, 4816 bool Sequential) { 4817 assert(!Ops.empty() && "At least one operand must be!"); 4818 // Trivial case. 4819 if (Ops.size() == 1) 4820 return Ops[0]; 4821 4822 // Find the max type first. 4823 Type *MaxType = nullptr; 4824 for (const auto *S : Ops) 4825 if (MaxType) 4826 MaxType = getWiderType(MaxType, S->getType()); 4827 else 4828 MaxType = S->getType(); 4829 assert(MaxType && "Failed to find maximum type!"); 4830 4831 // Extend all ops to max type. 4832 SmallVector<const SCEV *, 2> PromotedOps; 4833 for (const auto *S : Ops) 4834 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4835 4836 // Generate umin. 4837 return getUMinExpr(PromotedOps, Sequential); 4838 } 4839 4840 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4841 // A pointer operand may evaluate to a nonpointer expression, such as null. 4842 if (!V->getType()->isPointerTy()) 4843 return V; 4844 4845 while (true) { 4846 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 4847 V = AddRec->getStart(); 4848 } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) { 4849 const SCEV *PtrOp = nullptr; 4850 for (const SCEV *AddOp : Add->operands()) { 4851 if (AddOp->getType()->isPointerTy()) { 4852 assert(!PtrOp && "Cannot have multiple pointer ops"); 4853 PtrOp = AddOp; 4854 } 4855 } 4856 assert(PtrOp && "Must have pointer op"); 4857 V = PtrOp; 4858 } else // Not something we can look further into. 4859 return V; 4860 } 4861 } 4862 4863 /// Push users of the given Instruction onto the given Worklist. 4864 static void PushDefUseChildren(Instruction *I, 4865 SmallVectorImpl<Instruction *> &Worklist, 4866 SmallPtrSetImpl<Instruction *> &Visited) { 4867 // Push the def-use children onto the Worklist stack. 4868 for (User *U : I->users()) { 4869 auto *UserInsn = cast<Instruction>(U); 4870 if (Visited.insert(UserInsn).second) 4871 Worklist.push_back(UserInsn); 4872 } 4873 } 4874 4875 namespace { 4876 4877 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start 4878 /// expression in case its Loop is L. If it is not L then 4879 /// if IgnoreOtherLoops is true then use AddRec itself 4880 /// otherwise rewrite cannot be done. 4881 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4882 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 4883 public: 4884 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 4885 bool IgnoreOtherLoops = true) { 4886 SCEVInitRewriter Rewriter(L, SE); 4887 const SCEV *Result = Rewriter.visit(S); 4888 if (Rewriter.hasSeenLoopVariantSCEVUnknown()) 4889 return SE.getCouldNotCompute(); 4890 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops 4891 ? SE.getCouldNotCompute() 4892 : Result; 4893 } 4894 4895 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4896 if (!SE.isLoopInvariant(Expr, L)) 4897 SeenLoopVariantSCEVUnknown = true; 4898 return Expr; 4899 } 4900 4901 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4902 // Only re-write AddRecExprs for this loop. 
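// (Illustrative, not from the original comment: visiting {%start,+,%step}<%L>
// yields %start, while an addrec of any other loop is returned unchanged and
// merely noted in SeenOtherLoops.)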
4903 if (Expr->getLoop() == L) 4904 return Expr->getStart(); 4905 SeenOtherLoops = true; 4906 return Expr; 4907 } 4908 4909 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4910 4911 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4912 4913 private: 4914 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4915 : SCEVRewriteVisitor(SE), L(L) {} 4916 4917 const Loop *L; 4918 bool SeenLoopVariantSCEVUnknown = false; 4919 bool SeenOtherLoops = false; 4920 }; 4921 4922 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post 4923 /// increment expression in case its Loop is L. If it is not L then 4924 /// use AddRec itself. 4925 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4926 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4927 public: 4928 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4929 SCEVPostIncRewriter Rewriter(L, SE); 4930 const SCEV *Result = Rewriter.visit(S); 4931 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4932 ? SE.getCouldNotCompute() 4933 : Result; 4934 } 4935 4936 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4937 if (!SE.isLoopInvariant(Expr, L)) 4938 SeenLoopVariantSCEVUnknown = true; 4939 return Expr; 4940 } 4941 4942 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4943 // Only re-write AddRecExprs for this loop. 4944 if (Expr->getLoop() == L) 4945 return Expr->getPostIncExpr(SE); 4946 SeenOtherLoops = true; 4947 return Expr; 4948 } 4949 4950 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4951 4952 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4953 4954 private: 4955 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4956 : SCEVRewriteVisitor(SE), L(L) {} 4957 4958 const Loop *L; 4959 bool SeenLoopVariantSCEVUnknown = false; 4960 bool SeenOtherLoops = false; 4961 }; 4962 4963 /// This class evaluates the compare condition by matching it against the 4964 /// condition of loop latch. If there is a match we assume a true value 4965 /// for the condition while building SCEV nodes. 4966 class SCEVBackedgeConditionFolder 4967 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4968 public: 4969 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4970 ScalarEvolution &SE) { 4971 bool IsPosBECond = false; 4972 Value *BECond = nullptr; 4973 if (BasicBlock *Latch = L->getLoopLatch()) { 4974 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4975 if (BI && BI->isConditional()) { 4976 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4977 "Both outgoing branches should not target same header!"); 4978 BECond = BI->getCondition(); 4979 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4980 } else { 4981 return S; 4982 } 4983 } 4984 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4985 return Rewriter.visit(S); 4986 } 4987 4988 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4989 const SCEV *Result = Expr; 4990 bool InvariantF = SE.isLoopInvariant(Expr, L); 4991 4992 if (!InvariantF) { 4993 Instruction *I = cast<Instruction>(Expr->getValue()); 4994 switch (I->getOpcode()) { 4995 case Instruction::Select: { 4996 SelectInst *SI = cast<SelectInst>(I); 4997 std::optional<const SCEV *> Res = 4998 compareWithBackedgeCondition(SI->getCondition()); 4999 if (Res) { 5000 bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne(); 5001 Result = SE.getSCEV(IsOne ? 
SI->getTrueValue() : SI->getFalseValue()); 5002 } 5003 break; 5004 } 5005 default: { 5006 std::optional<const SCEV *> Res = compareWithBackedgeCondition(I); 5007 if (Res) 5008 Result = *Res; 5009 break; 5010 } 5011 } 5012 } 5013 return Result; 5014 } 5015 5016 private: 5017 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 5018 bool IsPosBECond, ScalarEvolution &SE) 5019 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 5020 IsPositiveBECond(IsPosBECond) {} 5021 5022 std::optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 5023 5024 const Loop *L; 5025 /// Loop back condition. 5026 Value *BackedgeCond = nullptr; 5027 /// Set to true if loop back is on positive branch condition. 5028 bool IsPositiveBECond; 5029 }; 5030 5031 std::optional<const SCEV *> 5032 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 5033 5034 // If value matches the backedge condition for loop latch, 5035 // then return a constant evolution node based on loopback 5036 // branch taken. 5037 if (BackedgeCond == IC) 5038 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 5039 : SE.getZero(Type::getInt1Ty(SE.getContext())); 5040 return std::nullopt; 5041 } 5042 5043 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 5044 public: 5045 static const SCEV *rewrite(const SCEV *S, const Loop *L, 5046 ScalarEvolution &SE) { 5047 SCEVShiftRewriter Rewriter(L, SE); 5048 const SCEV *Result = Rewriter.visit(S); 5049 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 5050 } 5051 5052 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 5053 // Only allow AddRecExprs for this loop. 5054 if (!SE.isLoopInvariant(Expr, L)) 5055 Valid = false; 5056 return Expr; 5057 } 5058 5059 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 5060 if (Expr->getLoop() == L && Expr->isAffine()) 5061 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 5062 Valid = false; 5063 return Expr; 5064 } 5065 5066 bool isValid() { return Valid; } 5067 5068 private: 5069 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 5070 : SCEVRewriteVisitor(SE), L(L) {} 5071 5072 const Loop *L; 5073 bool Valid = true; 5074 }; 5075 5076 } // end anonymous namespace 5077 5078 SCEV::NoWrapFlags 5079 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 5080 if (!AR->isAffine()) 5081 return SCEV::FlagAnyWrap; 5082 5083 using OBO = OverflowingBinaryOperator; 5084 5085 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 5086 5087 if (!AR->hasNoSelfWrap()) { 5088 const SCEV *BECount = getConstantMaxBackedgeTakenCount(AR->getLoop()); 5089 if (const SCEVConstant *BECountMax = dyn_cast<SCEVConstant>(BECount)) { 5090 ConstantRange StepCR = getSignedRange(AR->getStepRecurrence(*this)); 5091 const APInt &BECountAP = BECountMax->getAPInt(); 5092 unsigned NoOverflowBitWidth = 5093 BECountAP.getActiveBits() + StepCR.getMinSignedBits(); 5094 if (NoOverflowBitWidth <= getTypeSizeInBits(AR->getType())) 5095 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNW); 5096 } 5097 } 5098 5099 if (!AR->hasNoSignedWrap()) { 5100 ConstantRange AddRecRange = getSignedRange(AR); 5101 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 5102 5103 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 5104 Instruction::Add, IncRange, OBO::NoSignedWrap); 5105 if (NSWRegion.contains(AddRecRange)) 5106 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 5107 } 5108 5109 if (!AR->hasNoUnsignedWrap()) { 5110 ConstantRange AddRecRange = 
getUnsignedRange(AR);
5111 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
5112
5113 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
5114 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5115 if (NUWRegion.contains(AddRecRange))
5116 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
5117 }
5118
5119 return Result;
5120 }
5121
5122 SCEV::NoWrapFlags
5123 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
5124 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
5125
5126 if (AR->hasNoSignedWrap())
5127 return Result;
5128
5129 if (!AR->isAffine())
5130 return Result;
5131
5132 // This function can be expensive, only try to prove NSW once per AddRec.
5133 if (!SignedWrapViaInductionTried.insert(AR).second)
5134 return Result;
5135
5136 const SCEV *Step = AR->getStepRecurrence(*this);
5137 const Loop *L = AR->getLoop();
5138
5139 // Check whether the backedge-taken count is SCEVCouldNotCompute.
5140 // Note that this serves two purposes: It filters out loops that are
5141 // simply not analyzable, and it covers the case where this code is
5142 // being called from within backedge-taken count analysis, such that
5143 // attempting to ask for the backedge-taken count would likely result
5144 // in infinite recursion. In the latter case, the analysis code will
5145 // cope with a conservative value, and it will take care to purge
5146 // that value once it has finished.
5147 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
5148
5149 // Normally, in the cases we can prove no-overflow via a
5150 // backedge guarding condition, we can also compute a backedge
5151 // taken count for the loop. The exceptions are assumptions and
5152 // guards present in the loop -- SCEV is not great at exploiting
5153 // these to compute max backedge taken counts, but can still use
5154 // these to prove lack of overflow. Use this fact to avoid
5155 // doing extra work that may not pay off.
5156
5157 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5158 AC.assumptions().empty())
5159 return Result;
5160
5161 // If the backedge is guarded by a comparison with the pre-inc value the
5162 // addrec is safe. Also, if the entry is guarded by a comparison with the
5163 // start value and the backedge is guarded by a comparison with the post-inc
5164 // value, the addrec is safe.
5165 ICmpInst::Predicate Pred;
5166 const SCEV *OverflowLimit =
5167 getSignedOverflowLimitForStep(Step, &Pred, this);
5168 if (OverflowLimit &&
5169 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
5170 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
5171 Result = setFlags(Result, SCEV::FlagNSW);
5172 }
5173 return Result;
5174 }
5175 SCEV::NoWrapFlags
5176 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
5177 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
5178
5179 if (AR->hasNoUnsignedWrap())
5180 return Result;
5181
5182 if (!AR->isAffine())
5183 return Result;
5184
5185 // This function can be expensive, only try to prove NUW once per AddRec.
5186 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5187 return Result;
5188
5189 const SCEV *Step = AR->getStepRecurrence(*this);
5190 unsigned BitWidth = getTypeSizeInBits(AR->getType());
5191 const Loop *L = AR->getLoop();
5192
5193 // Check whether the backedge-taken count is SCEVCouldNotCompute.
5194 // Note that this serves two purposes: It filters out loops that are
5195 // simply not analyzable, and it covers the case where this code is
5196 // being called from within backedge-taken count analysis, such that
5197 // attempting to ask for the backedge-taken count would likely result
5198 // in infinite recursion. In the latter case, the analysis code will
5199 // cope with a conservative value, and it will take care to purge
5200 // that value once it has finished.
5201 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
5202
5203 // Normally, in the cases we can prove no-overflow via a
5204 // backedge guarding condition, we can also compute a backedge
5205 // taken count for the loop. The exceptions are assumptions and
5206 // guards present in the loop -- SCEV is not great at exploiting
5207 // these to compute max backedge taken counts, but can still use
5208 // these to prove lack of overflow. Use this fact to avoid
5209 // doing extra work that may not pay off.
5210
5211 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5212 AC.assumptions().empty())
5213 return Result;
5214
5215 // If the backedge is guarded by a comparison with the pre-inc value the
5216 // addrec is safe. Also, if the entry is guarded by a comparison with the
5217 // start value and the backedge is guarded by a comparison with the post-inc
5218 // value, the addrec is safe.
5219 if (isKnownPositive(Step)) {
5220 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
5221 getUnsignedRangeMax(Step));
5222 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
5223 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
5224 Result = setFlags(Result, SCEV::FlagNUW);
5225 }
5226 }
5227
5228 return Result;
5229 }
5230
5231 namespace {
5232
5233 /// Represents an abstract binary operation. This may exist as a
5234 /// normal instruction or constant expression, or may have been
5235 /// derived from an expression tree.
5236 struct BinaryOp {
5237 unsigned Opcode;
5238 Value *LHS;
5239 Value *RHS;
5240 bool IsNSW = false;
5241 bool IsNUW = false;
5242
5243 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
5244 /// constant expression.
5245 Operator *Op = nullptr;
5246
5247 explicit BinaryOp(Operator *Op)
5248 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
5249 Op(Op) {
5250 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
5251 IsNSW = OBO->hasNoSignedWrap();
5252 IsNUW = OBO->hasNoUnsignedWrap();
5253 }
5254 }
5255
5256 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
5257 bool IsNUW = false)
5258 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5259 };
5260
5261 } // end anonymous namespace
5262
5263 /// Try to map \p V into a BinaryOp, and return \c std::nullopt on failure.
5264 static std::optional<BinaryOp> MatchBinaryOp(Value *V, const DataLayout &DL,
5265 AssumptionCache &AC,
5266 const DominatorTree &DT,
5267 const Instruction *CxtI) {
5268 auto *Op = dyn_cast<Operator>(V);
5269 if (!Op)
5270 return std::nullopt;
5271
5272 // Implementation detail: all the cleverness here should happen without
5273 // creating new SCEV expressions -- our caller knows tricks to avoid creating
5274 // SCEV expressions when possible, and we should not break that.
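// For example (an illustrative sketch, not part of the original comment):
// given the IR instruction
//   %s = or disjoint i64 %a, %b
// the switch below produces
//   BinaryOp(Instruction::Add, %a, %b, /*IsNSW=*/true, /*IsNUW=*/true)
// because a disjoint 'or' never carries, and so behaves like a non-wrapping
// add.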
5275 5276 switch (Op->getOpcode()) { 5277 case Instruction::Add: 5278 case Instruction::Sub: 5279 case Instruction::Mul: 5280 case Instruction::UDiv: 5281 case Instruction::URem: 5282 case Instruction::And: 5283 case Instruction::AShr: 5284 case Instruction::Shl: 5285 return BinaryOp(Op); 5286 5287 case Instruction::Or: { 5288 // Convert or disjoint into add nuw nsw. 5289 if (cast<PossiblyDisjointInst>(Op)->isDisjoint()) 5290 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1), 5291 /*IsNSW=*/true, /*IsNUW=*/true); 5292 return BinaryOp(Op); 5293 } 5294 5295 case Instruction::Xor: 5296 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 5297 // If the RHS of the xor is a signmask, then this is just an add. 5298 // Instcombine turns add of signmask into xor as a strength reduction step. 5299 if (RHSC->getValue().isSignMask()) 5300 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 5301 // Binary `xor` is a bit-wise `add`. 5302 if (V->getType()->isIntegerTy(1)) 5303 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 5304 return BinaryOp(Op); 5305 5306 case Instruction::LShr: 5307 // Turn logical shift right of a constant into a unsigned divide. 5308 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 5309 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 5310 5311 // If the shift count is not less than the bitwidth, the result of 5312 // the shift is undefined. Don't try to analyze it, because the 5313 // resolution chosen here may differ from the resolution chosen in 5314 // other parts of the compiler. 5315 if (SA->getValue().ult(BitWidth)) { 5316 Constant *X = 5317 ConstantInt::get(SA->getContext(), 5318 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 5319 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 5320 } 5321 } 5322 return BinaryOp(Op); 5323 5324 case Instruction::ExtractValue: { 5325 auto *EVI = cast<ExtractValueInst>(Op); 5326 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 5327 break; 5328 5329 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); 5330 if (!WO) 5331 break; 5332 5333 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 5334 bool Signed = WO->isSigned(); 5335 // TODO: Should add nuw/nsw flags for mul as well. 5336 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) 5337 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); 5338 5339 // Now that we know that all uses of the arithmetic-result component of 5340 // CI are guarded by the overflow check, we can go ahead and pretend 5341 // that the arithmetic is non-overflowing. 5342 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), 5343 /* IsNSW = */ Signed, /* IsNUW = */ !Signed); 5344 } 5345 5346 default: 5347 break; 5348 } 5349 5350 // Recognise intrinsic loop.decrement.reg, and as this has exactly the same 5351 // semantics as a Sub, return a binary sub expression. 5352 if (auto *II = dyn_cast<IntrinsicInst>(V)) 5353 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) 5354 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); 5355 5356 return std::nullopt; 5357 } 5358 5359 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 5360 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 5361 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 5362 /// way. 
This function checks if \p Op, an operand of this SCEVAddExpr, 5363 /// follows one of the following patterns: 5364 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 5365 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 5366 /// If the SCEV expression of \p Op conforms with one of the expected patterns 5367 /// we return the type of the truncation operation, and indicate whether the 5368 /// truncated type should be treated as signed/unsigned by setting 5369 /// \p Signed to true/false, respectively. 5370 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 5371 bool &Signed, ScalarEvolution &SE) { 5372 // The case where Op == SymbolicPHI (that is, with no type conversions on 5373 // the way) is handled by the regular add recurrence creating logic and 5374 // would have already been triggered in createAddRecForPHI. Reaching it here 5375 // means that createAddRecFromPHI had failed for this PHI before (e.g., 5376 // because one of the other operands of the SCEVAddExpr updating this PHI is 5377 // not invariant). 5378 // 5379 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 5380 // this case predicates that allow us to prove that Op == SymbolicPHI will 5381 // be added. 5382 if (Op == SymbolicPHI) 5383 return nullptr; 5384 5385 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 5386 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 5387 if (SourceBits != NewBits) 5388 return nullptr; 5389 5390 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 5391 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 5392 if (!SExt && !ZExt) 5393 return nullptr; 5394 const SCEVTruncateExpr *Trunc = 5395 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 5396 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 5397 if (!Trunc) 5398 return nullptr; 5399 const SCEV *X = Trunc->getOperand(); 5400 if (X != SymbolicPHI) 5401 return nullptr; 5402 Signed = SExt != nullptr; 5403 return Trunc->getType(); 5404 } 5405 5406 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 5407 if (!PN->getType()->isIntegerTy()) 5408 return nullptr; 5409 const Loop *L = LI.getLoopFor(PN->getParent()); 5410 if (!L || L->getHeader() != PN->getParent()) 5411 return nullptr; 5412 return L; 5413 } 5414 5415 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 5416 // computation that updates the phi follows the following pattern: 5417 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 5418 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 5419 // If so, try to see if it can be rewritten as an AddRecExpr under some 5420 // Predicates. If successful, return them as a pair. Also cache the results 5421 // of the analysis. 5422 // 5423 // Example usage scenario: 5424 // Say the Rewriter is called for the following SCEV: 5425 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 5426 // where: 5427 // %X = phi i64 (%Start, %BEValue) 5428 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 5429 // and call this function with %SymbolicPHI = %X. 
5430 //
5431 // The analysis will find that the value coming around the backedge has
5432 // the following SCEV:
5433 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
5434 // Upon concluding that this matches the desired pattern, the function
5435 // will return the pair {NewAddRec, SmallPredsVec} where:
5436 // NewAddRec = {%Start,+,%Step}
5437 // SmallPredsVec = {P1, P2, P3} as follows:
5438 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
5439 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
5440 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
5441 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
5442 // under the predicates {P1,P2,P3}.
5443 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
5444 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
5445 //
5446 // TODOs:
5447 //
5448 // 1) Extend the Induction descriptor to also support inductions that involve
5449 // casts: When needed (namely, when we are called in the context of the
5450 // vectorizer induction analysis), a Set of cast instructions will be
5451 // populated by this method, and provided back to isInductionPHI. This is
5452 // needed to allow the vectorizer to properly record them to be ignored by
5453 // the cost model and to avoid vectorizing them (otherwise these casts,
5454 // which are redundant under the runtime overflow checks, will be
5455 // vectorized, which can be costly).
5456 //
5457 // 2) Support additional induction/PHISCEV patterns: We also want to support
5458 // inductions where the sext-trunc / zext-trunc operations (partly) occur
5459 // after the induction update operation (the induction increment):
5460 //
5461 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
5462 // which correspond to a phi->add->trunc->sext/zext->phi update chain.
5463 //
5464 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
5465 // which correspond to a phi->trunc->add->sext/zext->phi update chain.
5466 //
5467 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
5468 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5469 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
5470 SmallVector<const SCEVPredicate *, 3> Predicates;
5471
5472 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
5473 // return an AddRec expression under some predicate.
5474
5475 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5476 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5477 assert(L && "Expecting an integer loop header phi");
5478
5479 // The loop may have multiple entrances or multiple exits; we can analyze
5480 // this phi as an addrec if it has a unique entry value and a unique
5481 // backedge value.
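// (Illustrative IR for the shape being matched, not from the original file:
//   %x = phi i64 [ %start, %preheader ], [ %x.next, %latch ]
// has the unique entry value %start and the unique backedge value %x.next.)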
5482 Value *BEValueV = nullptr, *StartValueV = nullptr; 5483 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 5484 Value *V = PN->getIncomingValue(i); 5485 if (L->contains(PN->getIncomingBlock(i))) { 5486 if (!BEValueV) { 5487 BEValueV = V; 5488 } else if (BEValueV != V) { 5489 BEValueV = nullptr; 5490 break; 5491 } 5492 } else if (!StartValueV) { 5493 StartValueV = V; 5494 } else if (StartValueV != V) { 5495 StartValueV = nullptr; 5496 break; 5497 } 5498 } 5499 if (!BEValueV || !StartValueV) 5500 return std::nullopt; 5501 5502 const SCEV *BEValue = getSCEV(BEValueV); 5503 5504 // If the value coming around the backedge is an add with the symbolic 5505 // value we just inserted, possibly with casts that we can ignore under 5506 // an appropriate runtime guard, then we found a simple induction variable! 5507 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 5508 if (!Add) 5509 return std::nullopt; 5510 5511 // If there is a single occurrence of the symbolic value, possibly 5512 // casted, replace it with a recurrence. 5513 unsigned FoundIndex = Add->getNumOperands(); 5514 Type *TruncTy = nullptr; 5515 bool Signed; 5516 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5517 if ((TruncTy = 5518 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 5519 if (FoundIndex == e) { 5520 FoundIndex = i; 5521 break; 5522 } 5523 5524 if (FoundIndex == Add->getNumOperands()) 5525 return std::nullopt; 5526 5527 // Create an add with everything but the specified operand. 5528 SmallVector<const SCEV *, 8> Ops; 5529 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5530 if (i != FoundIndex) 5531 Ops.push_back(Add->getOperand(i)); 5532 const SCEV *Accum = getAddExpr(Ops); 5533 5534 // The runtime checks will not be valid if the step amount is 5535 // varying inside the loop. 5536 if (!isLoopInvariant(Accum, L)) 5537 return std::nullopt; 5538 5539 // *** Part2: Create the predicates 5540 5541 // Analysis was successful: we have a phi-with-cast pattern for which we 5542 // can return an AddRec expression under the following predicates: 5543 // 5544 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 5545 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
5546 // P2: An Equal predicate that guarantees that 5547 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 5548 // P3: An Equal predicate that guarantees that 5549 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 5550 // 5551 // As we next prove, the above predicates guarantee that: 5552 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 5553 // 5554 // 5555 // More formally, we want to prove that: 5556 // Expr(i+1) = Start + (i+1) * Accum 5557 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5558 // 5559 // Given that: 5560 // 1) Expr(0) = Start 5561 // 2) Expr(1) = Start + Accum 5562 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 5563 // 3) Induction hypothesis (step i): 5564 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 5565 // 5566 // Proof: 5567 // Expr(i+1) = 5568 // = Start + (i+1)*Accum 5569 // = (Start + i*Accum) + Accum 5570 // = Expr(i) + Accum 5571 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 5572 // :: from step i 5573 // 5574 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 5575 // 5576 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 5577 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 5578 // + Accum :: from P3 5579 // 5580 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 5581 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 5582 // 5583 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 5584 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5585 // 5586 // By induction, the same applies to all iterations 1<=i<n: 5587 // 5588 5589 // Create a truncated addrec for which we will add a no overflow check (P1). 5590 const SCEV *StartVal = getSCEV(StartValueV); 5591 const SCEV *PHISCEV = 5592 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 5593 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 5594 5595 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 5596 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 5597 // will be constant. 5598 // 5599 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 5600 // add P1. 5601 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 5602 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 5603 Signed ? SCEVWrapPredicate::IncrementNSSW 5604 : SCEVWrapPredicate::IncrementNUSW; 5605 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 5606 Predicates.push_back(AddRecPred); 5607 } 5608 5609 // Create the Equal Predicates P2,P3: 5610 5611 // It is possible that the predicates P2 and/or P3 are computable at 5612 // compile time due to StartVal and/or Accum being constants. 5613 // If either one is, then we can check that now and escape if either P2 5614 // or P3 is false. 5615 5616 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 5617 // for each of StartVal and Accum 5618 auto getExtendedExpr = [&](const SCEV *Expr, 5619 bool CreateSignExtend) -> const SCEV * { 5620 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 5621 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 5622 const SCEV *ExtendedExpr = 5623 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
5624 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
5625 return ExtendedExpr;
5626 };
5627
5628 // Given:
5629 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
5630 // = getExtendedExpr(Expr)
5631 // Determine whether the predicate P: Expr == ExtendedExpr
5632 // is known to be false at compile time
5633 auto PredIsKnownFalse = [&](const SCEV *Expr,
5634 const SCEV *ExtendedExpr) -> bool {
5635 return Expr != ExtendedExpr &&
5636 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
5637 };
5638
5639 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
5640 if (PredIsKnownFalse(StartVal, StartExtended)) {
5641 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
5642 return std::nullopt;
5643 }
5644
5645 // The Step is always Signed (because the overflow checks are either
5646 // NSSW or NUSW).
5647 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
5648 if (PredIsKnownFalse(Accum, AccumExtended)) {
5649 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
5650 return std::nullopt;
5651 }
5652
5653 auto AppendPredicate = [&](const SCEV *Expr,
5654 const SCEV *ExtendedExpr) -> void {
5655 if (Expr != ExtendedExpr &&
5656 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
5657 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
5658 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
5659 Predicates.push_back(Pred);
5660 }
5661 };
5662
5663 AppendPredicate(StartVal, StartExtended);
5664 AppendPredicate(Accum, AccumExtended);
5665
5666 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
5667 // which the casts had been folded away. The caller can rewrite SymbolicPHI
5668 // into NewAR if it will also add the runtime overflow checks specified in
5669 // Predicates.
5670 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
5671
5672 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5673 std::make_pair(NewAR, Predicates);
5674 // Remember the result of the analysis for this SCEV at this location.
5675 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
5676 return PredRewrite;
5677 }
5678
5679 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5680 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
5681 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5682 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5683 if (!L)
5684 return std::nullopt;
5685
5686 // Check to see if we already analyzed this PHI.
5687 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 5688 if (I != PredicatedSCEVRewrites.end()) { 5689 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 5690 I->second; 5691 // Analysis was done before and failed to create an AddRec: 5692 if (Rewrite.first == SymbolicPHI) 5693 return std::nullopt; 5694 // Analysis was done before and succeeded to create an AddRec under 5695 // a predicate: 5696 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 5697 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 5698 return Rewrite; 5699 } 5700 5701 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 5702 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 5703 5704 // Record in the cache that the analysis failed 5705 if (!Rewrite) { 5706 SmallVector<const SCEVPredicate *, 3> Predicates; 5707 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 5708 return std::nullopt; 5709 } 5710 5711 return Rewrite; 5712 } 5713 5714 // FIXME: This utility is currently required because the Rewriter currently 5715 // does not rewrite this expression: 5716 // {0, +, (sext ix (trunc iy to ix) to iy)} 5717 // into {0, +, %step}, 5718 // even when the following Equal predicate exists: 5719 // "%step == (sext ix (trunc iy to ix) to iy)". 5720 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 5721 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 5722 if (AR1 == AR2) 5723 return true; 5724 5725 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 5726 if (Expr1 != Expr2 && !Preds->implies(SE.getEqualPredicate(Expr1, Expr2)) && 5727 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1))) 5728 return false; 5729 return true; 5730 }; 5731 5732 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 5733 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 5734 return false; 5735 return true; 5736 } 5737 5738 /// A helper function for createAddRecFromPHI to handle simple cases. 5739 /// 5740 /// This function tries to find an AddRec expression for the simplest (yet most 5741 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 5742 /// If it fails, createAddRecFromPHI will use a more general, but slow, 5743 /// technique for finding the AddRec expression. 
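///
/// For example (illustrative IR, not from the original comment), the fast
/// path matches
///   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
///   %iv.next = add nuw nsw i32 %iv, 1
/// and yields {0,+,1}<nuw><nsw> for %iv without creating a symbolic name.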
5744 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
5745 Value *BEValueV,
5746 Value *StartValueV) {
5747 const Loop *L = LI.getLoopFor(PN->getParent());
5748 assert(L && L->getHeader() == PN->getParent());
5749 assert(BEValueV && StartValueV);
5750
5751 auto BO = MatchBinaryOp(BEValueV, getDataLayout(), AC, DT, PN);
5752 if (!BO)
5753 return nullptr;
5754
5755 if (BO->Opcode != Instruction::Add)
5756 return nullptr;
5757
5758 const SCEV *Accum = nullptr;
5759 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5760 Accum = getSCEV(BO->RHS);
5761 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5762 Accum = getSCEV(BO->LHS);
5763
5764 if (!Accum)
5765 return nullptr;
5766
5767 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5768 if (BO->IsNUW)
5769 Flags = setFlags(Flags, SCEV::FlagNUW);
5770 if (BO->IsNSW)
5771 Flags = setFlags(Flags, SCEV::FlagNSW);
5772
5773 const SCEV *StartVal = getSCEV(StartValueV);
5774 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5775 insertValueToMap(PN, PHISCEV);
5776
5777 if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5778 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR),
5779 (SCEV::NoWrapFlags)(AR->getNoWrapFlags() |
5780 proveNoWrapViaConstantRanges(AR)));
5781 }
5782
5783 // We can add Flags to the post-inc expression only if we
5784 // know that it is *undefined behavior* for BEValueV to
5785 // overflow.
5786 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
5787 assert(isLoopInvariant(Accum, L) &&
5788 "Accum is defined outside L, but is not invariant?");
5789 if (isAddRecNeverPoison(BEInst, L))
5790 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5791 }
5792
5793 return PHISCEV;
5794 }
5795
5796 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5797 const Loop *L = LI.getLoopFor(PN->getParent());
5798 if (!L || L->getHeader() != PN->getParent())
5799 return nullptr;
5800
5801 // The loop may have multiple entrances or multiple exits; we can analyze
5802 // this phi as an addrec if it has a unique entry value and a unique
5803 // backedge value.
5804 Value *BEValueV = nullptr, *StartValueV = nullptr;
5805 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5806 Value *V = PN->getIncomingValue(i);
5807 if (L->contains(PN->getIncomingBlock(i))) {
5808 if (!BEValueV) {
5809 BEValueV = V;
5810 } else if (BEValueV != V) {
5811 BEValueV = nullptr;
5812 break;
5813 }
5814 } else if (!StartValueV) {
5815 StartValueV = V;
5816 } else if (StartValueV != V) {
5817 StartValueV = nullptr;
5818 break;
5819 }
5820 }
5821 if (!BEValueV || !StartValueV)
5822 return nullptr;
5823
5824 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5825 "PHI node already processed?");
5826
5827 // First, try to find an AddRec expression without creating a fictitious
5828 // symbolic value for PN.
5829 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5830 return S;
5831
5832 // Handle PHI node value symbolically.
5833 const SCEV *SymbolicName = getUnknown(PN);
5834 insertValueToMap(PN, SymbolicName);
5835
5836 // Using this symbolic name for the PHI, analyze the value coming around
5837 // the back-edge.
5838 const SCEV *BEValue = getSCEV(BEValueV);
5839
5840 // NOTE: If BEValue is loop invariant, we know that the PHI node just
5841 // has a special value for the first iteration of the loop.
5842
5843 // If the value coming around the backedge is an add with the symbolic
5844 // value we just inserted, then we found a simple induction variable!
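// (Concretely, and only as an illustration: if SymbolicName is %x and
// BEValue folds to (%x + %step), the code below rewrites %x into the addrec
// {%start,+,%step}<L>.)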
5845 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 5846 // If there is a single occurrence of the symbolic value, replace it 5847 // with a recurrence. 5848 unsigned FoundIndex = Add->getNumOperands(); 5849 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5850 if (Add->getOperand(i) == SymbolicName) 5851 if (FoundIndex == e) { 5852 FoundIndex = i; 5853 break; 5854 } 5855 5856 if (FoundIndex != Add->getNumOperands()) { 5857 // Create an add with everything but the specified operand. 5858 SmallVector<const SCEV *, 8> Ops; 5859 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5860 if (i != FoundIndex) 5861 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), 5862 L, *this)); 5863 const SCEV *Accum = getAddExpr(Ops); 5864 5865 // This is not a valid addrec if the step amount is varying each 5866 // loop iteration, but is not itself an addrec in this loop. 5867 if (isLoopInvariant(Accum, L) || 5868 (isa<SCEVAddRecExpr>(Accum) && 5869 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 5870 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5871 5872 if (auto BO = MatchBinaryOp(BEValueV, getDataLayout(), AC, DT, PN)) { 5873 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 5874 if (BO->IsNUW) 5875 Flags = setFlags(Flags, SCEV::FlagNUW); 5876 if (BO->IsNSW) 5877 Flags = setFlags(Flags, SCEV::FlagNSW); 5878 } 5879 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 5880 // If the increment is an inbounds GEP, then we know the address 5881 // space cannot be wrapped around. We cannot make any guarantee 5882 // about signed or unsigned overflow because pointers are 5883 // unsigned but we may have a negative index from the base 5884 // pointer. We can guarantee that no unsigned wrap occurs if the 5885 // indices form a positive value. 5886 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 5887 Flags = setFlags(Flags, SCEV::FlagNW); 5888 if (isKnownPositive(Accum)) 5889 Flags = setFlags(Flags, SCEV::FlagNUW); 5890 } 5891 5892 // We cannot transfer nuw and nsw flags from subtraction 5893 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 5894 // for instance. 5895 } 5896 5897 const SCEV *StartVal = getSCEV(StartValueV); 5898 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5899 5900 // Okay, for the entire analysis of this edge we assumed the PHI 5901 // to be symbolic. We now need to go back and purge all of the 5902 // entries for the scalars that use the symbolic expression. 5903 forgetMemoizedResults(SymbolicName); 5904 insertValueToMap(PN, PHISCEV); 5905 5906 if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 5907 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), 5908 (SCEV::NoWrapFlags)(AR->getNoWrapFlags() | 5909 proveNoWrapViaConstantRanges(AR))); 5910 } 5911 5912 // We can add Flags to the post-inc expression only if we 5913 // know that it is *undefined behavior* for BEValueV to 5914 // overflow. 5915 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 5916 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 5917 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5918 5919 return PHISCEV; 5920 } 5921 } 5922 } else { 5923 // Otherwise, this could be a loop like this: 5924 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 5925 // In this case, j = {1,+,1} and BEValue is j. 5926 // Because the other in-value of i (0) fits the evolution of BEValue 5927 // i really is an addrec evolution. 
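// (In the example above, i then evaluates to {0,+,1}: it trails j by one
// iteration, which is exactly what the shift rewrite below verifies.)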
    //
    // We can generalize this saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() && Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetMemoizedResults(SymbolicName);
        insertValueToMap(PN, Shifted);
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        properlyDominates(getSCEV(LHS), PN->getParent()) &&
        properlyDominates(getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (Value *V = simplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    return getSCEV(V);

  if (const SCEV *S =
createNodeFromSelectLikePHI(PN)) 6030 return S; 6031 6032 // If it's not a loop phi, we can't handle it yet. 6033 return getUnknown(PN); 6034 } 6035 6036 bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, 6037 SCEVTypes RootKind) { 6038 struct FindClosure { 6039 const SCEV *OperandToFind; 6040 const SCEVTypes RootKind; // Must be a sequential min/max expression. 6041 const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind. 6042 6043 bool Found = false; 6044 6045 bool canRecurseInto(SCEVTypes Kind) const { 6046 // We can only recurse into the SCEV expression of the same effective type 6047 // as the type of our root SCEV expression, and into zero-extensions. 6048 return RootKind == Kind || NonSequentialRootKind == Kind || 6049 scZeroExtend == Kind; 6050 }; 6051 6052 FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind) 6053 : OperandToFind(OperandToFind), RootKind(RootKind), 6054 NonSequentialRootKind( 6055 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( 6056 RootKind)) {} 6057 6058 bool follow(const SCEV *S) { 6059 Found = S == OperandToFind; 6060 6061 return !isDone() && canRecurseInto(S->getSCEVType()); 6062 } 6063 6064 bool isDone() const { return Found; } 6065 }; 6066 6067 FindClosure FC(OperandToFind, RootKind); 6068 visitAll(Root, FC); 6069 return FC.Found; 6070 } 6071 6072 std::optional<const SCEV *> 6073 ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty, 6074 ICmpInst *Cond, 6075 Value *TrueVal, 6076 Value *FalseVal) { 6077 // Try to match some simple smax or umax patterns. 6078 auto *ICI = Cond; 6079 6080 Value *LHS = ICI->getOperand(0); 6081 Value *RHS = ICI->getOperand(1); 6082 6083 switch (ICI->getPredicate()) { 6084 case ICmpInst::ICMP_SLT: 6085 case ICmpInst::ICMP_SLE: 6086 case ICmpInst::ICMP_ULT: 6087 case ICmpInst::ICMP_ULE: 6088 std::swap(LHS, RHS); 6089 [[fallthrough]]; 6090 case ICmpInst::ICMP_SGT: 6091 case ICmpInst::ICMP_SGE: 6092 case ICmpInst::ICMP_UGT: 6093 case ICmpInst::ICMP_UGE: 6094 // a > b ? a+x : b+x -> max(a, b)+x 6095 // a > b ? b+x : a+x -> min(a, b)+x 6096 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(Ty)) { 6097 bool Signed = ICI->isSigned(); 6098 const SCEV *LA = getSCEV(TrueVal); 6099 const SCEV *RA = getSCEV(FalseVal); 6100 const SCEV *LS = getSCEV(LHS); 6101 const SCEV *RS = getSCEV(RHS); 6102 if (LA->getType()->isPointerTy()) { 6103 // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA. 6104 // Need to make sure we can't produce weird expressions involving 6105 // negated pointers. 6106 if (LA == LS && RA == RS) 6107 return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS); 6108 if (LA == RS && RA == LS) 6109 return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS); 6110 } 6111 auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * { 6112 if (Op->getType()->isPointerTy()) { 6113 Op = getLosslessPtrToIntExpr(Op); 6114 if (isa<SCEVCouldNotCompute>(Op)) 6115 return Op; 6116 } 6117 if (Signed) 6118 Op = getNoopOrSignExtend(Op, Ty); 6119 else 6120 Op = getNoopOrZeroExtend(Op, Ty); 6121 return Op; 6122 }; 6123 LS = CoerceOperand(LS); 6124 RS = CoerceOperand(RS); 6125 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS)) 6126 break; 6127 const SCEV *LDiff = getMinusSCEV(LA, LS); 6128 const SCEV *RDiff = getMinusSCEV(RA, RS); 6129 if (LDiff == RDiff) 6130 return getAddExpr(Signed ? 
getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), 6131 LDiff); 6132 LDiff = getMinusSCEV(LA, RS); 6133 RDiff = getMinusSCEV(RA, LS); 6134 if (LDiff == RDiff) 6135 return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), 6136 LDiff); 6137 } 6138 break; 6139 case ICmpInst::ICMP_NE: 6140 // x != 0 ? x+y : C+y -> x == 0 ? C+y : x+y 6141 std::swap(TrueVal, FalseVal); 6142 [[fallthrough]]; 6143 case ICmpInst::ICMP_EQ: 6144 // x == 0 ? C+y : x+y -> umax(x, C)+y iff C u<= 1 6145 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(Ty) && 6146 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 6147 const SCEV *X = getNoopOrZeroExtend(getSCEV(LHS), Ty); 6148 const SCEV *TrueValExpr = getSCEV(TrueVal); // C+y 6149 const SCEV *FalseValExpr = getSCEV(FalseVal); // x+y 6150 const SCEV *Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x 6151 const SCEV *C = getMinusSCEV(TrueValExpr, Y); // C = (C+y)-y 6152 if (isa<SCEVConstant>(C) && cast<SCEVConstant>(C)->getAPInt().ule(1)) 6153 return getAddExpr(getUMaxExpr(X, C), Y); 6154 } 6155 // x == 0 ? 0 : umin (..., x, ...) -> umin_seq(x, umin (...)) 6156 // x == 0 ? 0 : umin_seq(..., x, ...) -> umin_seq(x, umin_seq(...)) 6157 // x == 0 ? 0 : umin (..., umin_seq(..., x, ...), ...) 6158 // -> umin_seq(x, umin (..., umin_seq(...), ...)) 6159 if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero() && 6160 isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->isZero()) { 6161 const SCEV *X = getSCEV(LHS); 6162 while (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(X)) 6163 X = ZExt->getOperand(); 6164 if (getTypeSizeInBits(X->getType()) <= getTypeSizeInBits(Ty)) { 6165 const SCEV *FalseValExpr = getSCEV(FalseVal); 6166 if (SCEVMinMaxExprContains(FalseValExpr, X, scSequentialUMinExpr)) 6167 return getUMinExpr(getNoopOrZeroExtend(X, Ty), FalseValExpr, 6168 /*Sequential=*/true); 6169 } 6170 } 6171 break; 6172 default: 6173 break; 6174 } 6175 6176 return std::nullopt; 6177 } 6178 6179 static std::optional<const SCEV *> 6180 createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, 6181 const SCEV *TrueExpr, const SCEV *FalseExpr) { 6182 assert(CondExpr->getType()->isIntegerTy(1) && 6183 TrueExpr->getType() == FalseExpr->getType() && 6184 TrueExpr->getType()->isIntegerTy(1) && 6185 "Unexpected operands of a select."); 6186 6187 // i1 cond ? i1 x : i1 C --> C + (i1 cond ? (i1 x - i1 C) : i1 0) 6188 // --> C + (umin_seq cond, x - C) 6189 // 6190 // i1 cond ? i1 C : i1 x --> C + (i1 cond ? i1 0 : (i1 x - i1 C)) 6191 // --> C + (i1 ~cond ? (i1 x - i1 C) : i1 0) 6192 // --> C + (umin_seq ~cond, x - C) 6193 6194 // FIXME: while we can't legally model the case where both of the hands 6195 // are fully variable, we only require that the *difference* is constant. 
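  // E.g. (illustration) the poison-safe form of "and i1 %c, %x", i.e.
  //   select i1 %c, i1 %x, i1 false
  // has C == 0, so the rewrite below yields (0 + (umin_seq %c, %x)), which
  // is simply (umin_seq %c, %x).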
  if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
    return std::nullopt;

  const SCEV *X, *C;
  if (isa<SCEVConstant>(TrueExpr)) {
    CondExpr = SE->getNotSCEV(CondExpr);
    X = FalseExpr;
    C = TrueExpr;
  } else {
    X = TrueExpr;
    C = FalseExpr;
  }
  return SE->getAddExpr(C, SE->getUMinExpr(CondExpr, SE->getMinusSCEV(X, C),
                                           /*Sequential=*/true));
}

static std::optional<const SCEV *>
createNodeForSelectViaUMinSeq(ScalarEvolution *SE, Value *Cond, Value *TrueVal,
                              Value *FalseVal) {
  if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
    return std::nullopt;

  const auto *SECond = SE->getSCEV(Cond);
  const auto *SETrue = SE->getSCEV(TrueVal);
  const auto *SEFalse = SE->getSCEV(FalseVal);
  return createNodeForSelectViaUMinSeq(SE, SECond, SETrue, SEFalse);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
    Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) {
  assert(Cond->getType()->isIntegerTy(1) && "Select condition is not an i1?");
  assert(TrueVal->getType() == FalseVal->getType() &&
         V->getType() == TrueVal->getType() &&
         "Types of select hands and of the result must match.");

  // For now, only deal with i1-typed `select`s.
  if (!V->getType()->isIntegerTy(1))
    return getUnknown(V);

  if (std::optional<const SCEV *> S =
          createNodeForSelectViaUMinSeq(this, Cond, TrueVal, FalseVal))
    return *S;

  return getUnknown(V);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
      if (std::optional<const SCEV *> S =
              createNodeForSelectOrPHIInstWithICmpInstCond(I->getType(), ICI,
                                                           TrueVal, FalseVal))
        return *S;
    }
  }

  return createNodeForSelectOrPHIViaUMinSeq(V, Cond, TrueVal, FalseVal);
}

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  assert(GEP->getSourceElementType()->isSized() &&
         "GEP source element type must be sized");

  SmallVector<const SCEV *, 4> IndexExprs;
  for (Value *Index : GEP->indices())
    IndexExprs.push_back(getSCEV(Index));
  return getGEPExpr(GEP, IndexExprs);
}

APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S) {
  uint64_t BitWidth = getTypeSizeInBits(S->getType());
  auto GetShiftedByZeros = [BitWidth](uint32_t TrailingZeros) {
    return TrailingZeros >= BitWidth
               ? APInt::getZero(BitWidth)
               : APInt::getOneBitSet(BitWidth, TrailingZeros);
  };
  auto GetGCDMultiple = [this](const SCEVNAryExpr *N) {
    // The result is the GCD of all the operands' results.
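    // E.g. if one operand is a known multiple of 8 and another of 12,
    // GreatestCommonDivisor(8, 12) == 4, so the expression as a whole is a
    // known multiple of 4.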
    APInt Res = getConstantMultiple(N->getOperand(0));
    for (unsigned I = 1, E = N->getNumOperands(); I < E && Res != 1; ++I)
      Res = APIntOps::GreatestCommonDivisor(
          Res, getConstantMultiple(N->getOperand(I)));
    return Res;
  };

  switch (S->getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(S)->getAPInt();
  case scPtrToInt:
    return getConstantMultiple(cast<SCEVPtrToIntExpr>(S)->getOperand());
  case scUDivExpr:
  case scVScale:
    return APInt(BitWidth, 1);
  case scTruncate: {
    // Only multiples that are a power of 2 will hold after truncation.
    const SCEVTruncateExpr *T = cast<SCEVTruncateExpr>(S);
    uint32_t TZ = getMinTrailingZeros(T->getOperand());
    return GetShiftedByZeros(TZ);
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *Z = cast<SCEVZeroExtendExpr>(S);
    return getConstantMultiple(Z->getOperand()).zext(BitWidth);
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *E = cast<SCEVSignExtendExpr>(S);
    return getConstantMultiple(E->getOperand()).sext(BitWidth);
  }
  case scMulExpr: {
    const SCEVMulExpr *M = cast<SCEVMulExpr>(S);
    if (M->hasNoUnsignedWrap()) {
      // The result is the product of all operand results.
      APInt Res = getConstantMultiple(M->getOperand(0));
      for (const SCEV *Operand : M->operands().drop_front())
        Res = Res * getConstantMultiple(Operand);
      return Res;
    }

    // If there are no wrap guarantees, find the trailing zeros, which is the
    // sum of trailing zeros for all its operands.
    uint32_t TZ = 0;
    for (const SCEV *Operand : M->operands())
      TZ += getMinTrailingZeros(Operand);
    return GetShiftedByZeros(TZ);
  }
  case scAddExpr:
  case scAddRecExpr: {
    const SCEVNAryExpr *N = cast<SCEVNAryExpr>(S);
    if (N->hasNoUnsignedWrap())
      return GetGCDMultiple(N);
    // Without wrap guarantees, find the minimum number of trailing zeros,
    // which is the minimum over all operands.
    uint32_t TZ = getMinTrailingZeros(N->getOperand(0));
    for (const SCEV *Operand : N->operands().drop_front())
      TZ = std::min(TZ, getMinTrailingZeros(Operand));
    return GetShiftedByZeros(TZ);
  }
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr:
    return GetGCDMultiple(cast<SCEVNAryExpr>(S));
  case scUnknown: {
    // Ask ValueTracking for the number of known trailing zero bits.
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    unsigned Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT)
            .countMinTrailingZeros();
    return GetShiftedByZeros(Known);
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

APInt ScalarEvolution::getConstantMultiple(const SCEV *S) {
  auto I = ConstantMultipleCache.find(S);
  if (I != ConstantMultipleCache.end())
    return I->second;

  APInt Result = getConstantMultipleImpl(S);
  auto InsertPair = ConstantMultipleCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

APInt ScalarEvolution::getNonZeroConstantMultiple(const SCEV *S) {
  APInt Multiple = getConstantMultiple(S);
  return Multiple == 0 ? APInt(Multiple.getBitWidth(), 1) : Multiple;
}
uint32_t ScalarEvolution::getMinTrailingZeros(const SCEV *S) {
  return std::min(getConstantMultiple(S).countTrailingZeros(),
                  (unsigned)getTypeSizeInBits(S->getType()));
}

/// Helper method to assign a range to V from metadata present in the IR.
static std::optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return std::nullopt;
}

void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
                                     SCEV::NoWrapFlags Flags) {
  if (AddRec->getNoWrapFlags(Flags) != Flags) {
    AddRec->setNoWrapFlags(Flags);
    UnsignedRanges.erase(AddRec);
    SignedRanges.erase(AddRec);
    ConstantMultipleCache.erase(AddRec);
  }
}

ConstantRange ScalarEvolution::
getRangeForUnknownRecurrence(const SCEVUnknown *U) {
  const DataLayout &DL = getDataLayout();

  unsigned BitWidth = getTypeSizeInBits(U->getType());
  const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);

  // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
  // use information about the trip count to improve our available range. Note
  // that the trip count independent cases are already handled by known bits.
  // WARNING: The definition of recurrence used here is subtly different from
  // the one used by AddRec (and thus most of this file). Step is allowed to
  // be arbitrarily loop varying here, where AddRec allows only loop invariant
  // and other addrecs in the same loop (for non-affine addrecs). The code
  // below intentionally handles the case where step is not loop invariant.
  auto *P = dyn_cast<PHINode>(U->getValue());
  if (!P)
    return FullSet;

  // Make sure that no Phi input comes from an unreachable block. Otherwise,
  // even the values that are not available in these blocks may come from them,
  // and this leads to a false-positive recurrence test.
  for (auto *Pred : predecessors(P->getParent()))
    if (!DT.isReachableFromEntry(Pred))
      return FullSet;

  BinaryOperator *BO;
  Value *Start, *Step;
  if (!matchSimpleRecurrence(P, BO, Start, Step))
    return FullSet;

  // If we found a recurrence in reachable code, we must be in a loop. Note
  // that BO might be in some subloop of L, and that's completely okay.
  auto *L = LI.getLoopFor(P->getParent());
  assert(L && L->getHeader() == P->getParent());
  if (!L->contains(BO->getParent()))
    // NOTE: This bailout should be an assert instead. However, asserting
    // the condition here exposes a case where LoopFusion is querying SCEV
    // with malformed loop information in the midst of the transform.
    // There doesn't appear to be an obvious fix, so for the moment bail
    // out until the caller issue can be fixed. PR49566 tracks the bug.
    return FullSet;

  // TODO: Extend to other opcodes such as mul and div.
  switch (BO->getOpcode()) {
  default:
    return FullSet;
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
    break;
  }

  if (BO->getOperand(0) != P)
    // TODO: Handle the power function forms some day.
6455 return FullSet; 6456 6457 unsigned TC = getSmallConstantMaxTripCount(L); 6458 if (!TC || TC >= BitWidth) 6459 return FullSet; 6460 6461 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT); 6462 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT); 6463 assert(KnownStart.getBitWidth() == BitWidth && 6464 KnownStep.getBitWidth() == BitWidth); 6465 6466 // Compute total shift amount, being careful of overflow and bitwidths. 6467 auto MaxShiftAmt = KnownStep.getMaxValue(); 6468 APInt TCAP(BitWidth, TC-1); 6469 bool Overflow = false; 6470 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); 6471 if (Overflow) 6472 return FullSet; 6473 6474 switch (BO->getOpcode()) { 6475 default: 6476 llvm_unreachable("filtered out above"); 6477 case Instruction::AShr: { 6478 // For each ashr, three cases: 6479 // shift = 0 => unchanged value 6480 // saturation => 0 or -1 6481 // other => a value closer to zero (of the same sign) 6482 // Thus, the end value is closer to zero than the start. 6483 auto KnownEnd = KnownBits::ashr(KnownStart, 6484 KnownBits::makeConstant(TotalShift)); 6485 if (KnownStart.isNonNegative()) 6486 // Analogous to lshr (simply not yet canonicalized) 6487 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 6488 KnownStart.getMaxValue() + 1); 6489 if (KnownStart.isNegative()) 6490 // End >=u Start && End <=s Start 6491 return ConstantRange::getNonEmpty(KnownStart.getMinValue(), 6492 KnownEnd.getMaxValue() + 1); 6493 break; 6494 } 6495 case Instruction::LShr: { 6496 // For each lshr, three cases: 6497 // shift = 0 => unchanged value 6498 // saturation => 0 6499 // other => a smaller positive number 6500 // Thus, the low end of the unsigned range is the last value produced. 6501 auto KnownEnd = KnownBits::lshr(KnownStart, 6502 KnownBits::makeConstant(TotalShift)); 6503 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 6504 KnownStart.getMaxValue() + 1); 6505 } 6506 case Instruction::Shl: { 6507 // Iff no bits are shifted out, value increases on every shift. 6508 auto KnownEnd = KnownBits::shl(KnownStart, 6509 KnownBits::makeConstant(TotalShift)); 6510 if (TotalShift.ult(KnownStart.countMinLeadingZeros())) 6511 return ConstantRange(KnownStart.getMinValue(), 6512 KnownEnd.getMaxValue() + 1); 6513 break; 6514 } 6515 }; 6516 return FullSet; 6517 } 6518 6519 const ConstantRange & 6520 ScalarEvolution::getRangeRefIter(const SCEV *S, 6521 ScalarEvolution::RangeSignHint SignHint) { 6522 DenseMap<const SCEV *, ConstantRange> &Cache = 6523 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 6524 : SignedRanges; 6525 SmallVector<const SCEV *> WorkList; 6526 SmallPtrSet<const SCEV *, 8> Seen; 6527 6528 // Add Expr to the worklist, if Expr is either an N-ary expression or a 6529 // SCEVUnknown PHI node. 
  auto AddToWorklist = [&WorkList, &Seen, &Cache](const SCEV *Expr) {
    if (!Seen.insert(Expr).second)
      return;
    if (Cache.contains(Expr))
      return;
    switch (Expr->getSCEVType()) {
    case scUnknown:
      if (!isa<PHINode>(cast<SCEVUnknown>(Expr)->getValue()))
        break;
      [[fallthrough]];
    case scConstant:
    case scVScale:
    case scTruncate:
    case scZeroExtend:
    case scSignExtend:
    case scPtrToInt:
    case scAddExpr:
    case scMulExpr:
    case scUDivExpr:
    case scAddRecExpr:
    case scUMaxExpr:
    case scSMaxExpr:
    case scUMinExpr:
    case scSMinExpr:
    case scSequentialUMinExpr:
      WorkList.push_back(Expr);
      break;
    case scCouldNotCompute:
      llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    }
  };
  AddToWorklist(S);

  // Build the worklist by queuing operands of N-ary expressions and phi nodes.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    const SCEV *P = WorkList[I];
    auto *UnknownS = dyn_cast<SCEVUnknown>(P);
    // If it is not a `SCEVUnknown`, just recurse into operands.
    if (!UnknownS) {
      for (const SCEV *Op : P->operands())
        AddToWorklist(Op);
      continue;
    }
    // `SCEVUnknown`s require special treatment.
    if (const PHINode *P = dyn_cast<PHINode>(UnknownS->getValue())) {
      if (!PendingPhiRangesIter.insert(P).second)
        continue;
      for (auto &Op : reverse(P->operands()))
        AddToWorklist(getSCEV(Op));
    }
  }

  if (!WorkList.empty()) {
    // Use getRangeRef to compute ranges for items in the worklist in reverse
    // order. This will force ranges for earlier operands to be computed before
    // their users in most cases.
    for (const SCEV *P : reverse(drop_begin(WorkList))) {
      getRangeRef(P, SignHint);

      if (auto *UnknownS = dyn_cast<SCEVUnknown>(P))
        if (const PHINode *P = dyn_cast<PHINode>(UnknownS->getValue()))
          PendingPhiRangesIter.erase(P);
    }
  }

  return getRangeRef(S, SignHint, 0);
}

/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &ScalarEvolution::getRangeRef(
    const SCEV *S, ScalarEvolution::RangeSignHint SignHint, unsigned Depth) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? ConstantRange::Unsigned
                                                       : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  // Switch to iteratively computing the range for S, if it is part of a deeply
  // nested expression.
  if (Depth > RangeIterThreshold)
    return getRangeRefIter(S, SignHint);

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  using OBO = OverflowingBinaryOperator;

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
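  // E.g. for an i8 value known to be a multiple of 8, the largest attainable
  // multiple is 248, so the unsigned range below becomes [0, 249).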
6629 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 6630 APInt Multiple = getNonZeroConstantMultiple(S); 6631 APInt Remainder = APInt::getMaxValue(BitWidth).urem(Multiple); 6632 if (!Remainder.isZero()) 6633 ConservativeResult = 6634 ConstantRange(APInt::getMinValue(BitWidth), 6635 APInt::getMaxValue(BitWidth) - Remainder + 1); 6636 } 6637 else { 6638 uint32_t TZ = getMinTrailingZeros(S); 6639 if (TZ != 0) { 6640 ConservativeResult = ConstantRange( 6641 APInt::getSignedMinValue(BitWidth), 6642 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 6643 } 6644 } 6645 6646 switch (S->getSCEVType()) { 6647 case scConstant: 6648 llvm_unreachable("Already handled above."); 6649 case scVScale: 6650 return setRange(S, SignHint, getVScaleRange(&F, BitWidth)); 6651 case scTruncate: { 6652 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(S); 6653 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint, Depth + 1); 6654 return setRange( 6655 Trunc, SignHint, 6656 ConservativeResult.intersectWith(X.truncate(BitWidth), RangeType)); 6657 } 6658 case scZeroExtend: { 6659 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(S); 6660 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint, Depth + 1); 6661 return setRange( 6662 ZExt, SignHint, 6663 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), RangeType)); 6664 } 6665 case scSignExtend: { 6666 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(S); 6667 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint, Depth + 1); 6668 return setRange( 6669 SExt, SignHint, 6670 ConservativeResult.intersectWith(X.signExtend(BitWidth), RangeType)); 6671 } 6672 case scPtrToInt: { 6673 const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(S); 6674 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint, Depth + 1); 6675 return setRange(PtrToInt, SignHint, X); 6676 } 6677 case scAddExpr: { 6678 const SCEVAddExpr *Add = cast<SCEVAddExpr>(S); 6679 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint, Depth + 1); 6680 unsigned WrapType = OBO::AnyWrap; 6681 if (Add->hasNoSignedWrap()) 6682 WrapType |= OBO::NoSignedWrap; 6683 if (Add->hasNoUnsignedWrap()) 6684 WrapType |= OBO::NoUnsignedWrap; 6685 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 6686 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint, Depth + 1), 6687 WrapType, RangeType); 6688 return setRange(Add, SignHint, 6689 ConservativeResult.intersectWith(X, RangeType)); 6690 } 6691 case scMulExpr: { 6692 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(S); 6693 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint, Depth + 1); 6694 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 6695 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint, Depth + 1)); 6696 return setRange(Mul, SignHint, 6697 ConservativeResult.intersectWith(X, RangeType)); 6698 } 6699 case scUDivExpr: { 6700 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 6701 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint, Depth + 1); 6702 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint, Depth + 1); 6703 return setRange(UDiv, SignHint, 6704 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 6705 } 6706 case scAddRecExpr: { 6707 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(S); 6708 // If there's no unsigned wrap, the value will never be less than its 6709 // initial value. 
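    // E.g. (illustration) {42,+,N}<nuw> can never take a value below 42, so
    // its unsigned range is clamped to the wrapped range [42, 0), i.e.
    // [42, UINT_MAX].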
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isZero())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands except the initial
    // value have the same sign or are zero, the value won't ever be:
    // 1: smaller than the initial value if the operands are non-negative,
    // 2: bigger than the initial value if the operands are non-positive.
    // In both cases, the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
                                       getSignedRangeMax(AddRec->getStart()) +
                                           1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBEScev =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBEScev)) {
        APInt MaxBECount = cast<SCEVConstant>(MaxBEScev)->getAPInt();

        // Adjust MaxBECount to the same bitwidth as AddRec. We can truncate
        // if MaxBECount's active bits are all <= AddRec's bit width.
        if (MaxBECount.getBitWidth() > BitWidth &&
            MaxBECount.getActiveBits() <= BitWidth)
          MaxBECount = MaxBECount.trunc(BitWidth);
        else if (MaxBECount.getBitWidth() < BitWidth)
          MaxBECount = MaxBECount.zext(BitWidth);

        if (MaxBECount.getBitWidth() == BitWidth) {
          auto RangeFromAffine = getRangeForAffineAR(
              AddRec->getStart(), AddRec->getStepRecurrence(*this),
              MaxBECount);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine, RangeType);

          auto RangeFromFactoring = getRangeViaFactoring(
              AddRec->getStart(), AddRec->getStepRecurrence(*this),
              MaxBECount);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
        }
      }

      // Now try the symbolic BE count and more powerful methods.
6773 if (UseExpensiveRangeSharpening) { 6774 const SCEV *SymbolicMaxBECount = 6775 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop()); 6776 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) && 6777 getTypeSizeInBits(MaxBEScev->getType()) <= BitWidth && 6778 AddRec->hasNoSelfWrap()) { 6779 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR( 6780 AddRec, SymbolicMaxBECount, BitWidth, SignHint); 6781 ConservativeResult = 6782 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType); 6783 } 6784 } 6785 } 6786 6787 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 6788 } 6789 case scUMaxExpr: 6790 case scSMaxExpr: 6791 case scUMinExpr: 6792 case scSMinExpr: 6793 case scSequentialUMinExpr: { 6794 Intrinsic::ID ID; 6795 switch (S->getSCEVType()) { 6796 case scUMaxExpr: 6797 ID = Intrinsic::umax; 6798 break; 6799 case scSMaxExpr: 6800 ID = Intrinsic::smax; 6801 break; 6802 case scUMinExpr: 6803 case scSequentialUMinExpr: 6804 ID = Intrinsic::umin; 6805 break; 6806 case scSMinExpr: 6807 ID = Intrinsic::smin; 6808 break; 6809 default: 6810 llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr."); 6811 } 6812 6813 const auto *NAry = cast<SCEVNAryExpr>(S); 6814 ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint, Depth + 1); 6815 for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i) 6816 X = X.intrinsic( 6817 ID, {X, getRangeRef(NAry->getOperand(i), SignHint, Depth + 1)}); 6818 return setRange(S, SignHint, 6819 ConservativeResult.intersectWith(X, RangeType)); 6820 } 6821 case scUnknown: { 6822 const SCEVUnknown *U = cast<SCEVUnknown>(S); 6823 Value *V = U->getValue(); 6824 6825 // Check if the IR explicitly contains !range metadata. 6826 std::optional<ConstantRange> MDRange = GetRangeFromMetadata(V); 6827 if (MDRange) 6828 ConservativeResult = 6829 ConservativeResult.intersectWith(*MDRange, RangeType); 6830 6831 // Use facts about recurrences in the underlying IR. Note that add 6832 // recurrences are AddRecExprs and thus don't hit this path. This 6833 // primarily handles shift recurrences. 6834 auto CR = getRangeForUnknownRecurrence(U); 6835 ConservativeResult = ConservativeResult.intersectWith(CR); 6836 6837 // See if ValueTracking can give us a useful range. 6838 const DataLayout &DL = getDataLayout(); 6839 KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, &DT); 6840 if (Known.getBitWidth() != BitWidth) 6841 Known = Known.zextOrTrunc(BitWidth); 6842 6843 // ValueTracking may be able to compute a tighter result for the number of 6844 // sign bits than for the value of those sign bits. 6845 unsigned NS = ComputeNumSignBits(V, DL, 0, &AC, nullptr, &DT); 6846 if (U->getType()->isPointerTy()) { 6847 // If the pointer size is larger than the index size type, this can cause 6848 // NS to be larger than BitWidth. So compensate for this. 6849 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType()); 6850 int ptrIdxDiff = ptrSize - BitWidth; 6851 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff) 6852 NS -= ptrIdxDiff; 6853 } 6854 6855 if (NS > 1) { 6856 // If we know any of the sign bits, we know all of the sign bits. 
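      // E.g. if NS == 4 for an i8 value, bits 7..4 all replicate bit 7, so
      // learning that any one of them is zero (or one) fixes all four.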
      if (!Known.Zero.getHiBits(NS).isZero())
        Known.Zero.setHighBits(NS);
      if (!Known.One.getHiBits(NS).isZero())
        Known.One.setHighBits(NS);
    }

    if (Known.getMinValue() != Known.getMaxValue() + 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
          RangeType);
    if (NS > 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
          RangeType);

    if (U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
      // Strengthen the range if the underlying IR value is a
      // global/alloca/heap allocation using the size of the object.
      ObjectSizeOpts Opts;
      Opts.RoundToAlign = false;
      Opts.NullIsUnknownSize = true;
      uint64_t ObjSize;
      if ((isa<GlobalVariable>(V) || isa<AllocaInst>(V) ||
           isAllocationFn(V, &TLI)) &&
          getObjectSize(V, ObjSize, DL, &TLI, Opts) && ObjSize > 1) {
        // The highest address the object can start at is ObjSize bytes before
        // the end (unsigned max value). If this value is not a multiple of the
        // alignment, the last possible start value is the next lowest multiple
        // of the alignment. Note: The computations below cannot overflow,
        // because if they would there's no possible start address for the
        // object.
        APInt MaxVal = APInt::getMaxValue(BitWidth) - APInt(BitWidth, ObjSize);
        uint64_t Align = U->getValue()->getPointerAlignment(DL).value();
        uint64_t Rem = MaxVal.urem(Align);
        MaxVal -= APInt(BitWidth, Rem);
        APInt MinVal = APInt::getZero(BitWidth);
        if (llvm::isKnownNonZero(V, DL))
          MinVal = Align;
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(MinVal, MaxVal + 1), RangeType);
      }
    }

    // The range of a Phi is a subset of the union of the ranges of its
    // inputs.
    if (PHINode *Phi = dyn_cast<PHINode>(V)) {
      // Make sure that we do not recurse over cyclic Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);

        for (const auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint, Depth + 1);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void)Erased;
      }
    }

    // vscale cannot be equal to zero.
    if (const auto *II = dyn_cast<IntrinsicInst>(V))
      if (II->getIntrinsicID() == Intrinsic::vscale) {
        ConstantRange Disallowed = APInt::getZero(BitWidth);
        ConservativeResult = ConservativeResult.difference(Disallowed);
      }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               bool Signed) {
  unsigned BitWidth = Step.getBitWidth();
  assert(BitWidth == StartRange.getBitWidth() &&
         BitWidth == MaxBECount.getBitWidth() && "mismatched bit widths");
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // This equation holds due to the well-defined wrap-around behavior of
    // APInt.
    Step = Step.abs();

  // Check if Offset would be more than the full span of BitWidth. If it is,
  // the expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. The checks above
  // guarantee no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimum value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximum value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected; return the [NewLower, NewUpper) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const APInt &MaxBECount) {
  assert(getTypeSizeInBits(Start->getType()) ==
             getTypeSizeInBits(Step->getType()) &&
         getTypeSizeInBits(Start->getType()) == MaxBECount.getBitWidth() &&
         "mismatched bit widths");

  // First, consider step signed.
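  // E.g. (an illustrative sketch) with a Start range of [0, 10), a constant
  // signed Step of 3 and MaxBECount == 5, getRangeForAffineARHelper yields
  // [0, 9 + 3*5 + 1) == [0, 25).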
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR = getRangeForAffineARHelper(
      StepSRange.getSignedMin(), StartSRange, MaxBECount, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECount,
                                              /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start), MaxBECount,
      /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with a constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know the max exit count (or from some other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take the Start and End values
  // and look at all intermediate values V1, V2, ..., Vn that IndVar takes
  // during the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  //   Case 1:  RangeMin    ...    Start V1 ... VN End ...           RangeMax;
  //   Case 2:  RangeMin Vk ... V1 Start    ...    End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // It is so if Start <= End and the step is positive, or Start >= End and
  // the step is negative.
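  // E.g. (illustration) for an i8 addrec {10,+,5} with End == 30, Start <=
  // End and the step is positive, so all intermediate values lie in [10, 30].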
7079 const SCEV *Start = applyLoopGuards(AddRec->getStart(), AddRec->getLoop()); 7080 ConstantRange StartRange = getRangeRef(Start, SignHint); 7081 ConstantRange EndRange = getRangeRef(End, SignHint); 7082 ConstantRange RangeBetween = StartRange.unionWith(EndRange); 7083 // If they already cover full iteration space, we will know nothing useful 7084 // even if we prove what we want to prove. 7085 if (RangeBetween.isFullSet()) 7086 return RangeBetween; 7087 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax). 7088 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet() 7089 : RangeBetween.isWrappedSet(); 7090 if (IsWrappedSet) 7091 return ConstantRange::getFull(BitWidth); 7092 7093 if (isKnownPositive(Step) && 7094 isKnownPredicateViaConstantRanges(LEPred, Start, End)) 7095 return RangeBetween; 7096 if (isKnownNegative(Step) && 7097 isKnownPredicateViaConstantRanges(GEPred, Start, End)) 7098 return RangeBetween; 7099 return ConstantRange::getFull(BitWidth); 7100 } 7101 7102 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 7103 const SCEV *Step, 7104 const APInt &MaxBECount) { 7105 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 7106 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 7107 7108 unsigned BitWidth = MaxBECount.getBitWidth(); 7109 assert(getTypeSizeInBits(Start->getType()) == BitWidth && 7110 getTypeSizeInBits(Step->getType()) == BitWidth && 7111 "mismatched bit widths"); 7112 7113 struct SelectPattern { 7114 Value *Condition = nullptr; 7115 APInt TrueValue; 7116 APInt FalseValue; 7117 7118 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 7119 const SCEV *S) { 7120 std::optional<unsigned> CastOp; 7121 APInt Offset(BitWidth, 0); 7122 7123 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 7124 "Should be!"); 7125 7126 // Peel off a constant offset: 7127 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 7128 // In the future we could consider being smarter here and handle 7129 // {Start+Step,+,Step} too. 
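      // E.g. (illustration) S == (2 + (i32 %cond ? 3 : 5)) is recognized with
      // Offset == 2; after the offset is re-applied below, TrueValue == 5 and
      // FalseValue == 7.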
7130 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 7131 return; 7132 7133 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 7134 S = SA->getOperand(1); 7135 } 7136 7137 // Peel off a cast operation 7138 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 7139 CastOp = SCast->getSCEVType(); 7140 S = SCast->getOperand(); 7141 } 7142 7143 using namespace llvm::PatternMatch; 7144 7145 auto *SU = dyn_cast<SCEVUnknown>(S); 7146 const APInt *TrueVal, *FalseVal; 7147 if (!SU || 7148 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 7149 m_APInt(FalseVal)))) { 7150 Condition = nullptr; 7151 return; 7152 } 7153 7154 TrueValue = *TrueVal; 7155 FalseValue = *FalseVal; 7156 7157 // Re-apply the cast we peeled off earlier 7158 if (CastOp) 7159 switch (*CastOp) { 7160 default: 7161 llvm_unreachable("Unknown SCEV cast type!"); 7162 7163 case scTruncate: 7164 TrueValue = TrueValue.trunc(BitWidth); 7165 FalseValue = FalseValue.trunc(BitWidth); 7166 break; 7167 case scZeroExtend: 7168 TrueValue = TrueValue.zext(BitWidth); 7169 FalseValue = FalseValue.zext(BitWidth); 7170 break; 7171 case scSignExtend: 7172 TrueValue = TrueValue.sext(BitWidth); 7173 FalseValue = FalseValue.sext(BitWidth); 7174 break; 7175 } 7176 7177 // Re-apply the constant offset we peeled off earlier 7178 TrueValue += Offset; 7179 FalseValue += Offset; 7180 } 7181 7182 bool isRecognized() { return Condition != nullptr; } 7183 }; 7184 7185 SelectPattern StartPattern(*this, BitWidth, Start); 7186 if (!StartPattern.isRecognized()) 7187 return ConstantRange::getFull(BitWidth); 7188 7189 SelectPattern StepPattern(*this, BitWidth, Step); 7190 if (!StepPattern.isRecognized()) 7191 return ConstantRange::getFull(BitWidth); 7192 7193 if (StartPattern.Condition != StepPattern.Condition) { 7194 // We don't handle this case today; but we could, by considering four 7195 // possibilities below instead of two. I'm not sure if there are cases where 7196 // that will help over what getRange already does, though. 7197 return ConstantRange::getFull(BitWidth); 7198 } 7199 7200 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 7201 // construct arbitrary general SCEV expressions here. This function is called 7202 // from deep in the call stack, and calling getSCEV (on a sext instruction, 7203 // say) can end up caching a suboptimal value. 7204 7205 // FIXME: without the explicit `this` receiver below, MSVC errors out with 7206 // C2352 and C2512 (otherwise it isn't needed). 7207 7208 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 7209 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 7210 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 7211 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 7212 7213 ConstantRange TrueRange = 7214 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount); 7215 ConstantRange FalseRange = 7216 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount); 7217 7218 return TrueRange.unionWith(FalseRange); 7219 } 7220 7221 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 7222 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 7223 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 7224 7225 // Return early if there are no flags to propagate to the SCEV. 
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

const Instruction *
ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
  if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
    return &*AddRec->getLoop()->getHeader()->begin();
  if (auto *U = dyn_cast<SCEVUnknown>(S))
    if (auto *I = dyn_cast<Instruction>(U->getValue()))
      return I;
  return nullptr;
}

const Instruction *
ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
                                       bool &Precise) {
  Precise = true;
  // Do a bounded search of the def relation of the requested SCEVs.
  SmallSet<const SCEV *, 16> Visited;
  SmallVector<const SCEV *> Worklist;
  auto pushOp = [&](const SCEV *S) {
    if (!Visited.insert(S).second)
      return;
    // The threshold of 30 here is arbitrary.
    if (Visited.size() > 30) {
      Precise = false;
      return;
    }
    Worklist.push_back(S);
  };

  for (const auto *S : Ops)
    pushOp(S);

  const Instruction *Bound = nullptr;
  while (!Worklist.empty()) {
    auto *S = Worklist.pop_back_val();
    if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
      if (!Bound || DT.dominates(Bound, DefI))
        Bound = DefI;
    } else {
      for (const auto *Op : S->operands())
        pushOp(Op);
    }
  }
  return Bound ? Bound : &*F.getEntryBlock().begin();
}

const Instruction *
ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
  bool Discard;
  return getDefiningScopeBound(Ops, Discard);
}

bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
                                                        const Instruction *B) {
  if (A->getParent() == B->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 B->getIterator()))
    return true;

  auto *BLoop = LI.getLoopFor(B->getParent());
  if (BLoop && BLoop->getHeader() == B->getParent() &&
      BLoop->getLoopPreheader() == A->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 A->getParent()->end()) &&
      isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(),
                                                 B->getIterator()))
    return true;
  return false;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find an
  // upper bound on the defining scope for the SCEV, and prove that I is
  // executed every time we enter that scope.
  // When the bounding scope is a loop (the common case), this is equivalent
  // to proving I executes on every iteration of that loop.
  SmallVector<const SCEV *> SCEVOps;
  for (const Use &Op : I->operands()) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (isSCEVable(Op->getType()))
      SCEVOps.push_back(getSCEV(Op));
  }
  auto *DefI = getDefiningScopeBound(SCEVOps);
  return isGuaranteedToTransferExecutionTo(DefI, I);
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison, period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // If the loop only has one exit, then we know that, if the loop is entered,
  // any instruction dominating that exit will be executed. If any such
  // instruction would result in UB, the addrec cannot be poison.
  //
  // This is basically the same reasoning as in isSCEVExprNeverPoison(), but
  // also handles uses outside the loop header (they just need to dominate the
  // single exit).

  auto *ExitingBB = L->getExitingBlock();
  if (!ExitingBB || !loopHasNoAbnormalExits(L))
    return false;

  SmallPtrSet<const Value *, 16> KnownPoison;
  SmallVector<const Instruction *, 8> Worklist;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // Worklist.
  KnownPoison.insert(I);
  Worklist.push_back(I);

  while (!Worklist.empty()) {
    const Instruction *Poison = Worklist.pop_back_val();

    for (const Use &U : Poison->uses()) {
      const Instruction *PoisonUser = cast<Instruction>(U.getUser());
      if (mustTriggerUB(PoisonUser, KnownPoison) &&
          DT.dominates(PoisonUser->getParent(), ExitingBB))
        return true;

      if (propagatesPoison(U) && L->contains(PoisonUser))
        if (KnownPoison.insert(PoisonUser).second)
          Worklist.push_back(PoisonUser);
    }
  }

  return false;
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayThrow() || I->mayWriteToMemory();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /* HasNoSideEffects */ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
  // A mustprogress loop without side effects must be finite.
  // TODO: The check used here is very conservative; only *specific* side
  // effects are well defined in infinite loops.
7416 return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L));
7417 }
7418
7419 const SCEV *ScalarEvolution::createSCEVIter(Value *V) {
7420 // Worklist item with a Value and a bool indicating whether all operands have
7421 // been visited already.
7422 using PointerTy = PointerIntPair<Value *, 1, bool>;
7423 SmallVector<PointerTy> Stack;
7424
7425 Stack.emplace_back(V, true);
7426 Stack.emplace_back(V, false);
7427 while (!Stack.empty()) {
7428 auto E = Stack.pop_back_val();
7429 Value *CurV = E.getPointer();
7430
7431 if (getExistingSCEV(CurV))
7432 continue;
7433
7434 SmallVector<Value *> Ops;
7435 const SCEV *CreatedSCEV = nullptr;
7436 // If all operands have been visited already, create the SCEV.
7437 if (E.getInt()) {
7438 CreatedSCEV = createSCEV(CurV);
7439 } else {
7440 // Otherwise get the operands we need to create SCEVs for before creating
7441 // the SCEV for CurV. If the SCEV for CurV can be constructed trivially,
7442 // just use it.
7443 CreatedSCEV = getOperandsToCreate(CurV, Ops);
7444 }
7445
7446 if (CreatedSCEV) {
7447 insertValueToMap(CurV, CreatedSCEV);
7448 } else {
7449 // Queue CurV for SCEV creation, followed by its operands which need to
7450 // be constructed first.
7451 Stack.emplace_back(CurV, true);
7452 for (Value *Op : Ops)
7453 Stack.emplace_back(Op, false);
7454 }
7455 }
7456
7457 return getExistingSCEV(V);
7458 }
7459
7460 const SCEV *
7461 ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
7462 if (!isSCEVable(V->getType()))
7463 return getUnknown(V);
7464
7465 if (Instruction *I = dyn_cast<Instruction>(V)) {
7466 // Don't attempt to analyze instructions in blocks that aren't
7467 // reachable. Such instructions don't matter, and they aren't required
7468 // to obey basic rules for definitions dominating uses which this
7469 // analysis depends on.
7470 if (!DT.isReachableFromEntry(I->getParent()))
7471 return getUnknown(PoisonValue::get(V->getType()));
7472 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
7473 return getConstant(CI);
7474 else if (isa<GlobalAlias>(V))
7475 return getUnknown(V);
7476 else if (!isa<ConstantExpr>(V))
7477 return getUnknown(V);
7478
7479 Operator *U = cast<Operator>(V);
7480 if (auto BO =
7481 MatchBinaryOp(U, getDataLayout(), AC, DT, dyn_cast<Instruction>(V))) {
7482 bool IsConstArg = isa<ConstantInt>(BO->RHS);
7483 switch (BO->Opcode) {
7484 case Instruction::Add:
7485 case Instruction::Mul: {
7486 // For additions and multiplications, traverse add/mul chains for which we
7487 // can potentially create a single SCEV, to reduce the number of
7488 // get{Add,Mul}Expr calls.
7489 do {
7490 if (BO->Op) {
7491 if (BO->Op != V && getExistingSCEV(BO->Op)) {
7492 Ops.push_back(BO->Op);
7493 break;
7494 }
7495 }
7496 Ops.push_back(BO->RHS);
7497 auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT,
7498 dyn_cast<Instruction>(V));
7499 if (!NewBO ||
7500 (BO->Opcode == Instruction::Add &&
7501 (NewBO->Opcode != Instruction::Add &&
7502 NewBO->Opcode != Instruction::Sub)) ||
7503 (BO->Opcode == Instruction::Mul &&
7504 NewBO->Opcode != Instruction::Mul)) {
7505 Ops.push_back(BO->LHS);
7506 break;
7507 }
7508 // CreateSCEV calls getNoWrapFlagsFromUB, which under certain conditions
7509 // requires a SCEV for the LHS.
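// Hypothetical chain, for exposition only: given
//   %a = add nuw i64 %x, 4
//   %b = add nuw i64 %a, 8
// flattening both adds into one SCEV would lose track of where nuw applies,
// so the walk below stops early when an add carries flags that may transfer
// to its SCEV (poison there would trigger UB), and lets createSCEV honor
// those flags with a separate getAddExpr call.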
7510 if (BO->Op && (BO->IsNSW || BO->IsNUW)) {
7511 auto *I = dyn_cast<Instruction>(BO->Op);
7512 if (I && programUndefinedIfPoison(I)) {
7513 Ops.push_back(BO->LHS);
7514 break;
7515 }
7516 }
7517 BO = NewBO;
7518 } while (true);
7519 return nullptr;
7520 }
7521 case Instruction::Sub:
7522 case Instruction::UDiv:
7523 case Instruction::URem:
7524 break;
7525 case Instruction::AShr:
7526 case Instruction::Shl:
7527 case Instruction::Xor:
7528 if (!IsConstArg)
7529 return nullptr;
7530 break;
7531 case Instruction::And:
7532 case Instruction::Or:
7533 if (!IsConstArg && !BO->LHS->getType()->isIntegerTy(1))
7534 return nullptr;
7535 break;
7536 case Instruction::LShr:
7537 return getUnknown(V);
7538 default:
7539 llvm_unreachable("Unhandled binop");
7540 break;
7541 }
7542
7543 Ops.push_back(BO->LHS);
7544 Ops.push_back(BO->RHS);
7545 return nullptr;
7546 }
7547
7548 switch (U->getOpcode()) {
7549 case Instruction::Trunc:
7550 case Instruction::ZExt:
7551 case Instruction::SExt:
7552 case Instruction::PtrToInt:
7553 Ops.push_back(U->getOperand(0));
7554 return nullptr;
7555
7556 case Instruction::BitCast:
7557 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) {
7558 Ops.push_back(U->getOperand(0));
7559 return nullptr;
7560 }
7561 return getUnknown(V);
7562
7563 case Instruction::SDiv:
7564 case Instruction::SRem:
7565 Ops.push_back(U->getOperand(0));
7566 Ops.push_back(U->getOperand(1));
7567 return nullptr;
7568
7569 case Instruction::GetElementPtr:
7570 assert(cast<GEPOperator>(U)->getSourceElementType()->isSized() &&
7571 "GEP source element type must be sized");
7572 for (Value *Index : U->operands())
7573 Ops.push_back(Index);
7574 return nullptr;
7575
7576 case Instruction::IntToPtr:
7577 return getUnknown(V);
7578
7579 case Instruction::PHI:
7580 // Keep constructing SCEVs for phis recursively for now.
7581 return nullptr;
7582
7583 case Instruction::Select: {
7584 // Check if U is a select that can be simplified to a SCEVUnknown.
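// A sketch of the intent (assumed examples): a select such as
//   %s = select i1 %c, i32 %a, i32 %b   with %c = icmp eq i32 %x, 0
// remains analyzable, while an eq/ne compare against a non-zero value, or a
// compare over a type wider than the select's result, is folded to a
// SCEVUnknown by the check below.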
7585 auto CanSimplifyToUnknown = [this, U]() { 7586 if (U->getType()->isIntegerTy(1) || isa<ConstantInt>(U->getOperand(0))) 7587 return false; 7588 7589 auto *ICI = dyn_cast<ICmpInst>(U->getOperand(0)); 7590 if (!ICI) 7591 return false; 7592 Value *LHS = ICI->getOperand(0); 7593 Value *RHS = ICI->getOperand(1); 7594 if (ICI->getPredicate() == CmpInst::ICMP_EQ || 7595 ICI->getPredicate() == CmpInst::ICMP_NE) { 7596 if (!(isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero())) 7597 return true; 7598 } else if (getTypeSizeInBits(LHS->getType()) > 7599 getTypeSizeInBits(U->getType())) 7600 return true; 7601 return false; 7602 }; 7603 if (CanSimplifyToUnknown()) 7604 return getUnknown(U); 7605 7606 for (Value *Inc : U->operands()) 7607 Ops.push_back(Inc); 7608 return nullptr; 7609 break; 7610 } 7611 case Instruction::Call: 7612 case Instruction::Invoke: 7613 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) { 7614 Ops.push_back(RV); 7615 return nullptr; 7616 } 7617 7618 if (auto *II = dyn_cast<IntrinsicInst>(U)) { 7619 switch (II->getIntrinsicID()) { 7620 case Intrinsic::abs: 7621 Ops.push_back(II->getArgOperand(0)); 7622 return nullptr; 7623 case Intrinsic::umax: 7624 case Intrinsic::umin: 7625 case Intrinsic::smax: 7626 case Intrinsic::smin: 7627 case Intrinsic::usub_sat: 7628 case Intrinsic::uadd_sat: 7629 Ops.push_back(II->getArgOperand(0)); 7630 Ops.push_back(II->getArgOperand(1)); 7631 return nullptr; 7632 case Intrinsic::start_loop_iterations: 7633 case Intrinsic::annotation: 7634 case Intrinsic::ptr_annotation: 7635 Ops.push_back(II->getArgOperand(0)); 7636 return nullptr; 7637 default: 7638 break; 7639 } 7640 } 7641 break; 7642 } 7643 7644 return nullptr; 7645 } 7646 7647 const SCEV *ScalarEvolution::createSCEV(Value *V) { 7648 if (!isSCEVable(V->getType())) 7649 return getUnknown(V); 7650 7651 if (Instruction *I = dyn_cast<Instruction>(V)) { 7652 // Don't attempt to analyze instructions in blocks that aren't 7653 // reachable. Such instructions don't matter, and they aren't required 7654 // to obey basic rules for definitions dominating uses which this 7655 // analysis depends on. 7656 if (!DT.isReachableFromEntry(I->getParent())) 7657 return getUnknown(PoisonValue::get(V->getType())); 7658 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 7659 return getConstant(CI); 7660 else if (isa<GlobalAlias>(V)) 7661 return getUnknown(V); 7662 else if (!isa<ConstantExpr>(V)) 7663 return getUnknown(V); 7664 7665 const SCEV *LHS; 7666 const SCEV *RHS; 7667 7668 Operator *U = cast<Operator>(V); 7669 if (auto BO = 7670 MatchBinaryOp(U, getDataLayout(), AC, DT, dyn_cast<Instruction>(V))) { 7671 switch (BO->Opcode) { 7672 case Instruction::Add: { 7673 // The simple thing to do would be to just call getSCEV on both operands 7674 // and call getAddExpr with the result. However if we're looking at a 7675 // bunch of things all added together, this can be quite inefficient, 7676 // because it leads to N-1 getAddExpr calls for N ultimate operands. 7677 // Instead, gather up all the operands and make a single getAddExpr call. 7678 // LLVM IR canonical form means we need only traverse the left operands. 7679 SmallVector<const SCEV *, 4> AddOps; 7680 do { 7681 if (BO->Op) { 7682 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 7683 AddOps.push_back(OpSCEV); 7684 break; 7685 } 7686 7687 // If a NUW or NSW flag can be applied to the SCEV for this 7688 // addition, then compute the SCEV for this addition by itself 7689 // with a separate call to getAddExpr. 
We need to do that 7690 // instead of pushing the operands of the addition onto AddOps, 7691 // since the flags are only known to apply to this particular 7692 // addition - they may not apply to other additions that can be 7693 // formed with operands from AddOps. 7694 const SCEV *RHS = getSCEV(BO->RHS); 7695 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 7696 if (Flags != SCEV::FlagAnyWrap) { 7697 const SCEV *LHS = getSCEV(BO->LHS); 7698 if (BO->Opcode == Instruction::Sub) 7699 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 7700 else 7701 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 7702 break; 7703 } 7704 } 7705 7706 if (BO->Opcode == Instruction::Sub) 7707 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 7708 else 7709 AddOps.push_back(getSCEV(BO->RHS)); 7710 7711 auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT, 7712 dyn_cast<Instruction>(V)); 7713 if (!NewBO || (NewBO->Opcode != Instruction::Add && 7714 NewBO->Opcode != Instruction::Sub)) { 7715 AddOps.push_back(getSCEV(BO->LHS)); 7716 break; 7717 } 7718 BO = NewBO; 7719 } while (true); 7720 7721 return getAddExpr(AddOps); 7722 } 7723 7724 case Instruction::Mul: { 7725 SmallVector<const SCEV *, 4> MulOps; 7726 do { 7727 if (BO->Op) { 7728 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 7729 MulOps.push_back(OpSCEV); 7730 break; 7731 } 7732 7733 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 7734 if (Flags != SCEV::FlagAnyWrap) { 7735 LHS = getSCEV(BO->LHS); 7736 RHS = getSCEV(BO->RHS); 7737 MulOps.push_back(getMulExpr(LHS, RHS, Flags)); 7738 break; 7739 } 7740 } 7741 7742 MulOps.push_back(getSCEV(BO->RHS)); 7743 auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT, 7744 dyn_cast<Instruction>(V)); 7745 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 7746 MulOps.push_back(getSCEV(BO->LHS)); 7747 break; 7748 } 7749 BO = NewBO; 7750 } while (true); 7751 7752 return getMulExpr(MulOps); 7753 } 7754 case Instruction::UDiv: 7755 LHS = getSCEV(BO->LHS); 7756 RHS = getSCEV(BO->RHS); 7757 return getUDivExpr(LHS, RHS); 7758 case Instruction::URem: 7759 LHS = getSCEV(BO->LHS); 7760 RHS = getSCEV(BO->RHS); 7761 return getURemExpr(LHS, RHS); 7762 case Instruction::Sub: { 7763 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 7764 if (BO->Op) 7765 Flags = getNoWrapFlagsFromUB(BO->Op); 7766 LHS = getSCEV(BO->LHS); 7767 RHS = getSCEV(BO->RHS); 7768 return getMinusSCEV(LHS, RHS, Flags); 7769 } 7770 case Instruction::And: 7771 // For an expression like x&255 that merely masks off the high bits, 7772 // use zext(trunc(x)) as the SCEV expression. 7773 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7774 if (CI->isZero()) 7775 return getSCEV(BO->RHS); 7776 if (CI->isMinusOne()) 7777 return getSCEV(BO->LHS); 7778 const APInt &A = CI->getValue(); 7779 7780 // Instcombine's ShrinkDemandedConstant may strip bits out of 7781 // constants, obscuring what would otherwise be a low-bits mask. 7782 // Use computeKnownBits to compute what ShrinkDemandedConstant 7783 // knew about to reconstruct a low-bits mask value. 
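// Worked example (illustrative, not from the source): for
//   %y = and i32 %x, 240   ; 0xF0, so TZ = 4 and LZ = 24
// the mask is contiguous, and the code below models %y as
//   (zext (trunc (%x /u 16) to i4) to i32) * 16
// i.e. divide out the trailing zeros, truncate to the mask width, then
// re-extend and multiply back.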
7784 unsigned LZ = A.countl_zero(); 7785 unsigned TZ = A.countr_zero(); 7786 unsigned BitWidth = A.getBitWidth(); 7787 KnownBits Known(BitWidth); 7788 computeKnownBits(BO->LHS, Known, getDataLayout(), 7789 0, &AC, nullptr, &DT); 7790 7791 APInt EffectiveMask = 7792 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 7793 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 7794 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 7795 const SCEV *LHS = getSCEV(BO->LHS); 7796 const SCEV *ShiftedLHS = nullptr; 7797 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 7798 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 7799 // For an expression like (x * 8) & 8, simplify the multiply. 7800 unsigned MulZeros = OpC->getAPInt().countr_zero(); 7801 unsigned GCD = std::min(MulZeros, TZ); 7802 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 7803 SmallVector<const SCEV*, 4> MulOps; 7804 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 7805 append_range(MulOps, LHSMul->operands().drop_front()); 7806 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 7807 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 7808 } 7809 } 7810 if (!ShiftedLHS) 7811 ShiftedLHS = getUDivExpr(LHS, MulCount); 7812 return getMulExpr( 7813 getZeroExtendExpr( 7814 getTruncateExpr(ShiftedLHS, 7815 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 7816 BO->LHS->getType()), 7817 MulCount); 7818 } 7819 } 7820 // Binary `and` is a bit-wise `umin`. 7821 if (BO->LHS->getType()->isIntegerTy(1)) { 7822 LHS = getSCEV(BO->LHS); 7823 RHS = getSCEV(BO->RHS); 7824 return getUMinExpr(LHS, RHS); 7825 } 7826 break; 7827 7828 case Instruction::Or: 7829 // Binary `or` is a bit-wise `umax`. 7830 if (BO->LHS->getType()->isIntegerTy(1)) { 7831 LHS = getSCEV(BO->LHS); 7832 RHS = getSCEV(BO->RHS); 7833 return getUMaxExpr(LHS, RHS); 7834 } 7835 break; 7836 7837 case Instruction::Xor: 7838 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7839 // If the RHS of xor is -1, then this is a not operation. 7840 if (CI->isMinusOne()) 7841 return getNotSCEV(getSCEV(BO->LHS)); 7842 7843 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 7844 // This is a variant of the check for xor with -1, and it handles 7845 // the case where instcombine has trimmed non-demanded bits out 7846 // of an xor with -1. 7847 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 7848 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 7849 if (LBO->getOpcode() == Instruction::And && 7850 LCI->getValue() == CI->getValue()) 7851 if (const SCEVZeroExtendExpr *Z = 7852 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 7853 Type *UTy = BO->LHS->getType(); 7854 const SCEV *Z0 = Z->getOperand(); 7855 Type *Z0Ty = Z0->getType(); 7856 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 7857 7858 // If C is a low-bits mask, the zero extend is serving to 7859 // mask off the high bits. Complement the operand and 7860 // re-apply the zext. 7861 if (CI->getValue().isMask(Z0TySize)) 7862 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 7863 7864 // If C is a single bit, it may be in the sign-bit position 7865 // before the zero-extend. In this case, represent the xor 7866 // using an add, which is equivalent, and re-apply the zext. 
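// Worked instance (for exposition): if the masked LHS analyzes to
// zext i8 %a to i32 and C = 128, then C truncates to the i8 sign mask, and
// the xor is modeled as zext(add(%a, 128)) -- flipping the old sign bit is
// the same as adding 128 in i8 arithmetic.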
7867 APInt Trunc = CI->getValue().trunc(Z0TySize);
7868 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
7869 Trunc.isSignMask())
7870 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
7871 UTy);
7872 }
7873 }
7874 break;
7875
7876 case Instruction::Shl:
7877 // Turn shift left of a constant amount into a multiply.
7878 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
7879 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
7880
7881 // If the shift count is not less than the bitwidth, the result of
7882 // the shift is undefined. Don't try to analyze it, because the
7883 // resolution chosen here may differ from the resolution chosen in
7884 // other parts of the compiler.
7885 if (SA->getValue().uge(BitWidth))
7886 break;
7887
7888 // We can safely preserve the nuw flag in all cases. It's also safe to
7889 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
7890 // requires special handling. It can be preserved as long as we're not
7891 // left shifting by bitwidth - 1.
7892 auto Flags = SCEV::FlagAnyWrap;
7893 if (BO->Op) {
7894 auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
7895 if ((MulFlags & SCEV::FlagNSW) &&
7896 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
7897 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
7898 if (MulFlags & SCEV::FlagNUW)
7899 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
7900 }
7901
7902 ConstantInt *X = ConstantInt::get(
7903 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
7904 return getMulExpr(getSCEV(BO->LHS), getConstant(X), Flags);
7905 }
7906 break;
7907
7908 case Instruction::AShr:
7909 // AShr X, C, where C is a constant.
7910 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
7911 if (!CI)
7912 break;
7913
7914 Type *OuterTy = BO->LHS->getType();
7915 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
7916 // If the shift count is not less than the bitwidth, the result of
7917 // the shift is undefined. Don't try to analyze it, because the
7918 // resolution chosen here may differ from the resolution chosen in
7919 // other parts of the compiler.
7920 if (CI->getValue().uge(BitWidth))
7921 break;
7922
7923 if (CI->isZero())
7924 return getSCEV(BO->LHS); // shift by zero --> noop
7925
7926 uint64_t AShrAmt = CI->getZExtValue();
7927 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
7928
7929 Operator *L = dyn_cast<Operator>(BO->LHS);
7930 const SCEV *AddTruncateExpr = nullptr;
7931 ConstantInt *ShlAmtCI = nullptr;
7932 const SCEV *AddConstant = nullptr;
7933
7934 if (L && L->getOpcode() == Instruction::Add) {
7935 // X = Shl A, n
7936 // Y = Add X, c
7937 // Z = AShr Y, m
7938 // n, c and m are constants.
7939
7940 Operator *LShift = dyn_cast<Operator>(L->getOperand(0));
7941 ConstantInt *AddOperandCI = dyn_cast<ConstantInt>(L->getOperand(1));
7942 if (LShift && LShift->getOpcode() == Instruction::Shl) {
7943 if (AddOperandCI) {
7944 const SCEV *ShlOp0SCEV = getSCEV(LShift->getOperand(0));
7945 ShlAmtCI = dyn_cast<ConstantInt>(LShift->getOperand(1));
7946 // Since we truncate to TruncTy, the AddConstant should be of the
7947 // same type, so create a new Constant with the same type as TruncTy.
7948 // Also, the Add constant should be shifted right by the AShr amount.
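// Numeric sketch (illustrative): on i32 with n = m = 24 and c = 1 << 24,
//   (((A << 24) + (1 << 24)) >> 24)   ; arithmetic shift
// becomes sext(add(trunc(A) to i8, 1)) back to i32, matching the model
// built below.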
7949 APInt AddOperand = AddOperandCI->getValue().ashr(AShrAmt);
7950 AddConstant = getConstant(AddOperand.trunc(BitWidth - AShrAmt));
7951 // We model the expression as sext(add(trunc(A), c << n)); since the
7952 // sext(trunc) part is already handled below, we create an
7953 // AddExpr(TruncExp) which will be used later.
7954 AddTruncateExpr = getTruncateExpr(ShlOp0SCEV, TruncTy);
7955 }
7956 }
7957 } else if (L && L->getOpcode() == Instruction::Shl) {
7958 // X = Shl A, n
7959 // Y = AShr X, m
7960 // Both n and m are constant.
7961
7962 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
7963 ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
7964 AddTruncateExpr = getTruncateExpr(ShlOp0SCEV, TruncTy);
7965 }
7966
7967 if (AddTruncateExpr && ShlAmtCI) {
7968 // We can merge the two given cases into a single SCEV statement;
7969 // in case n = m, the mul expression will be 2^0, so it gets resolved to
7970 // a simpler case. The following code handles the two cases:
7971 //
7972 // 1) For a two-shift sext-inreg, i.e. n = m,
7973 // use sext(trunc(x)) as the SCEV expression.
7974 //
7975 // 2) When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV
7976 // expression. We already checked that ShlAmt < BitWidth, so
7977 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
7978 // ShlAmt - AShrAmt < Amt.
7979 const APInt &ShlAmt = ShlAmtCI->getValue();
7980 if (ShlAmt.ult(BitWidth) && ShlAmt.uge(AShrAmt)) {
7981 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
7982 ShlAmtCI->getZExtValue() - AShrAmt);
7983 const SCEV *CompositeExpr =
7984 getMulExpr(AddTruncateExpr, getConstant(Mul));
7985 if (L->getOpcode() != Instruction::Shl)
7986 CompositeExpr = getAddExpr(CompositeExpr, AddConstant);
7987
7988 return getSignExtendExpr(CompositeExpr, OuterTy);
7989 }
7990 }
7991 break;
7992 }
7993 }
7994
7995 switch (U->getOpcode()) {
7996 case Instruction::Trunc:
7997 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
7998
7999 case Instruction::ZExt:
8000 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
8001
8002 case Instruction::SExt:
8003 if (auto BO = MatchBinaryOp(U->getOperand(0), getDataLayout(), AC, DT,
8004 dyn_cast<Instruction>(V))) {
8005 // The NSW flag of a subtract does not always survive the conversion to
8006 // A + (-1)*B. By pushing sign extension onto its operands we are much
8007 // more likely to preserve NSW and allow later AddRec optimisations.
8008 //
8009 // NOTE: This is effectively duplicating this logic from getSignExtend:
8010 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
8011 // but by that point the NSW information has potentially been lost.
8012 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8013 Type *Ty = U->getType();
8014 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
8015 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
8016 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
8017 }
8018 }
8019 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
8020
8021 case Instruction::BitCast:
8022 // BitCasts are no-op casts so we just eliminate the cast.
8023 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
8024 return getSCEV(U->getOperand(0));
8025 break;
8026
8027 case Instruction::PtrToInt: {
8028 // Pointer to integer cast is straightforward, so do model it.
8029 const SCEV *Op = getSCEV(U->getOperand(0));
8030 Type *DstIntTy = U->getType();
8031 // But only if effective SCEV (integer) type is wide enough to represent
8032 // all possible pointer values.
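// Note (assumed semantics): for integral pointers getPtrToIntExpr models
// the cast losslessly, truncating or extending to DstIntTy as needed; for
// non-integral address spaces it reports SCEVCouldNotCompute, which the
// check below turns into a plain unknown.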
8033 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
8034 if (isa<SCEVCouldNotCompute>(IntOp))
8035 return getUnknown(V);
8036 return IntOp;
8037 }
8038 case Instruction::IntToPtr:
8039 // Just don't deal with inttoptr casts.
8040 return getUnknown(V);
8041
8042 case Instruction::SDiv:
8043 // If both operands are non-negative, this is just a udiv.
8044 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
8045 isKnownNonNegative(getSCEV(U->getOperand(1))))
8046 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
8047 break;
8048
8049 case Instruction::SRem:
8050 // If both operands are non-negative, this is just a urem.
8051 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
8052 isKnownNonNegative(getSCEV(U->getOperand(1))))
8053 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
8054 break;
8055
8056 case Instruction::GetElementPtr:
8057 return createNodeForGEP(cast<GEPOperator>(U));
8058
8059 case Instruction::PHI:
8060 return createNodeForPHI(cast<PHINode>(U));
8061
8062 case Instruction::Select:
8063 return createNodeForSelectOrPHI(U, U->getOperand(0), U->getOperand(1),
8064 U->getOperand(2));
8065
8066 case Instruction::Call:
8067 case Instruction::Invoke:
8068 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
8069 return getSCEV(RV);
8070
8071 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
8072 switch (II->getIntrinsicID()) {
8073 case Intrinsic::abs:
8074 return getAbsExpr(
8075 getSCEV(II->getArgOperand(0)),
8076 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
8077 case Intrinsic::umax:
8078 LHS = getSCEV(II->getArgOperand(0));
8079 RHS = getSCEV(II->getArgOperand(1));
8080 return getUMaxExpr(LHS, RHS);
8081 case Intrinsic::umin:
8082 LHS = getSCEV(II->getArgOperand(0));
8083 RHS = getSCEV(II->getArgOperand(1));
8084 return getUMinExpr(LHS, RHS);
8085 case Intrinsic::smax:
8086 LHS = getSCEV(II->getArgOperand(0));
8087 RHS = getSCEV(II->getArgOperand(1));
8088 return getSMaxExpr(LHS, RHS);
8089 case Intrinsic::smin:
8090 LHS = getSCEV(II->getArgOperand(0));
8091 RHS = getSCEV(II->getArgOperand(1));
8092 return getSMinExpr(LHS, RHS);
8093 case Intrinsic::usub_sat: {
8094 const SCEV *X = getSCEV(II->getArgOperand(0));
8095 const SCEV *Y = getSCEV(II->getArgOperand(1));
8096 const SCEV *ClampedY = getUMinExpr(X, Y);
8097 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
8098 }
8099 case Intrinsic::uadd_sat: {
8100 const SCEV *X = getSCEV(II->getArgOperand(0));
8101 const SCEV *Y = getSCEV(II->getArgOperand(1));
8102 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
8103 return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
8104 }
8105 case Intrinsic::start_loop_iterations:
8106 case Intrinsic::annotation:
8107 case Intrinsic::ptr_annotation:
8108 // A start_loop_iterations, llvm.annotation or llvm.ptr.annotation is
8109 // just equivalent to the first operand for SCEV purposes.
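// E.g. %v = call i32 @llvm.annotation.i32(i32 %x, ...) analyzes as %x.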
8110 return getSCEV(II->getArgOperand(0)); 8111 case Intrinsic::vscale: 8112 return getVScale(II->getType()); 8113 default: 8114 break; 8115 } 8116 } 8117 break; 8118 } 8119 8120 return getUnknown(V); 8121 } 8122 8123 //===----------------------------------------------------------------------===// 8124 // Iteration Count Computation Code 8125 // 8126 8127 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) { 8128 if (isa<SCEVCouldNotCompute>(ExitCount)) 8129 return getCouldNotCompute(); 8130 8131 auto *ExitCountType = ExitCount->getType(); 8132 assert(ExitCountType->isIntegerTy()); 8133 auto *EvalTy = Type::getIntNTy(ExitCountType->getContext(), 8134 1 + ExitCountType->getScalarSizeInBits()); 8135 return getTripCountFromExitCount(ExitCount, EvalTy, nullptr); 8136 } 8137 8138 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount, 8139 Type *EvalTy, 8140 const Loop *L) { 8141 if (isa<SCEVCouldNotCompute>(ExitCount)) 8142 return getCouldNotCompute(); 8143 8144 unsigned ExitCountSize = getTypeSizeInBits(ExitCount->getType()); 8145 unsigned EvalSize = EvalTy->getPrimitiveSizeInBits(); 8146 8147 auto CanAddOneWithoutOverflow = [&]() { 8148 ConstantRange ExitCountRange = 8149 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED); 8150 if (!ExitCountRange.contains(APInt::getMaxValue(ExitCountSize))) 8151 return true; 8152 8153 return L && isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, ExitCount, 8154 getMinusOne(ExitCount->getType())); 8155 }; 8156 8157 // If we need to zero extend the backedge count, check if we can add one to 8158 // it prior to zero extending without overflow. Provided this is safe, it 8159 // allows better simplification of the +1. 8160 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow()) 8161 return getZeroExtendExpr( 8162 getAddExpr(ExitCount, getOne(ExitCount->getType())), EvalTy); 8163 8164 // Get the total trip count from the count by adding 1. This may wrap. 8165 return getAddExpr(getTruncateOrZeroExtend(ExitCount, EvalTy), getOne(EvalTy)); 8166 } 8167 8168 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 8169 if (!ExitCount) 8170 return 0; 8171 8172 ConstantInt *ExitConst = ExitCount->getValue(); 8173 8174 // Guard against huge trip counts. 8175 if (ExitConst->getValue().getActiveBits() > 32) 8176 return 0; 8177 8178 // In case of integer overflow, this returns 0, which is correct. 
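// E.g. a backedge-taken count of 9 yields a trip count of 10. The guard
// above caps the count at 2^32 - 1, whose increment wraps to the 0
// ("unknown") sentinel.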
8179 return ((unsigned)ExitConst->getZExtValue()) + 1;
8180 }
8181
8182 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
8183 auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
8184 return getConstantTripCount(ExitCount);
8185 }
8186
8187 unsigned
8188 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
8189 const BasicBlock *ExitingBlock) {
8190 assert(ExitingBlock && "Must pass a non-null exiting block!");
8191 assert(L->isLoopExiting(ExitingBlock) &&
8192 "Exiting block must actually branch out of the loop!");
8193 const SCEVConstant *ExitCount =
8194 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
8195 return getConstantTripCount(ExitCount);
8196 }
8197
8198 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
8199 const auto *MaxExitCount =
8200 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
8201 return getConstantTripCount(MaxExitCount);
8202 }
8203
8204 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
8205 SmallVector<BasicBlock *, 8> ExitingBlocks;
8206 L->getExitingBlocks(ExitingBlocks);
8207
8208 std::optional<unsigned> Res;
8209 for (auto *ExitingBB : ExitingBlocks) {
8210 unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
8211 if (!Res)
8212 Res = Multiple;
8213 Res = (unsigned)std::gcd(*Res, Multiple);
8214 }
8215 return Res.value_or(1);
8216 }
8217
8218 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
8219 const SCEV *ExitCount) {
8220 if (ExitCount == getCouldNotCompute())
8221 return 1;
8222
8223 // Get the trip count.
8224 const SCEV *TCExpr = getTripCountFromExitCount(applyLoopGuards(ExitCount, L));
8225
8226 APInt Multiple = getNonZeroConstantMultiple(TCExpr);
8227 // If a trip multiple is huge (>=2^32), the trip count is still divisible by
8228 // the greatest power of 2 divisor less than 2^32.
8229 return Multiple.getActiveBits() > 32
8230 ? 1U << std::min((unsigned)31, Multiple.countTrailingZeros())
8231 : (unsigned)Multiple.zextOrTrunc(32).getZExtValue();
8232 }
8233
8234 /// Returns the largest constant divisor of the trip count of this loop as a
8235 /// normal unsigned value, if possible. This means that the actual trip count is
8236 /// always a multiple of the returned value (don't forget the trip count could
8237 /// very well be zero as well!).
8238 ///
8239 /// Returns 1 if the trip count is unknown or not guaranteed to be a
8240 /// multiple of a constant (which is also the case if the trip count is simply
8241 /// constant; use getSmallConstantTripCount for that case). It will also return
8242 /// 1 if the trip count is very large (>= 2^32).
8243 ///
8244 /// As explained in the comments for getSmallConstantTripCount, this assumes
8245 /// that control exits the loop via ExitingBlock.
8246 unsigned 8247 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 8248 const BasicBlock *ExitingBlock) { 8249 assert(ExitingBlock && "Must pass a non-null exiting block!"); 8250 assert(L->isLoopExiting(ExitingBlock) && 8251 "Exiting block must actually branch out of the loop!"); 8252 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 8253 return getSmallConstantTripMultiple(L, ExitCount); 8254 } 8255 8256 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 8257 const BasicBlock *ExitingBlock, 8258 ExitCountKind Kind) { 8259 switch (Kind) { 8260 case Exact: 8261 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 8262 case SymbolicMaximum: 8263 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock, this); 8264 case ConstantMaximum: 8265 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this); 8266 }; 8267 llvm_unreachable("Invalid ExitCountKind!"); 8268 } 8269 8270 const SCEV * 8271 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 8272 SmallVector<const SCEVPredicate *, 4> &Preds) { 8273 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); 8274 } 8275 8276 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, 8277 ExitCountKind Kind) { 8278 switch (Kind) { 8279 case Exact: 8280 return getBackedgeTakenInfo(L).getExact(L, this); 8281 case ConstantMaximum: 8282 return getBackedgeTakenInfo(L).getConstantMax(this); 8283 case SymbolicMaximum: 8284 return getBackedgeTakenInfo(L).getSymbolicMax(L, this); 8285 }; 8286 llvm_unreachable("Invalid ExitCountKind!"); 8287 } 8288 8289 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 8290 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this); 8291 } 8292 8293 /// Push PHI nodes in the header of the given loop onto the given Worklist. 8294 static void PushLoopPHIs(const Loop *L, 8295 SmallVectorImpl<Instruction *> &Worklist, 8296 SmallPtrSetImpl<Instruction *> &Visited) { 8297 BasicBlock *Header = L->getHeader(); 8298 8299 // Push all Loop-header PHIs onto the Worklist stack. 8300 for (PHINode &PN : Header->phis()) 8301 if (Visited.insert(&PN).second) 8302 Worklist.push_back(&PN); 8303 } 8304 8305 const ScalarEvolution::BackedgeTakenInfo & 8306 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 8307 auto &BTI = getBackedgeTakenInfo(L); 8308 if (BTI.hasFullInfo()) 8309 return BTI; 8310 8311 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 8312 8313 if (!Pair.second) 8314 return Pair.first->second; 8315 8316 BackedgeTakenInfo Result = 8317 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 8318 8319 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 8320 } 8321 8322 ScalarEvolution::BackedgeTakenInfo & 8323 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 8324 // Initially insert an invalid entry for this loop. If the insertion 8325 // succeeds, proceed to actually compute a backedge-taken count and 8326 // update the value. The temporary CouldNotCompute value tells SCEV 8327 // code elsewhere that it shouldn't attempt to request a new 8328 // backedge-taken count, which could result in infinite recursion. 8329 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 8330 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 8331 if (!Pair.second) 8332 return Pair.first->second; 8333 8334 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 8335 // into the BackedgeTakenCounts map transfers ownership. 
Otherwise, the result
8336 // must be cleared in this scope.
8337 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
8338
8339 // Now that we know more about the trip count for this loop, forget any
8340 // existing SCEV values for PHI nodes in this loop since they are only
8341 // conservative estimates made without the benefit of trip count
8342 // information. This invalidation is not necessary for correctness, and is
8343 // only done to produce more precise results.
8344 if (Result.hasAnyInfo()) {
8345 // Invalidate any expression using an addrec in this loop.
8346 SmallVector<const SCEV *, 8> ToForget;
8347 auto LoopUsersIt = LoopUsers.find(L);
8348 if (LoopUsersIt != LoopUsers.end())
8349 append_range(ToForget, LoopUsersIt->second);
8350 forgetMemoizedResults(ToForget);
8351
8352 // Invalidate constant-evolved loop header phis.
8353 for (PHINode &PN : L->getHeader()->phis())
8354 ConstantEvolutionLoopExitValue.erase(&PN);
8355 }
8356
8357 // Re-lookup the insert position, since the call to
8358 // computeBackedgeTakenCount above could result in a
8359 // recursive call to getBackedgeTakenInfo (on a different
8360 // loop), which would invalidate the iterator computed
8361 // earlier.
8362 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8363 }
8364
8365 void ScalarEvolution::forgetAllLoops() {
8366 // This method is intended to forget all info about loops. It should
8367 // invalidate caches as if the following happened:
8368 // - The trip counts of all loops have changed arbitrarily
8369 // - Every llvm::Value has been updated in place to produce a different
8370 // result.
8371 BackedgeTakenCounts.clear();
8372 PredicatedBackedgeTakenCounts.clear();
8373 BECountUsers.clear();
8374 LoopPropertiesCache.clear();
8375 ConstantEvolutionLoopExitValue.clear();
8376 ValueExprMap.clear();
8377 ValuesAtScopes.clear();
8378 ValuesAtScopesUsers.clear();
8379 LoopDispositions.clear();
8380 BlockDispositions.clear();
8381 UnsignedRanges.clear();
8382 SignedRanges.clear();
8383 ExprValueMap.clear();
8384 HasRecMap.clear();
8385 ConstantMultipleCache.clear();
8386 PredicatedSCEVRewrites.clear();
8387 FoldCache.clear();
8388 FoldCacheUser.clear();
8389 }
8390 void ScalarEvolution::visitAndClearUsers(
8391 SmallVectorImpl<Instruction *> &Worklist,
8392 SmallPtrSetImpl<Instruction *> &Visited,
8393 SmallVectorImpl<const SCEV *> &ToForget) {
8394 while (!Worklist.empty()) {
8395 Instruction *I = Worklist.pop_back_val();
8396 if (!isSCEVable(I->getType()))
8397 continue;
8398
8399 ValueExprMapType::iterator It =
8400 ValueExprMap.find_as(static_cast<Value *>(I));
8401 if (It != ValueExprMap.end()) {
8402 eraseValueFromMap(It->first);
8403 ToForget.push_back(It->second);
8404 if (PHINode *PN = dyn_cast<PHINode>(I))
8405 ConstantEvolutionLoopExitValue.erase(PN);
8406 }
8407
8408 PushDefUseChildren(I, Worklist, Visited);
8409 }
8410 }
8411
8412 void ScalarEvolution::forgetLoop(const Loop *L) {
8413 SmallVector<const Loop *, 16> LoopWorklist(1, L);
8414 SmallVector<Instruction *, 32> Worklist;
8415 SmallPtrSet<Instruction *, 16> Visited;
8416 SmallVector<const SCEV *, 16> ToForget;
8417
8418 // Iterate over all the loops and sub-loops to drop SCEV information.
8419 while (!LoopWorklist.empty()) {
8420 auto *CurrL = LoopWorklist.pop_back_val();
8421
8422 // Drop any stored trip count value.
8423 forgetBackedgeTakenCounts(CurrL, /* Predicated */ false);
8424 forgetBackedgeTakenCounts(CurrL, /* Predicated */ true);
8425
8426 // Drop information about predicated SCEV rewrites for this loop.
8427 for (auto I = PredicatedSCEVRewrites.begin();
8428 I != PredicatedSCEVRewrites.end();) {
8429 std::pair<const SCEV *, const Loop *> Entry = I->first;
8430 if (Entry.second == CurrL)
8431 PredicatedSCEVRewrites.erase(I++);
8432 else
8433 ++I;
8434 }
8435
8436 auto LoopUsersItr = LoopUsers.find(CurrL);
8437 if (LoopUsersItr != LoopUsers.end()) {
8438 ToForget.insert(ToForget.end(), LoopUsersItr->second.begin(),
8439 LoopUsersItr->second.end());
8440 }
8441
8442 // Drop information about expressions based on loop-header PHIs.
8443 PushLoopPHIs(CurrL, Worklist, Visited);
8444 visitAndClearUsers(Worklist, Visited, ToForget);
8445
8446 LoopPropertiesCache.erase(CurrL);
8447 // Forget all contained loops too, to avoid dangling entries in the
8448 // ValuesAtScopes map.
8449 LoopWorklist.append(CurrL->begin(), CurrL->end());
8450 }
8451 forgetMemoizedResults(ToForget);
8452 }
8453
8454 void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
8455 forgetLoop(L->getOutermostLoop());
8456 }
8457
8458 void ScalarEvolution::forgetValue(Value *V) {
8459 Instruction *I = dyn_cast<Instruction>(V);
8460 if (!I) return;
8461
8462 // Drop information about expressions based on loop-header PHIs.
8463 SmallVector<Instruction *, 16> Worklist;
8464 SmallPtrSet<Instruction *, 8> Visited;
8465 SmallVector<const SCEV *, 8> ToForget;
8466 Worklist.push_back(I);
8467 Visited.insert(I);
8468 visitAndClearUsers(Worklist, Visited, ToForget);
8469
8470 forgetMemoizedResults(ToForget);
8471 }
8472
8473 void ScalarEvolution::forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V) {
8474 if (!isSCEVable(V->getType()))
8475 return;
8476
8477 // If SCEV looked through a trivial LCSSA phi node, we might have SCEVs
8478 // directly using a SCEVUnknown/SCEVAddRec defined in the loop. After an
8479 // extra predecessor is added, this is no longer valid. Find all Unknowns and
8480 // AddRecs defined in the loop and invalidate any SCEVs making use of them.
8481 if (const SCEV *S = getExistingSCEV(V)) {
8482 struct InvalidationRootCollector {
8483 Loop *L;
8484 SmallVector<const SCEV *, 8> Roots;
8485
8486 InvalidationRootCollector(Loop *L) : L(L) {}
8487
8488 bool follow(const SCEV *S) {
8489 if (auto *SU = dyn_cast<SCEVUnknown>(S)) {
8490 if (auto *I = dyn_cast<Instruction>(SU->getValue()))
8491 if (L->contains(I))
8492 Roots.push_back(S);
8493 } else if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
8494 if (L->contains(AddRec->getLoop()))
8495 Roots.push_back(S);
8496 }
8497 return true;
8498 }
8499 bool isDone() const { return false; }
8500 };
8501
8502 InvalidationRootCollector C(L);
8503 visitAll(S, C);
8504 forgetMemoizedResults(C.Roots);
8505 }
8506
8507 // Also perform the normal invalidation.
8508 forgetValue(V);
8509 }
8510
8511 void ScalarEvolution::forgetLoopDispositions() { LoopDispositions.clear(); }
8512
8513 void ScalarEvolution::forgetBlockAndLoopDispositions(Value *V) {
8514 // Unless a specific value is passed to invalidation, completely clear both
8515 // caches.
8516 if (!V) {
8517 BlockDispositions.clear();
8518 LoopDispositions.clear();
8519 return;
8520 }
8521
8522 if (!isSCEVable(V->getType()))
8523 return;
8524
8525 const SCEV *S = getExistingSCEV(V);
8526 if (!S)
8527 return;
8528
8529 // Invalidate the block and loop dispositions cached for S.
Dispositions of
8530 // S's users may change if S's disposition changes (i.e. a user may change to
8531 // loop-invariant, if S changes to loop-invariant), so also invalidate
8532 // dispositions of S's users recursively.
8533 SmallVector<const SCEV *, 8> Worklist = {S};
8534 SmallPtrSet<const SCEV *, 8> Seen = {S};
8535 while (!Worklist.empty()) {
8536 const SCEV *Curr = Worklist.pop_back_val();
8537 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8538 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8539 if (!LoopDispoRemoved && !BlockDispoRemoved)
8540 continue;
8541 auto Users = SCEVUsers.find(Curr);
8542 if (Users != SCEVUsers.end())
8543 for (const auto *User : Users->second)
8544 if (Seen.insert(User).second)
8545 Worklist.push_back(User);
8546 }
8547 }
8548
8549 /// Get the exact loop backedge taken count considering all loop exits. A
8550 /// computable result can only be returned for loops with all exiting blocks
8551 /// dominating the latch. howFarToZero assumes that the limit of each loop test
8552 /// is never skipped. This is a valid assumption as long as the loop exits via
8553 /// that test. For precise results, it is the caller's responsibility to specify
8554 /// the relevant loop exiting block using getExact(ExitingBlock, SE).
8555 const SCEV *
8556 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
8557 SmallVector<const SCEVPredicate *, 4> *Preds) const {
8558 // If any exits were not computable, the loop is not computable.
8559 if (!isComplete() || ExitNotTaken.empty())
8560 return SE->getCouldNotCompute();
8561
8562 const BasicBlock *Latch = L->getLoopLatch();
8563 // All exiting blocks we have collected must dominate the only backedge.
8564 if (!Latch)
8565 return SE->getCouldNotCompute();
8566
8567 // All exiting blocks we have gathered dominate the loop's latch, so the
8568 // exact trip count is simply a minimum of all these calculated exit counts.
8569 SmallVector<const SCEV *, 2> Ops;
8570 for (const auto &ENT : ExitNotTaken) {
8571 const SCEV *BECount = ENT.ExactNotTaken;
8572 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
8573 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
8574 "We should only have known counts for exiting blocks that dominate "
8575 "latch!");
8576
8577 Ops.push_back(BECount);
8578
8579 if (Preds)
8580 for (const auto *P : ENT.Predicates)
8581 Preds->push_back(P);
8582
8583 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8584 "Predicate should be always true!");
8585 }
8586
8587 // If an earlier exit exits on the first iteration (exit count zero), then
8588 // a later poison exit count should not propagate into the result. These are
8589 // exactly the semantics provided by umin_seq.
8590 return SE->getUMinFromMismatchedTypes(Ops, /* Sequential */ true);
8591 }
8592
8593 /// Get the exact not taken count for this loop exit.
8594 const SCEV * 8595 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, 8596 ScalarEvolution *SE) const { 8597 for (const auto &ENT : ExitNotTaken) 8598 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 8599 return ENT.ExactNotTaken; 8600 8601 return SE->getCouldNotCompute(); 8602 } 8603 8604 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( 8605 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { 8606 for (const auto &ENT : ExitNotTaken) 8607 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 8608 return ENT.ConstantMaxNotTaken; 8609 8610 return SE->getCouldNotCompute(); 8611 } 8612 8613 const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax( 8614 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { 8615 for (const auto &ENT : ExitNotTaken) 8616 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 8617 return ENT.SymbolicMaxNotTaken; 8618 8619 return SE->getCouldNotCompute(); 8620 } 8621 8622 /// getConstantMax - Get the constant max backedge taken count for the loop. 8623 const SCEV * 8624 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 8625 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 8626 return !ENT.hasAlwaysTruePredicate(); 8627 }; 8628 8629 if (!getConstantMax() || any_of(ExitNotTaken, PredicateNotAlwaysTrue)) 8630 return SE->getCouldNotCompute(); 8631 8632 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 8633 isa<SCEVConstant>(getConstantMax())) && 8634 "No point in having a non-constant max backedge taken count!"); 8635 return getConstantMax(); 8636 } 8637 8638 const SCEV * 8639 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 8640 ScalarEvolution *SE) { 8641 if (!SymbolicMax) 8642 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 8643 return SymbolicMax; 8644 } 8645 8646 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 8647 ScalarEvolution *SE) const { 8648 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 8649 return !ENT.hasAlwaysTruePredicate(); 8650 }; 8651 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 8652 } 8653 8654 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 8655 : ExitLimit(E, E, E, false, std::nullopt) {} 8656 8657 ScalarEvolution::ExitLimit::ExitLimit( 8658 const SCEV *E, const SCEV *ConstantMaxNotTaken, 8659 const SCEV *SymbolicMaxNotTaken, bool MaxOrZero, 8660 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 8661 : ExactNotTaken(E), ConstantMaxNotTaken(ConstantMaxNotTaken), 8662 SymbolicMaxNotTaken(SymbolicMaxNotTaken), MaxOrZero(MaxOrZero) { 8663 // If we prove the max count is zero, so is the symbolic bound. This happens 8664 // in practice due to differences in a) how context sensitive we've chosen 8665 // to be and b) how we reason about bounds implied by UB. 
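// For instance (illustrative): context-sensitive UB reasoning may prove a
// constant max of 0 while the exact expression still looks symbolic; the
// clamp below keeps all three counts consistent with the asserts that
// follow.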
8666 if (ConstantMaxNotTaken->isZero()) { 8667 this->ExactNotTaken = E = ConstantMaxNotTaken; 8668 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken; 8669 } 8670 8671 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 8672 !isa<SCEVCouldNotCompute>(ConstantMaxNotTaken)) && 8673 "Exact is not allowed to be less precise than Constant Max"); 8674 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 8675 !isa<SCEVCouldNotCompute>(SymbolicMaxNotTaken)) && 8676 "Exact is not allowed to be less precise than Symbolic Max"); 8677 assert((isa<SCEVCouldNotCompute>(SymbolicMaxNotTaken) || 8678 !isa<SCEVCouldNotCompute>(ConstantMaxNotTaken)) && 8679 "Symbolic Max is not allowed to be less precise than Constant Max"); 8680 assert((isa<SCEVCouldNotCompute>(ConstantMaxNotTaken) || 8681 isa<SCEVConstant>(ConstantMaxNotTaken)) && 8682 "No point in having a non-constant max backedge taken count!"); 8683 for (const auto *PredSet : PredSetList) 8684 for (const auto *P : *PredSet) 8685 addPredicate(P); 8686 assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) && 8687 "Backedge count should be int"); 8688 assert((isa<SCEVCouldNotCompute>(ConstantMaxNotTaken) || 8689 !ConstantMaxNotTaken->getType()->isPointerTy()) && 8690 "Max backedge count should be int"); 8691 } 8692 8693 ScalarEvolution::ExitLimit::ExitLimit( 8694 const SCEV *E, const SCEV *ConstantMaxNotTaken, 8695 const SCEV *SymbolicMaxNotTaken, bool MaxOrZero, 8696 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 8697 : ExitLimit(E, ConstantMaxNotTaken, SymbolicMaxNotTaken, MaxOrZero, 8698 { &PredSet }) {} 8699 8700 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 8701 /// computable exit into a persistent ExitNotTakenInfo array. 8702 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 8703 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts, 8704 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero) 8705 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) { 8706 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 8707 8708 ExitNotTaken.reserve(ExitCounts.size()); 8709 std::transform(ExitCounts.begin(), ExitCounts.end(), 8710 std::back_inserter(ExitNotTaken), 8711 [&](const EdgeExitInfo &EEI) { 8712 BasicBlock *ExitBB = EEI.first; 8713 const ExitLimit &EL = EEI.second; 8714 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, 8715 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken, 8716 EL.Predicates); 8717 }); 8718 assert((isa<SCEVCouldNotCompute>(ConstantMax) || 8719 isa<SCEVConstant>(ConstantMax)) && 8720 "No point in having a non-constant max backedge taken count!"); 8721 } 8722 8723 /// Compute the number of times the backedge of the specified loop will execute. 8724 ScalarEvolution::BackedgeTakenInfo 8725 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 8726 bool AllowPredicates) { 8727 SmallVector<BasicBlock *, 8> ExitingBlocks; 8728 L->getExitingBlocks(ExitingBlocks); 8729 8730 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 8731 8732 SmallVector<EdgeExitInfo, 4> ExitCounts; 8733 bool CouldComputeBECount = true; 8734 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 8735 const SCEV *MustExitMaxBECount = nullptr; 8736 const SCEV *MayExitMaxBECount = nullptr; 8737 bool MustExitMaxOrZero = false; 8738 8739 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 8740 // and compute maxBECount. 8741 // Do a union of all the predicates here. 
8742 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
8743 BasicBlock *ExitBB = ExitingBlocks[i];
8744
8745 // We canonicalize untaken exits to br (constant); ignore them so that
8746 // proving an exit untaken doesn't negatively impact our ability to reason
8747 // about the loop as a whole.
8748 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
8749 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
8750 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
8751 if (ExitIfTrue == CI->isZero())
8752 continue;
8753 }
8754
8755 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
8756
8757 assert((AllowPredicates || EL.Predicates.empty()) &&
8758 "Predicated exit limit when predicates are not allowed!");
8759
8760 // 1. For each exit that can be computed, add an entry to ExitCounts.
8761 // CouldComputeBECount is true only if all exits can be computed.
8762 if (EL.ExactNotTaken != getCouldNotCompute())
8763 ++NumExitCountsComputed;
8764 else
8765 // We couldn't compute an exact value for this exit, so
8766 // we won't be able to compute an exact value for the loop.
8767 CouldComputeBECount = false;
8768 // Remember exit count if either exact or symbolic is known. Because
8769 // Exact always implies symbolic, only check symbolic.
8770 if (EL.SymbolicMaxNotTaken != getCouldNotCompute())
8771 ExitCounts.emplace_back(ExitBB, EL);
8772 else {
8773 assert(EL.ExactNotTaken == getCouldNotCompute() &&
8774 "Exact is known but symbolic isn't?");
8775 ++NumExitCountsNotComputed;
8776 }
8777
8778 // 2. Derive the loop's MaxBECount from each exit's max number of
8779 // non-exiting iterations. Partition the loop exits into two kinds:
8780 // LoopMustExits and LoopMayExits.
8781 //
8782 // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it
8783 // is a LoopMayExit. If any computable LoopMustExit is found, then
8784 // MaxBECount is the minimum EL.ConstantMaxNotTaken of computable
8785 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
8786 // EL.ConstantMaxNotTaken, where CouldNotCompute is considered greater than
8787 // any computable EL.ConstantMaxNotTaken.
8788
8789 if (EL.ConstantMaxNotTaken != getCouldNotCompute() && Latch &&
8790 DT.dominates(ExitBB, Latch)) {
8791 if (!MustExitMaxBECount) {
8792 MustExitMaxBECount = EL.ConstantMaxNotTaken;
8793 MustExitMaxOrZero = EL.MaxOrZero;
8794 } else {
8795 MustExitMaxBECount = getUMinFromMismatchedTypes(MustExitMaxBECount,
8796 EL.ConstantMaxNotTaken);
8797 }
8798 } else if (MayExitMaxBECount != getCouldNotCompute()) {
8799 if (!MayExitMaxBECount || EL.ConstantMaxNotTaken == getCouldNotCompute())
8800 MayExitMaxBECount = EL.ConstantMaxNotTaken;
8801 else {
8802 MayExitMaxBECount = getUMaxFromMismatchedTypes(MayExitMaxBECount,
8803 EL.ConstantMaxNotTaken);
8804 }
8805 }
8806 }
8807 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8808 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
8809 // The loop backedge will be taken the maximum or zero times if there's
8810 // a single exit that must be taken the maximum or zero times.
8811 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
8812
8813 // Remember which SCEVs are used in exit limits for invalidation purposes.
8814 // We only care about non-constant SCEVs here, so we can ignore
8815 // EL.ConstantMaxNotTaken and MaxBECount, which must be SCEVConstant.
8816
8817 for (const auto &Pair : ExitCounts) {
8818 if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
8819 BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
8820 if (!isa<SCEVConstant>(Pair.second.SymbolicMaxNotTaken))
8821 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
8822 {L, AllowPredicates});
8823 }
8824 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8825 MaxBECount, MaxOrZero);
8826 }
8827
8828 ScalarEvolution::ExitLimit
8829 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
8830 bool AllowPredicates) {
8831 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
8832 // If our exiting block does not dominate the latch, then its connection
8833 // with the loop's exit limit may be far from trivial.
8834 const BasicBlock *Latch = L->getLoopLatch();
8835 if (!Latch || !DT.dominates(ExitingBlock, Latch))
8836 return getCouldNotCompute();
8837
8838 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
8839 Instruction *Term = ExitingBlock->getTerminator();
8840 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
8841 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
8842 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
8843 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
8844 "It should have one successor in loop and one exit block!");
8845 // Proceed to the next level to examine the exit condition expression.
8846 return computeExitLimitFromCond(L, BI->getCondition(), ExitIfTrue,
8847 /*ControlsOnlyExit=*/IsOnlyExit,
8848 AllowPredicates);
8849 }
8850
8851 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
8852 // For switch, make sure that there is a single exit from the loop.
8853 BasicBlock *Exit = nullptr;
8854 for (auto *SBB : successors(ExitingBlock))
8855 if (!L->contains(SBB)) {
8856 if (Exit) // Multiple exit successors.
8857 return getCouldNotCompute(); 8858 Exit = SBB; 8859 } 8860 assert(Exit && "Exiting block must have at least one exit"); 8861 return computeExitLimitFromSingleExitSwitch( 8862 L, SI, Exit, 8863 /*ControlsOnlyExit=*/IsOnlyExit); 8864 } 8865 8866 return getCouldNotCompute(); 8867 } 8868 8869 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 8870 const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, 8871 bool AllowPredicates) { 8872 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 8873 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 8874 ControlsOnlyExit, AllowPredicates); 8875 } 8876 8877 std::optional<ScalarEvolution::ExitLimit> 8878 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 8879 bool ExitIfTrue, bool ControlsOnlyExit, 8880 bool AllowPredicates) { 8881 (void)this->L; 8882 (void)this->ExitIfTrue; 8883 (void)this->AllowPredicates; 8884 8885 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8886 this->AllowPredicates == AllowPredicates && 8887 "Variance in assumed invariant key components!"); 8888 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit}); 8889 if (Itr == TripCountMap.end()) 8890 return std::nullopt; 8891 return Itr->second; 8892 } 8893 8894 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 8895 bool ExitIfTrue, 8896 bool ControlsOnlyExit, 8897 bool AllowPredicates, 8898 const ExitLimit &EL) { 8899 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8900 this->AllowPredicates == AllowPredicates && 8901 "Variance in assumed invariant key components!"); 8902 8903 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL}); 8904 assert(InsertResult.second && "Expected successful insertion!"); 8905 (void)InsertResult; 8906 (void)ExitIfTrue; 8907 } 8908 8909 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 8910 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8911 bool ControlsOnlyExit, bool AllowPredicates) { 8912 8913 if (auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit, 8914 AllowPredicates)) 8915 return *MaybeEL; 8916 8917 ExitLimit EL = computeExitLimitFromCondImpl( 8918 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates); 8919 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL); 8920 return EL; 8921 } 8922 8923 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 8924 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8925 bool ControlsOnlyExit, bool AllowPredicates) { 8926 // Handle BinOp conditions (And, Or). 8927 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 8928 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates)) 8929 return *LimitFromBinOp; 8930 8931 // With an icmp, it may be feasible to compute an exact backedge-taken count. 8932 // Proceed to the next level to examine the icmp. 8933 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 8934 ExitLimit EL = 8935 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit); 8936 if (EL.hasFullInfo() || !AllowPredicates) 8937 return EL; 8938 8939 // Try again, but use SCEV predicates this time. 8940 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, 8941 ControlsOnlyExit, 8942 /*AllowPredicates=*/true); 8943 } 8944 8945 // Check for a constant condition. 
These are normally stripped out by
8946   // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
8947   // preserve the CFG and is temporarily leaving constant conditions
8948   // in place.
8949   if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
8950     if (ExitIfTrue == !CI->getZExtValue())
8951       // The backedge is always taken.
8952       return getCouldNotCompute();
8953     // The backedge is never taken.
8954     return getZero(CI->getType());
8955   }
8956 
8957   // If we're exiting based on the overflow flag of an x.with.overflow intrinsic
8958   // with a constant step, we can form an equivalent icmp predicate and figure
8959   // out how many iterations will be taken before we exit.
8960   const WithOverflowInst *WO;
8961   const APInt *C;
8962   if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) &&
8963       match(WO->getRHS(), m_APInt(C))) {
8964     ConstantRange NWR =
8965         ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C,
8966                                              WO->getNoWrapKind());
8967     CmpInst::Predicate Pred;
8968     APInt NewRHSC, Offset;
8969     NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
8970     if (!ExitIfTrue)
8971       Pred = ICmpInst::getInversePredicate(Pred);
8972     auto *LHS = getSCEV(WO->getLHS());
8973     if (Offset != 0)
8974       LHS = getAddExpr(LHS, getConstant(Offset));
8975     auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC),
8976                                        ControlsOnlyExit, AllowPredicates);
8977     if (EL.hasAnyInfo())
8978       return EL;
8979   }
8980 
8981   // If it's not an integer or pointer comparison then compute it the hard way.
8982   return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
8983 }
8984 
8985 std::optional<ScalarEvolution::ExitLimit>
8986 ScalarEvolution::computeExitLimitFromCondFromBinOp(
8987     ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
8988     bool ControlsOnlyExit, bool AllowPredicates) {
8989   // Check if the controlling expression for this loop is an And or Or.
8990   Value *Op0, *Op1;
8991   bool IsAnd = false;
8992   if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
8993     IsAnd = true;
8994   else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
8995     IsAnd = false;
8996   else
8997     return std::nullopt;
8998 
8999   // EitherMayExit is true in these two cases:
9000   //   br (and Op0 Op1), loop, exit
9001   //   br (or Op0 Op1), exit, loop
9002   bool EitherMayExit = IsAnd ^ ExitIfTrue;
9003   ExitLimit EL0 = computeExitLimitFromCondCached(
9004       Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9005       AllowPredicates);
9006   ExitLimit EL1 = computeExitLimitFromCondCached(
9007       Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9008       AllowPredicates);
9009 
9010   // Be robust against unsimplified IR for the form "op i1 X, NeutralElement".
9011   const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
9012   if (isa<ConstantInt>(Op1))
9013     return Op1 == NeutralElement ? EL0 : EL1;
9014   if (isa<ConstantInt>(Op0))
9015     return Op0 == NeutralElement ? EL1 : EL0;
9016 
9017   const SCEV *BECount = getCouldNotCompute();
9018   const SCEV *ConstantMaxBECount = getCouldNotCompute();
9019   const SCEV *SymbolicMaxBECount = getCouldNotCompute();
9020   if (EitherMayExit) {
9021     bool UseSequentialUMin = !isa<BinaryOperator>(ExitCond);
9022     // Both conditions must be the same for the loop to continue executing.
9023     // Choose the less conservative count.
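    // For example (an illustrative sketch, not taken from the code above): if
    // Op0 would trigger its exit after 7 iterations and Op1 after 10, the loop
    // leaves as soon as either sub-condition exits, so the combined
    // backedge-taken count is umin(7, 10) = 7.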
9024     if (EL0.ExactNotTaken != getCouldNotCompute() &&
9025         EL1.ExactNotTaken != getCouldNotCompute()) {
9026       BECount = getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken,
9027                                            UseSequentialUMin);
9028     }
9029     if (EL0.ConstantMaxNotTaken == getCouldNotCompute())
9030       ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9031     else if (EL1.ConstantMaxNotTaken == getCouldNotCompute())
9032       ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9033     else
9034       ConstantMaxBECount = getUMinFromMismatchedTypes(EL0.ConstantMaxNotTaken,
9035                                                       EL1.ConstantMaxNotTaken);
9036     if (EL0.SymbolicMaxNotTaken == getCouldNotCompute())
9037       SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9038     else if (EL1.SymbolicMaxNotTaken == getCouldNotCompute())
9039       SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9040     else
9041       SymbolicMaxBECount = getUMinFromMismatchedTypes(
9042           EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9043   } else {
9044     // Both conditions must be true at the same time for the loop to exit.
9045     // For now, be conservative.
9046     if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9047       BECount = EL0.ExactNotTaken;
9048   }
9049 
9050   // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
9051   // to be more aggressive when computing BECount than when computing
9052   // ConstantMaxBECount. In these cases it is possible for EL0.ExactNotTaken
9053   // and EL1.ExactNotTaken to match, but for EL0.ConstantMaxNotTaken and
9054   // EL1.ConstantMaxNotTaken to not.
9056   if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
9057       !isa<SCEVCouldNotCompute>(BECount))
9058     ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount));
9059   if (isa<SCEVCouldNotCompute>(SymbolicMaxBECount))
9060     SymbolicMaxBECount =
9061         isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
9062   return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false,
9063                    { &EL0.Predicates, &EL1.Predicates });
9064 }
9065 
9066 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9067     const Loop *L, ICmpInst *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit,
9068     bool AllowPredicates) {
9069   // If the condition was exit-on-true, convert it to exit-on-false.
9070   ICmpInst::Predicate Pred;
9071   if (!ExitIfTrue)
9072     Pred = ExitCond->getPredicate();
9073   else
9074     Pred = ExitCond->getInversePredicate();
9075   const ICmpInst::Predicate OriginalPred = Pred;
9076 
9077   const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
9078   const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
9079 
9080   ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsOnlyExit,
9081                                           AllowPredicates);
9082   if (EL.hasAnyInfo())
9083     return EL;
9084 
9085   auto *ExhaustiveCount =
9086       computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9087 
9088   if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
9089     return ExhaustiveCount;
9090 
9091   return computeShiftCompareExitLimit(ExitCond->getOperand(0),
9092                                       ExitCond->getOperand(1), L, OriginalPred);
9093 }
9094 
9094 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9095     const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
9096     bool ControlsOnlyExit, bool AllowPredicates) {
9097 
9098   // Try to evaluate any dependencies out of the loop.
9099   LHS = getSCEVAtScope(LHS, L);
9100   RHS = getSCEVAtScope(RHS, L);
9101 
9102   // At this point, we would like to compute how many iterations of the
9103   // loop the predicate will return true for these inputs.
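  // For instance (an illustrative sketch): for the test "%i != %n", with
  // %i = {0,+,1} and a loop-invariant %n, the predicate holds for exactly
  // %n iterations, so the backedge-taken count is %n.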
9104   if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
9105     // If the LHS is loop-invariant and the RHS is not, move it to the RHS.
9106     std::swap(LHS, RHS);
9107     Pred = ICmpInst::getSwappedPredicate(Pred);
9108   }
9109 
9110   bool ControllingFiniteLoop = ControlsOnlyExit && loopHasNoAbnormalExits(L) &&
9111                                loopIsFiniteByAssumption(L);
9112   // Simplify the operands before analyzing them.
9113   (void)SimplifyICmpOperands(Pred, LHS, RHS, /*Depth=*/0);
9114 
9115   // If we have a comparison of a chrec against a constant, try to use value
9116   // ranges to answer this query.
9117   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
9118     if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
9119       if (AddRec->getLoop() == L) {
9120         // Form the constant range.
9121         ConstantRange CompRange =
9122             ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
9123 
9124         const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
9125         if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
9126       }
9127 
9128   // If this loop must exit based on this condition (or execute undefined
9129   // behaviour otherwise), and we can prove that the test sequence would
9130   // repeat the same values if the IV self-wrapped, then the IV cannot
9131   // self-wrap: if it did, the loop would run forever, which is undefined
9132   // behaviour.
9133   if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) {
9134     // TODO: We can peel off any functions which are invertible *in L*. Loop
9135     // invariant terms are effectively constants for our purposes here.
9136     auto *InnerLHS = LHS;
9137     if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS))
9138       InnerLHS = ZExt->getOperand();
9139     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS)) {
9140       auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this));
9141       if (!AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() &&
9142           StrideC && StrideC->getAPInt().isPowerOf2()) {
9143         auto Flags = AR->getNoWrapFlags();
9144         Flags = setFlags(Flags, SCEV::FlagNW);
9145         SmallVector<const SCEV*> Operands{AR->operands()};
9146         Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
9147         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
9148       }
9149     }
9150   }
9151 
9152   switch (Pred) {
9153   case ICmpInst::ICMP_NE: { // while (X != Y)
9154     // Convert to: while (X-Y != 0)
9155     if (LHS->getType()->isPointerTy()) {
9156       LHS = getLosslessPtrToIntExpr(LHS);
9157       if (isa<SCEVCouldNotCompute>(LHS))
9158         return LHS;
9159     }
9160     if (RHS->getType()->isPointerTy()) {
9161       RHS = getLosslessPtrToIntExpr(RHS);
9162       if (isa<SCEVCouldNotCompute>(RHS))
9163         return RHS;
9164     }
9165     ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsOnlyExit,
9166                                 AllowPredicates);
9167     if (EL.hasAnyInfo())
9168       return EL;
9169     break;
9170   }
9171   case ICmpInst::ICMP_EQ: { // while (X == Y)
9172     // Convert to: while (X-Y == 0)
9173     if (LHS->getType()->isPointerTy()) {
9174       LHS = getLosslessPtrToIntExpr(LHS);
9175       if (isa<SCEVCouldNotCompute>(LHS))
9176         return LHS;
9177     }
9178     if (RHS->getType()->isPointerTy()) {
9179       RHS = getLosslessPtrToIntExpr(RHS);
9180       if (isa<SCEVCouldNotCompute>(RHS))
9181         return RHS;
9182     }
9183     ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
9184     if (EL.hasAnyInfo()) return EL;
9185     break;
9186   }
9187   case ICmpInst::ICMP_SLE:
9188   case ICmpInst::ICMP_ULE:
9189     // Since the loop is finite, an invariant RHS cannot include the boundary
9190     // value; otherwise it would loop forever.
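    // For example (illustrative): in a finite loop controlled by "i <=u %n"
    // with loop-invariant %n, %n cannot equal UINT_MAX, since the test would
    // then always hold and the loop would never terminate. That is what makes
    // the RHS + 1 rewrite below safe from overflow.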
9191     if (!EnableFiniteLoopControl || !ControllingFiniteLoop ||
9192         !isLoopInvariant(RHS, L))
9193       break;
9194     RHS = getAddExpr(getOne(RHS->getType()), RHS);
9195     [[fallthrough]];
9196   case ICmpInst::ICMP_SLT:
9197   case ICmpInst::ICMP_ULT: { // while (X < Y)
9198     bool IsSigned = ICmpInst::isSigned(Pred);
9199     ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9200                                     AllowPredicates);
9201     if (EL.hasAnyInfo())
9202       return EL;
9203     break;
9204   }
9205   case ICmpInst::ICMP_SGE:
9206   case ICmpInst::ICMP_UGE:
9207     // Since the loop is finite, an invariant RHS cannot include the boundary
9208     // value; otherwise it would loop forever.
9209     if (!EnableFiniteLoopControl || !ControllingFiniteLoop ||
9210         !isLoopInvariant(RHS, L))
9211       break;
9212     RHS = getAddExpr(getMinusOne(RHS->getType()), RHS);
9213     [[fallthrough]];
9214   case ICmpInst::ICMP_SGT:
9215   case ICmpInst::ICMP_UGT: { // while (X > Y)
9216     bool IsSigned = ICmpInst::isSigned(Pred);
9217     ExitLimit EL = howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9218                                        AllowPredicates);
9219     if (EL.hasAnyInfo())
9220       return EL;
9221     break;
9222   }
9223   default:
9224     break;
9225   }
9226 
9227   return getCouldNotCompute();
9228 }
9229 
9230 ScalarEvolution::ExitLimit
9231 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
9232                                                       SwitchInst *Switch,
9233                                                       BasicBlock *ExitingBlock,
9234                                                       bool ControlsOnlyExit) {
9235   assert(!L->contains(ExitingBlock) && "Not an exiting block!");
9236 
9237   // Give up if the exit is the default dest of a switch.
9238   if (Switch->getDefaultDest() == ExitingBlock)
9239     return getCouldNotCompute();
9240 
9241   assert(L->contains(Switch->getDefaultDest()) &&
9242          "Default case must not exit the loop!");
9243   const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
9244   const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
9245 
9246   // while (X != Y) --> while (X-Y != 0)
9247   ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsOnlyExit);
9248   if (EL.hasAnyInfo())
9249     return EL;
9250 
9251   return getCouldNotCompute();
9252 }
9253 
9254 static ConstantInt *
9255 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
9256                                 ScalarEvolution &SE) {
9257   const SCEV *InVal = SE.getConstant(C);
9258   const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
9259   assert(isa<SCEVConstant>(Val) &&
9260          "Evaluation of SCEV at constant didn't fold correctly?");
9261   return cast<SCEVConstant>(Val)->getValue();
9262 }
9263 
9264 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9265     Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
9266   ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
9267   if (!RHS)
9268     return getCouldNotCompute();
9269 
9270   const BasicBlock *Latch = L->getLoopLatch();
9271   if (!Latch)
9272     return getCouldNotCompute();
9273 
9274   const BasicBlock *Predecessor = L->getLoopPredecessor();
9275   if (!Predecessor)
9276     return getCouldNotCompute();
9277 
9278   // Return true if V is of the form "LHS `shift_op` <positive constant>".
9279   // Return LHS in OutLHS and shift_op in OutOpCode.
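  // For example (an illustrative sketch): "lshr i32 %x, 3" matches with
  // OutLHS = %x and OutOpCode = Instruction::LShr, while a shift by zero is
  // rejected because the shift amount must be strictly positive.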
9280   auto MatchPositiveShift =
9281       [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
9282 
9283     using namespace PatternMatch;
9284 
9285     ConstantInt *ShiftAmt;
9286     if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9287       OutOpCode = Instruction::LShr;
9288     else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9289       OutOpCode = Instruction::AShr;
9290     else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9291       OutOpCode = Instruction::Shl;
9292     else
9293       return false;
9294 
9295     return ShiftAmt->getValue().isStrictlyPositive();
9296   };
9297 
9298   // Recognize a "shift recurrence" of the form %iv or %iv.shifted in
9299   //
9300   // loop:
9301   //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
9302   //   %iv.shifted = lshr i32 %iv, <positive constant>
9303   //
9304   // Return true on a successful match.  Return the corresponding PHI node (%iv
9305   // above) in PNOut and the opcode of the shift operation in OpCodeOut.
9306   auto MatchShiftRecurrence =
9307       [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
9308     std::optional<Instruction::BinaryOps> PostShiftOpCode;
9309 
9310     {
9311       Instruction::BinaryOps OpC;
9312       Value *V;
9313 
9314       // If we encounter a shift instruction, "peel off" the shift operation,
9315       // and remember that we did so. Later when we inspect %iv's backedge
9316       // value, we will make sure that the backedge value uses the same
9317       // operation.
9318       //
9319       // Note: the peeled shift operation does not have to be the same
9320       // instruction as the one feeding into the PHI's backedge value. We only
9321       // really care about it being the same *kind* of shift instruction --
9322       // that's all that is required for our later inferences to hold.
9323       if (MatchPositiveShift(LHS, V, OpC)) {
9324         PostShiftOpCode = OpC;
9325         LHS = V;
9326       }
9327     }
9328 
9329     PNOut = dyn_cast<PHINode>(LHS);
9330     if (!PNOut || PNOut->getParent() != L->getHeader())
9331       return false;
9332 
9333     Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9334     Value *OpLHS;
9335 
9336     return
9337         // The backedge value for the PHI node must be a shift by a positive
9338         // amount
9339         MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9340 
9341         // of the PHI node itself
9342         OpLHS == PNOut &&
9343 
9344         // and the kind of shift should match the kind of shift we peeled
9345         // off, if any.
9346         (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9347   };
9348 
9349   PHINode *PN;
9350   Instruction::BinaryOps OpCode;
9351   if (!MatchShiftRecurrence(LHS, PN, OpCode))
9352     return getCouldNotCompute();
9353 
9354   const DataLayout &DL = getDataLayout();
9355 
9356   // The key rationale for this optimization is that for some kinds of shift
9357   // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
9358   // within a finite number of iterations. If the condition guarding the
9359   // backedge (in the sense that the backedge is taken if the condition is true)
9360   // is false for the value the shift recurrence stabilizes to, then we know
9361   // that the backedge is taken only a finite number of times.
9362 
9363   ConstantInt *StableValue = nullptr;
9364   switch (OpCode) {
9365   default:
9366     llvm_unreachable("Impossible case!");
9367 
9368   case Instruction::AShr: {
9369     // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
9370     // bitwidth(K) iterations.
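    // For example, {-16,ashr,1} produces -16, -8, -4, -2, -1, -1, ... and
    // {16,ashr,1} produces 16, 8, 4, 2, 1, 0, 0, ... so the stable value is
    // -1 for a known-negative start and 0 for a known-non-negative one.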
9371 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 9372 KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC, 9373 Predecessor->getTerminator(), &DT); 9374 auto *Ty = cast<IntegerType>(RHS->getType()); 9375 if (Known.isNonNegative()) 9376 StableValue = ConstantInt::get(Ty, 0); 9377 else if (Known.isNegative()) 9378 StableValue = ConstantInt::get(Ty, -1, true); 9379 else 9380 return getCouldNotCompute(); 9381 9382 break; 9383 } 9384 case Instruction::LShr: 9385 case Instruction::Shl: 9386 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 9387 // stabilize to 0 in at most bitwidth(K) iterations. 9388 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 9389 break; 9390 } 9391 9392 auto *Result = 9393 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 9394 assert(Result->getType()->isIntegerTy(1) && 9395 "Otherwise cannot be an operand to a branch instruction"); 9396 9397 if (Result->isZeroValue()) { 9398 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9399 const SCEV *UpperBound = 9400 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 9401 return ExitLimit(getCouldNotCompute(), UpperBound, UpperBound, false); 9402 } 9403 9404 return getCouldNotCompute(); 9405 } 9406 9407 /// Return true if we can constant fold an instruction of the specified type, 9408 /// assuming that all operands were constants. 9409 static bool CanConstantFold(const Instruction *I) { 9410 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 9411 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 9412 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 9413 return true; 9414 9415 if (const CallInst *CI = dyn_cast<CallInst>(I)) 9416 if (const Function *F = CI->getCalledFunction()) 9417 return canConstantFoldCallTo(CI, F); 9418 return false; 9419 } 9420 9421 /// Determine whether this instruction can constant evolve within this loop 9422 /// assuming its operands can all constant evolve. 9423 static bool canConstantEvolve(Instruction *I, const Loop *L) { 9424 // An instruction outside of the loop can't be derived from a loop PHI. 9425 if (!L->contains(I)) return false; 9426 9427 if (isa<PHINode>(I)) { 9428 // We don't currently keep track of the control flow needed to evaluate 9429 // PHIs, so we cannot handle PHIs inside of loops. 9430 return L->getHeader() == I->getParent(); 9431 } 9432 9433 // If we won't be able to constant fold this expression even if the operands 9434 // are constants, bail early. 9435 return CanConstantFold(I); 9436 } 9437 9438 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 9439 /// recursing through each instruction operand until reaching a loop header phi. 9440 static PHINode * 9441 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 9442 DenseMap<Instruction *, PHINode *> &PHIMap, 9443 unsigned Depth) { 9444 if (Depth > MaxConstantEvolvingDepth) 9445 return nullptr; 9446 9447 // Otherwise, we can evaluate this instruction if all of its operands are 9448 // constant or derived from a PHI node themselves. 9449 PHINode *PHI = nullptr; 9450 for (Value *Op : UseInst->operands()) { 9451 if (isa<Constant>(Op)) continue; 9452 9453 Instruction *OpInst = dyn_cast<Instruction>(Op); 9454 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 9455 9456 PHINode *P = dyn_cast<PHINode>(OpInst); 9457 if (!P) 9458 // If this operand is already visited, reuse the prior result. 
9459       // We may have P != PHI if this is the deepest point at which the
9460       // inconsistent paths meet.
9461       P = PHIMap.lookup(OpInst);
9462     if (!P) {
9463       // Recurse and memoize the results, whether a phi is found or not.
9464       // This recursive call invalidates pointers into PHIMap.
9465       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
9466       PHIMap[OpInst] = P;
9467     }
9468     if (!P)
9469       return nullptr; // Not evolving from PHI.
9470     if (PHI && PHI != P)
9471       return nullptr; // Evolving from multiple different PHIs.
9472     PHI = P;
9473   }
9474   // This is an expression evolving from a constant PHI!
9475   return PHI;
9476 }
9477 
9478 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
9479 /// in the loop that V is derived from.  We allow arbitrary operations along the
9480 /// way, but the operands of an operation must either be constants or a value
9481 /// derived from a constant PHI.  If this expression does not fit with these
9482 /// constraints, return null.
9483 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
9484   Instruction *I = dyn_cast<Instruction>(V);
9485   if (!I || !canConstantEvolve(I, L)) return nullptr;
9486 
9487   if (PHINode *PN = dyn_cast<PHINode>(I))
9488     return PN;
9489 
9490   // Record non-constant instructions contained by the loop.
9491   DenseMap<Instruction *, PHINode *> PHIMap;
9492   return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
9493 }
9494 
9495 /// EvaluateExpression - Given an expression that passes the
9496 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
9497 /// in the loop has the value PHIVal.  If we can't fold this expression for some
9498 /// reason, return null.
9499 static Constant *EvaluateExpression(Value *V, const Loop *L,
9500                                     DenseMap<Instruction *, Constant *> &Vals,
9501                                     const DataLayout &DL,
9502                                     const TargetLibraryInfo *TLI) {
9503   // Convenient constant check, but redundant for recursive calls.
9504   if (Constant *C = dyn_cast<Constant>(V)) return C;
9505   Instruction *I = dyn_cast<Instruction>(V);
9506   if (!I) return nullptr;
9507 
9508   if (Constant *C = Vals.lookup(I)) return C;
9509 
9510   // An instruction inside the loop depends on a value outside the loop that we
9511   // weren't given a mapping for, or a value such as a call inside the loop.
9512   if (!canConstantEvolve(I, L)) return nullptr;
9513 
9514   // An unmapped PHI can be due to a branch or another loop inside this loop,
9515   // or due to this not being the initial iteration through a loop where we
9516   // couldn't compute the evolution of this particular PHI last time.
9517   if (isa<PHINode>(I)) return nullptr;
9518 
9519   std::vector<Constant*> Operands(I->getNumOperands());
9520 
9521   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
9522     Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
9523     if (!Operand) {
9524       Operands[i] = dyn_cast<Constant>(I->getOperand(i));
9525       if (!Operands[i]) return nullptr;
9526       continue;
9527     }
9528     Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
9529     Vals[Operand] = C;
9530     if (!C) return nullptr;
9531     Operands[i] = C;
9532   }
9533 
9534   return ConstantFoldInstOperands(I, Operands, DL, TLI);
9535 }
9536 
9537 
9538 // If every incoming value to PN except the one for BB is a specific Constant,
9539 // return that, else return nullptr.
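// For example (an illustrative sketch):
//   %p = phi i32 [ 1, %bb.a ], [ 1, %bb.b ], [ %x, %latch ]
// getOtherIncomingValue(%p, %latch) returns i32 1; if %bb.a instead
// contributed the constant 2, the incoming constants would disagree and the
// function would return nullptr.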
9540 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 9541 Constant *IncomingVal = nullptr; 9542 9543 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 9544 if (PN->getIncomingBlock(i) == BB) 9545 continue; 9546 9547 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 9548 if (!CurrentVal) 9549 return nullptr; 9550 9551 if (IncomingVal != CurrentVal) { 9552 if (IncomingVal) 9553 return nullptr; 9554 IncomingVal = CurrentVal; 9555 } 9556 } 9557 9558 return IncomingVal; 9559 } 9560 9561 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 9562 /// in the header of its containing loop, we know the loop executes a 9563 /// constant number of times, and the PHI node is just a recurrence 9564 /// involving constants, fold it. 9565 Constant * 9566 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 9567 const APInt &BEs, 9568 const Loop *L) { 9569 auto I = ConstantEvolutionLoopExitValue.find(PN); 9570 if (I != ConstantEvolutionLoopExitValue.end()) 9571 return I->second; 9572 9573 if (BEs.ugt(MaxBruteForceIterations)) 9574 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 9575 9576 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 9577 9578 DenseMap<Instruction *, Constant *> CurrentIterVals; 9579 BasicBlock *Header = L->getHeader(); 9580 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 9581 9582 BasicBlock *Latch = L->getLoopLatch(); 9583 if (!Latch) 9584 return nullptr; 9585 9586 for (PHINode &PHI : Header->phis()) { 9587 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 9588 CurrentIterVals[&PHI] = StartCST; 9589 } 9590 if (!CurrentIterVals.count(PN)) 9591 return RetVal = nullptr; 9592 9593 Value *BEValue = PN->getIncomingValueForBlock(Latch); 9594 9595 // Execute the loop symbolically to determine the exit value. 9596 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 9597 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 9598 9599 unsigned NumIterations = BEs.getZExtValue(); // must be in range 9600 unsigned IterationNum = 0; 9601 const DataLayout &DL = getDataLayout(); 9602 for (; ; ++IterationNum) { 9603 if (IterationNum == NumIterations) 9604 return RetVal = CurrentIterVals[PN]; // Got exit value! 9605 9606 // Compute the value of the PHIs for the next iteration. 9607 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 9608 DenseMap<Instruction *, Constant *> NextIterVals; 9609 Constant *NextPHI = 9610 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 9611 if (!NextPHI) 9612 return nullptr; // Couldn't evaluate! 9613 NextIterVals[PN] = NextPHI; 9614 9615 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 9616 9617 // Also evaluate the other PHI nodes. However, we don't get to stop if we 9618 // cease to be able to evaluate one of them or if they stop evolving, 9619 // because that doesn't necessarily prevent us from computing PN. 9620 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 9621 for (const auto &I : CurrentIterVals) { 9622 PHINode *PHI = dyn_cast<PHINode>(I.first); 9623 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 9624 PHIsToCompute.emplace_back(PHI, I.second); 9625 } 9626 // We use two distinct loops because EvaluateExpression may invalidate any 9627 // iterators into CurrentIterVals. 
9628     for (const auto &I : PHIsToCompute) {
9629       PHINode *PHI = I.first;
9630       Constant *&NextPHI = NextIterVals[PHI];
9631       if (!NextPHI) { // Not already computed.
9632         Value *BEValue = PHI->getIncomingValueForBlock(Latch);
9633         NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
9634       }
9635       if (NextPHI != I.second)
9636         StoppedEvolving = false;
9637     }
9638 
9639     // If all entries in CurrentIterVals == NextIterVals then we can stop
9640     // iterating; the loop can't continue to change.
9641     if (StoppedEvolving)
9642       return RetVal = CurrentIterVals[PN];
9643 
9644     CurrentIterVals.swap(NextIterVals);
9645   }
9646 }
9647 
9648 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
9649                                                           Value *Cond,
9650                                                           bool ExitWhen) {
9651   PHINode *PN = getConstantEvolvingPHI(Cond, L);
9652   if (!PN) return getCouldNotCompute();
9653 
9654   // If the loop is canonicalized, the PHI will have exactly two entries.
9655   // That's the only form we support here.
9656   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
9657 
9658   DenseMap<Instruction *, Constant *> CurrentIterVals;
9659   BasicBlock *Header = L->getHeader();
9660   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
9661 
9662   BasicBlock *Latch = L->getLoopLatch();
9663   assert(Latch && "Should follow from NumIncomingValues == 2!");
9664 
9665   for (PHINode &PHI : Header->phis()) {
9666     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
9667       CurrentIterVals[&PHI] = StartCST;
9668   }
9669   if (!CurrentIterVals.count(PN))
9670     return getCouldNotCompute();
9671 
9672   // Okay, we have found a PHI node that defines the trip count of this loop.
9673   // Execute the loop symbolically to determine when the condition gets a
9674   // value of "ExitWhen".
9675   unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
9676   const DataLayout &DL = getDataLayout();
9677   for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
9678     auto *CondVal = dyn_cast_or_null<ConstantInt>(
9679         EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
9680 
9681     // Couldn't symbolically evaluate.
9682     if (!CondVal) return getCouldNotCompute();
9683 
9684     if (CondVal->getValue() == uint64_t(ExitWhen)) {
9685       ++NumBruteForceTripCountsComputed;
9686       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
9687     }
9688 
9689     // Update all the PHI nodes for the next iteration.
9690     DenseMap<Instruction *, Constant *> NextIterVals;
9691 
9692     // Create a list of which PHIs we need to compute. We want to do this before
9693     // calling EvaluateExpression on them because that may invalidate iterators
9694     // into CurrentIterVals.
9695     SmallVector<PHINode *, 8> PHIsToCompute;
9696     for (const auto &I : CurrentIterVals) {
9697       PHINode *PHI = dyn_cast<PHINode>(I.first);
9698       if (!PHI || PHI->getParent() != Header) continue;
9699       PHIsToCompute.push_back(PHI);
9700     }
9701     for (PHINode *PHI : PHIsToCompute) {
9702       Constant *&NextPHI = NextIterVals[PHI];
9703       if (NextPHI) continue; // Already computed!
9704 
9705       Value *BEValue = PHI->getIncomingValueForBlock(Latch);
9706       NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
9707     }
9708     CurrentIterVals.swap(NextIterVals);
9709   }
9710 
9711   // Too many iterations were needed to evaluate.
9712 return getCouldNotCompute(); 9713 } 9714 9715 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 9716 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 9717 ValuesAtScopes[V]; 9718 // Check to see if we've folded this expression at this loop before. 9719 for (auto &LS : Values) 9720 if (LS.first == L) 9721 return LS.second ? LS.second : V; 9722 9723 Values.emplace_back(L, nullptr); 9724 9725 // Otherwise compute it. 9726 const SCEV *C = computeSCEVAtScope(V, L); 9727 for (auto &LS : reverse(ValuesAtScopes[V])) 9728 if (LS.first == L) { 9729 LS.second = C; 9730 if (!isa<SCEVConstant>(C)) 9731 ValuesAtScopesUsers[C].push_back({L, V}); 9732 break; 9733 } 9734 return C; 9735 } 9736 9737 /// This builds up a Constant using the ConstantExpr interface. That way, we 9738 /// will return Constants for objects which aren't represented by a 9739 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 9740 /// Returns NULL if the SCEV isn't representable as a Constant. 9741 static Constant *BuildConstantFromSCEV(const SCEV *V) { 9742 switch (V->getSCEVType()) { 9743 case scCouldNotCompute: 9744 case scAddRecExpr: 9745 case scVScale: 9746 return nullptr; 9747 case scConstant: 9748 return cast<SCEVConstant>(V)->getValue(); 9749 case scUnknown: 9750 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 9751 case scPtrToInt: { 9752 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V); 9753 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand())) 9754 return ConstantExpr::getPtrToInt(CastOp, P2I->getType()); 9755 9756 return nullptr; 9757 } 9758 case scTruncate: { 9759 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 9760 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 9761 return ConstantExpr::getTrunc(CastOp, ST->getType()); 9762 return nullptr; 9763 } 9764 case scAddExpr: { 9765 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 9766 Constant *C = nullptr; 9767 for (const SCEV *Op : SA->operands()) { 9768 Constant *OpC = BuildConstantFromSCEV(Op); 9769 if (!OpC) 9770 return nullptr; 9771 if (!C) { 9772 C = OpC; 9773 continue; 9774 } 9775 assert(!C->getType()->isPointerTy() && 9776 "Can only have one pointer, and it must be last"); 9777 if (OpC->getType()->isPointerTy()) { 9778 // The offsets have been converted to bytes. We can add bytes using 9779 // an i8 GEP. 
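        // For example (an illustrative sketch), adding a byte offset of 16 to
        // a pointer %g conceptually becomes "getelementptr i8, ptr %g, i64 16".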
9780 C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()), 9781 OpC, C); 9782 } else { 9783 C = ConstantExpr::getAdd(C, OpC); 9784 } 9785 } 9786 return C; 9787 } 9788 case scMulExpr: 9789 case scSignExtend: 9790 case scZeroExtend: 9791 case scUDivExpr: 9792 case scSMaxExpr: 9793 case scUMaxExpr: 9794 case scSMinExpr: 9795 case scUMinExpr: 9796 case scSequentialUMinExpr: 9797 return nullptr; 9798 } 9799 llvm_unreachable("Unknown SCEV kind!"); 9800 } 9801 9802 const SCEV * 9803 ScalarEvolution::getWithOperands(const SCEV *S, 9804 SmallVectorImpl<const SCEV *> &NewOps) { 9805 switch (S->getSCEVType()) { 9806 case scTruncate: 9807 case scZeroExtend: 9808 case scSignExtend: 9809 case scPtrToInt: 9810 return getCastExpr(S->getSCEVType(), NewOps[0], S->getType()); 9811 case scAddRecExpr: { 9812 auto *AddRec = cast<SCEVAddRecExpr>(S); 9813 return getAddRecExpr(NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags()); 9814 } 9815 case scAddExpr: 9816 return getAddExpr(NewOps, cast<SCEVAddExpr>(S)->getNoWrapFlags()); 9817 case scMulExpr: 9818 return getMulExpr(NewOps, cast<SCEVMulExpr>(S)->getNoWrapFlags()); 9819 case scUDivExpr: 9820 return getUDivExpr(NewOps[0], NewOps[1]); 9821 case scUMaxExpr: 9822 case scSMaxExpr: 9823 case scUMinExpr: 9824 case scSMinExpr: 9825 return getMinMaxExpr(S->getSCEVType(), NewOps); 9826 case scSequentialUMinExpr: 9827 return getSequentialMinMaxExpr(S->getSCEVType(), NewOps); 9828 case scConstant: 9829 case scVScale: 9830 case scUnknown: 9831 return S; 9832 case scCouldNotCompute: 9833 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 9834 } 9835 llvm_unreachable("Unknown SCEV kind!"); 9836 } 9837 9838 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 9839 switch (V->getSCEVType()) { 9840 case scConstant: 9841 case scVScale: 9842 return V; 9843 case scAddRecExpr: { 9844 // If this is a loop recurrence for a loop that does not contain L, then we 9845 // are dealing with the final value computed by the loop. 9846 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(V); 9847 // First, attempt to evaluate each operand. 9848 // Avoid performing the look-up in the common case where the specified 9849 // expression has no loop-variant portions. 9850 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 9851 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 9852 if (OpAtScope == AddRec->getOperand(i)) 9853 continue; 9854 9855 // Okay, at least one of these operands is loop variant but might be 9856 // foldable. Build a new instance of the folded commutative expression. 9857 SmallVector<const SCEV *, 8> NewOps; 9858 NewOps.reserve(AddRec->getNumOperands()); 9859 append_range(NewOps, AddRec->operands().take_front(i)); 9860 NewOps.push_back(OpAtScope); 9861 for (++i; i != e; ++i) 9862 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 9863 9864 const SCEV *FoldedRec = getAddRecExpr( 9865 NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW)); 9866 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 9867 // The addrec may be folded to a nonrecurrence, for example, if the 9868 // induction variable is multiplied by zero after constant folding. Go 9869 // ahead and return the folded value. 9870 if (!AddRec) 9871 return FoldedRec; 9872 break; 9873 } 9874 9875 // If the scope is outside the addrec's loop, evaluate it by using the 9876 // loop exit value of the addrec. 
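    // For instance (an illustrative sketch), {0,+,2}<%L> evaluated at a scope
    // outside %L, where %L's backedge-taken count is 9, folds to 0 + 2*9 = 18.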
9877 if (!AddRec->getLoop()->contains(L)) { 9878 // To evaluate this recurrence, we need to know how many times the AddRec 9879 // loop iterates. Compute this now. 9880 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 9881 if (BackedgeTakenCount == getCouldNotCompute()) 9882 return AddRec; 9883 9884 // Then, evaluate the AddRec. 9885 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 9886 } 9887 9888 return AddRec; 9889 } 9890 case scTruncate: 9891 case scZeroExtend: 9892 case scSignExtend: 9893 case scPtrToInt: 9894 case scAddExpr: 9895 case scMulExpr: 9896 case scUDivExpr: 9897 case scUMaxExpr: 9898 case scSMaxExpr: 9899 case scUMinExpr: 9900 case scSMinExpr: 9901 case scSequentialUMinExpr: { 9902 ArrayRef<const SCEV *> Ops = V->operands(); 9903 // Avoid performing the look-up in the common case where the specified 9904 // expression has no loop-variant portions. 9905 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 9906 const SCEV *OpAtScope = getSCEVAtScope(Ops[i], L); 9907 if (OpAtScope != Ops[i]) { 9908 // Okay, at least one of these operands is loop variant but might be 9909 // foldable. Build a new instance of the folded commutative expression. 9910 SmallVector<const SCEV *, 8> NewOps; 9911 NewOps.reserve(Ops.size()); 9912 append_range(NewOps, Ops.take_front(i)); 9913 NewOps.push_back(OpAtScope); 9914 9915 for (++i; i != e; ++i) { 9916 OpAtScope = getSCEVAtScope(Ops[i], L); 9917 NewOps.push_back(OpAtScope); 9918 } 9919 9920 return getWithOperands(V, NewOps); 9921 } 9922 } 9923 // If we got here, all operands are loop invariant. 9924 return V; 9925 } 9926 case scUnknown: { 9927 // If this instruction is evolved from a constant-evolving PHI, compute the 9928 // exit value from the loop without using SCEVs. 9929 const SCEVUnknown *SU = cast<SCEVUnknown>(V); 9930 Instruction *I = dyn_cast<Instruction>(SU->getValue()); 9931 if (!I) 9932 return V; // This is some other type of SCEVUnknown, just return it. 9933 9934 if (PHINode *PN = dyn_cast<PHINode>(I)) { 9935 const Loop *CurrLoop = this->LI[I->getParent()]; 9936 // Looking for loop exit value. 9937 if (CurrLoop && CurrLoop->getParentLoop() == L && 9938 PN->getParent() == CurrLoop->getHeader()) { 9939 // Okay, there is no closed form solution for the PHI node. Check 9940 // to see if the loop that contains it has a known backedge-taken 9941 // count. If so, we may be able to force computation of the exit 9942 // value. 9943 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop); 9944 // This trivial case can show up in some degenerate cases where 9945 // the incoming IR has not yet been fully simplified. 9946 if (BackedgeTakenCount->isZero()) { 9947 Value *InitValue = nullptr; 9948 bool MultipleInitValues = false; 9949 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 9950 if (!CurrLoop->contains(PN->getIncomingBlock(i))) { 9951 if (!InitValue) 9952 InitValue = PN->getIncomingValue(i); 9953 else if (InitValue != PN->getIncomingValue(i)) { 9954 MultipleInitValues = true; 9955 break; 9956 } 9957 } 9958 } 9959 if (!MultipleInitValues && InitValue) 9960 return getSCEV(InitValue); 9961 } 9962 // Do we have a loop invariant value flowing around the backedge 9963 // for a loop which must execute the backedge? 9964 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 9965 isKnownNonZero(BackedgeTakenCount) && 9966 PN->getNumIncomingValues() == 2) { 9967 9968 unsigned InLoopPred = 9969 CurrLoop->contains(PN->getIncomingBlock(0)) ? 
0 : 1; 9970 Value *BackedgeVal = PN->getIncomingValue(InLoopPred); 9971 if (CurrLoop->isLoopInvariant(BackedgeVal)) 9972 return getSCEV(BackedgeVal); 9973 } 9974 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 9975 // Okay, we know how many times the containing loop executes. If 9976 // this is a constant evolving PHI node, get the final value at 9977 // the specified iteration number. 9978 Constant *RV = 9979 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop); 9980 if (RV) 9981 return getSCEV(RV); 9982 } 9983 } 9984 } 9985 9986 // Okay, this is an expression that we cannot symbolically evaluate 9987 // into a SCEV. Check to see if it's possible to symbolically evaluate 9988 // the arguments into constants, and if so, try to constant propagate the 9989 // result. This is particularly useful for computing loop exit values. 9990 if (!CanConstantFold(I)) 9991 return V; // This is some other type of SCEVUnknown, just return it. 9992 9993 SmallVector<Constant *, 4> Operands; 9994 Operands.reserve(I->getNumOperands()); 9995 bool MadeImprovement = false; 9996 for (Value *Op : I->operands()) { 9997 if (Constant *C = dyn_cast<Constant>(Op)) { 9998 Operands.push_back(C); 9999 continue; 10000 } 10001 10002 // If any of the operands is non-constant and if they are 10003 // non-integer and non-pointer, don't even try to analyze them 10004 // with scev techniques. 10005 if (!isSCEVable(Op->getType())) 10006 return V; 10007 10008 const SCEV *OrigV = getSCEV(Op); 10009 const SCEV *OpV = getSCEVAtScope(OrigV, L); 10010 MadeImprovement |= OrigV != OpV; 10011 10012 Constant *C = BuildConstantFromSCEV(OpV); 10013 if (!C) 10014 return V; 10015 assert(C->getType() == Op->getType() && "Type mismatch"); 10016 Operands.push_back(C); 10017 } 10018 10019 // Check to see if getSCEVAtScope actually made an improvement. 10020 if (!MadeImprovement) 10021 return V; // This is some other type of SCEVUnknown, just return it. 10022 10023 Constant *C = nullptr; 10024 const DataLayout &DL = getDataLayout(); 10025 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 10026 if (!C) 10027 return V; 10028 return getSCEV(C); 10029 } 10030 case scCouldNotCompute: 10031 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 10032 } 10033 llvm_unreachable("Unknown SCEV type!"); 10034 } 10035 10036 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 10037 return getSCEVAtScope(getSCEV(V), L); 10038 } 10039 10040 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 10041 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 10042 return stripInjectiveFunctions(ZExt->getOperand()); 10043 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 10044 return stripInjectiveFunctions(SExt->getOperand()); 10045 return S; 10046 } 10047 10048 /// Finds the minimum unsigned root of the following equation: 10049 /// 10050 /// A * X = B (mod N) 10051 /// 10052 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 10053 /// A and B isn't important. 10054 /// 10055 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 10056 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 10057 ScalarEvolution &SE) { 10058 uint32_t BW = A.getBitWidth(); 10059 assert(BW == SE.getTypeSizeInBits(B->getType())); 10060 assert(A != 0 && "A must be non-zero."); 10061 10062 // 1. D = gcd(A, N) 10063 // 10064 // The gcd of A and N may have only one prime factor: 2. 
The number of 10065 // trailing zeros in A is its multiplicity 10066 uint32_t Mult2 = A.countr_zero(); 10067 // D = 2^Mult2 10068 10069 // 2. Check if B is divisible by D. 10070 // 10071 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 10072 // is not less than multiplicity of this prime factor for D. 10073 if (SE.getMinTrailingZeros(B) < Mult2) 10074 return SE.getCouldNotCompute(); 10075 10076 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 10077 // modulo (N / D). 10078 // 10079 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 10080 // (N / D) in general. The inverse itself always fits into BW bits, though, 10081 // so we immediately truncate it. 10082 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 10083 APInt Mod(BW + 1, 0); 10084 Mod.setBit(BW - Mult2); // Mod = N / D 10085 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 10086 10087 // 4. Compute the minimum unsigned root of the equation: 10088 // I * (B / D) mod (N / D) 10089 // To simplify the computation, we factor out the divide by D: 10090 // (I * B mod N) / D 10091 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 10092 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 10093 } 10094 10095 /// For a given quadratic addrec, generate coefficients of the corresponding 10096 /// quadratic equation, multiplied by a common value to ensure that they are 10097 /// integers. 10098 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 10099 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 10100 /// were multiplied by, and BitWidth is the bit width of the original addrec 10101 /// coefficients. 10102 /// This function returns std::nullopt if the addrec coefficients are not 10103 /// compile- time constants. 10104 static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 10105 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 10106 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 10107 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 10108 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 10109 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 10110 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 10111 << *AddRec << '\n'); 10112 10113 // We currently can only solve this if the coefficients are constants. 10114 if (!LC || !MC || !NC) { 10115 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 10116 return std::nullopt; 10117 } 10118 10119 APInt L = LC->getAPInt(); 10120 APInt M = MC->getAPInt(); 10121 APInt N = NC->getAPInt(); 10122 assert(!N.isZero() && "This is not a quadratic addrec"); 10123 10124 unsigned BitWidth = LC->getAPInt().getBitWidth(); 10125 unsigned NewWidth = BitWidth + 1; 10126 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 10127 << BitWidth << '\n'); 10128 // The sign-extension (as opposed to a zero-extension) here matches the 10129 // extension used in SolveQuadraticEquationWrap (with the same motivation). 10130 N = N.sext(NewWidth); 10131 M = M.sext(NewWidth); 10132 L = L.sext(NewWidth); 10133 10134 // The increments are M, M+N, M+2N, ..., so the accumulated values are 10135 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 10136 // L+M, L+2M+N, L+3M+3N, ... 10137 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 
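  // For instance (an illustrative sketch): for the chrec {0,+,1,+,2} we have
  // L = 0, M = 1, N = 2, so Acc = n + n(n-1) = n^2, matching the values
  // 0, 1, 4, 9, ... that the chrec takes on successive iterations.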
10138 // 10139 // The equation Acc = 0 is then 10140 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 10141 // In a quadratic form it becomes: 10142 // N n^2 + (2M-N) n + 2L = 0. 10143 10144 APInt A = N; 10145 APInt B = 2 * M - A; 10146 APInt C = 2 * L; 10147 APInt T = APInt(NewWidth, 2); 10148 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 10149 << "x + " << C << ", coeff bw: " << NewWidth 10150 << ", multiplied by " << T << '\n'); 10151 return std::make_tuple(A, B, C, T, BitWidth); 10152 } 10153 10154 /// Helper function to compare optional APInts: 10155 /// (a) if X and Y both exist, return min(X, Y), 10156 /// (b) if neither X nor Y exist, return std::nullopt, 10157 /// (c) if exactly one of X and Y exists, return that value. 10158 static std::optional<APInt> MinOptional(std::optional<APInt> X, 10159 std::optional<APInt> Y) { 10160 if (X && Y) { 10161 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 10162 APInt XW = X->sext(W); 10163 APInt YW = Y->sext(W); 10164 return XW.slt(YW) ? *X : *Y; 10165 } 10166 if (!X && !Y) 10167 return std::nullopt; 10168 return X ? *X : *Y; 10169 } 10170 10171 /// Helper function to truncate an optional APInt to a given BitWidth. 10172 /// When solving addrec-related equations, it is preferable to return a value 10173 /// that has the same bit width as the original addrec's coefficients. If the 10174 /// solution fits in the original bit width, truncate it (except for i1). 10175 /// Returning a value of a different bit width may inhibit some optimizations. 10176 /// 10177 /// In general, a solution to a quadratic equation generated from an addrec 10178 /// may require BW+1 bits, where BW is the bit width of the addrec's 10179 /// coefficients. The reason is that the coefficients of the quadratic 10180 /// equation are BW+1 bits wide (to avoid truncation when converting from 10181 /// the addrec to the equation). 10182 static std::optional<APInt> TruncIfPossible(std::optional<APInt> X, 10183 unsigned BitWidth) { 10184 if (!X) 10185 return std::nullopt; 10186 unsigned W = X->getBitWidth(); 10187 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 10188 return X->trunc(BitWidth); 10189 return X; 10190 } 10191 10192 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 10193 /// iterations. The values L, M, N are assumed to be signed, and they 10194 /// should all have the same bit widths. 10195 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 10196 /// where BW is the bit width of the addrec's coefficients. 10197 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 10198 /// returned as such, otherwise the bit width of the returned value may 10199 /// be greater than BW. 10200 /// 10201 /// This function returns std::nullopt if 10202 /// (a) the addrec coefficients are not constant, or 10203 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 10204 /// like x^2 = 5, no integer solutions exist, in other cases an integer 10205 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 
10206 static std::optional<APInt> 10207 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 10208 APInt A, B, C, M; 10209 unsigned BitWidth; 10210 auto T = GetQuadraticEquation(AddRec); 10211 if (!T) 10212 return std::nullopt; 10213 10214 std::tie(A, B, C, M, BitWidth) = *T; 10215 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 10216 std::optional<APInt> X = 10217 APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth + 1); 10218 if (!X) 10219 return std::nullopt; 10220 10221 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 10222 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 10223 if (!V->isZero()) 10224 return std::nullopt; 10225 10226 return TruncIfPossible(X, BitWidth); 10227 } 10228 10229 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 10230 /// iterations. The values M, N are assumed to be signed, and they 10231 /// should all have the same bit widths. 10232 /// Find the least n such that c(n) does not belong to the given range, 10233 /// while c(n-1) does. 10234 /// 10235 /// This function returns std::nullopt if 10236 /// (a) the addrec coefficients are not constant, or 10237 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 10238 /// bounds of the range. 10239 static std::optional<APInt> 10240 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 10241 const ConstantRange &Range, ScalarEvolution &SE) { 10242 assert(AddRec->getOperand(0)->isZero() && 10243 "Starting value of addrec should be 0"); 10244 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 10245 << Range << ", addrec " << *AddRec << '\n'); 10246 // This case is handled in getNumIterationsInRange. Here we can assume that 10247 // we start in the range. 10248 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 10249 "Addrec's initial value should be in range"); 10250 10251 APInt A, B, C, M; 10252 unsigned BitWidth; 10253 auto T = GetQuadraticEquation(AddRec); 10254 if (!T) 10255 return std::nullopt; 10256 10257 // Be careful about the return value: there can be two reasons for not 10258 // returning an actual number. First, if no solutions to the equations 10259 // were found, and second, if the solutions don't leave the given range. 10260 // The first case means that the actual solution is "unknown", the second 10261 // means that it's known, but not valid. If the solution is unknown, we 10262 // cannot make any conclusions. 10263 // Return a pair: the optional solution and a flag indicating if the 10264 // solution was found. 10265 auto SolveForBoundary = 10266 [&](APInt Bound) -> std::pair<std::optional<APInt>, bool> { 10267 // Solve for signed overflow and unsigned overflow, pick the lower 10268 // solution. 10269 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 10270 << Bound << " (before multiplying by " << M << ")\n"); 10271 Bound *= M; // The quadratic equation multiplier. 
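    // Since the addrec is known to start at zero (asserted above), the
    // constant term C of the scaled equation is zero as well, so c~(n) = Bound
    // reduces to A*n^2 + B*n - Bound = 0, which is the form passed to the
    // solver below.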
10272 
10273     std::optional<APInt> SO;
10274     if (BitWidth > 1) {
10275       LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
10276                            "signed overflow\n");
10277       SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
10278     }
10279     LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
10280                          "unsigned overflow\n");
10281     std::optional<APInt> UO =
10282         APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth + 1);
10283 
10284     auto LeavesRange = [&] (const APInt &X) {
10285       ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
10286       ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
10287       if (Range.contains(V0->getValue()))
10288         return false;
10289       // X should be at least 1, so X-1 is non-negative.
10290       ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
10291       ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
10292       if (Range.contains(V1->getValue()))
10293         return true;
10294       return false;
10295     };
10296 
10297     // If SolveQuadraticEquationWrap returns std::nullopt, it means that there
10298     // can be a solution, but the function failed to find it. We cannot treat it
10299     // as "no solution".
10300     if (!SO || !UO)
10301       return {std::nullopt, false};
10302 
10303     // Check the smaller value first to see if it leaves the range.
10304     // At this point, both SO and UO must have values.
10305     std::optional<APInt> Min = MinOptional(SO, UO);
10306     if (LeavesRange(*Min))
10307       return { Min, true };
10308     std::optional<APInt> Max = Min == SO ? UO : SO;
10309     if (LeavesRange(*Max))
10310       return { Max, true };
10311 
10312     // Solutions were found, but were eliminated, hence the "true".
10313     return {std::nullopt, true};
10314   };
10315 
10316   std::tie(A, B, C, M, BitWidth) = *T;
10317   // Lower bound is inclusive; subtract 1 to represent the exiting value.
10318   APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
10319   APInt Upper = Range.getUpper().sext(A.getBitWidth());
10320   auto SL = SolveForBoundary(Lower);
10321   auto SU = SolveForBoundary(Upper);
10322   // If any of the solutions was unknown, no meaningful conclusions can
10323   // be made.
10324   if (!SL.second || !SU.second)
10325     return std::nullopt;
10326 
10327   // Claim: The correct solution is not some value between Min and Max.
10328   //
10329   // Justification: Assuming that Min and Max are different values, one of
10330   // them is when the first signed overflow happens, the other is when the
10331   // first unsigned overflow happens. Crossing the range boundary is only
10332   // possible via an overflow (treating 0 as a special case of it, modeling
10333   // an overflow as crossing k*2^W for some k).
10334   //
10335   // The interesting case here is when Min was eliminated as an invalid
10336   // solution, but Max was not. The argument is that if there was another
10337   // overflow between Min and Max, it would also have been eliminated if
10338   // it was considered.
10339   //
10340   // For a given boundary, it is possible to have two overflows of the same
10341   // type (signed/unsigned) without having the other type in between: this
10342   // can happen when the vertex of the parabola is between the iterations
10343   // corresponding to the overflows. This is only possible when the two
10344   // overflows cross k*2^W for the same k. In such a case, if the second one
10345   // left the range (and was the first one to do so), the first overflow
10346   // would have to enter the range, which would mean that either we had left
10347   // the range before or that we started outside of it. Both of these cases
10348   // are contradictions.
10349   //
10350   // Claim: In the case where SolveForBoundary returns std::nullopt, the correct
10351   // solution is not some value between the Max for this boundary and the
10352   // Min of the other boundary.
10353   //
10354   // Justification: Assume that we had such Max_A and Min_B corresponding
10355   // to range boundaries A and B and such that Max_A < Min_B. If there was
10356   // a solution between Max_A and Min_B, it would have to be caused by an
10357   // overflow corresponding to either A or B. It cannot correspond to B,
10358   // since Min_B is the first occurrence of such an overflow. If it
10359   // corresponded to A, it would have to be either a signed or an unsigned
10360   // overflow that is larger than both eliminated overflows for A. But
10361   // between the eliminated overflows and this overflow, the values would
10362   // cover the entire value space, thus crossing the other boundary, which
10363   // is a contradiction.
10364 
10365   return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
10366 }
10367 
10368 ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
10369                                                          const Loop *L,
10370                                                          bool ControlsOnlyExit,
10371                                                          bool AllowPredicates) {
10372 
10373   // This is only used for loops with an "x != y" exit test. The exit
10374   // condition is now expressed as a single expression, V = x-y, so the exit
10375   // test is effectively V != 0. We know and take advantage of the fact that
10376   // this expression is only used in a compare-with-zero context.
10377 
10378   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
10379   // If the value is a constant:
10380   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
10381     // If the value is already zero, the branch will execute zero times.
10382     if (C->getValue()->isZero()) return C;
10383     return getCouldNotCompute();  // Otherwise it will loop infinitely.
10384   }
10385 
10386   const SCEVAddRecExpr *AddRec =
10387       dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
10388 
10389   if (!AddRec && AllowPredicates)
10390     // Try to make this an AddRec using runtime tests, in the first X
10391     // iterations of this loop, where X is the SCEV expression found by the
10392     // algorithm below.
10393     AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
10394 
10395   if (!AddRec || AddRec->getLoop() != L)
10396     return getCouldNotCompute();
10397 
10398   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
10399   // the quadratic equation to solve it.
10400   if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
10401     // We can only use this value if the chrec ends up with an exact zero
10402     // value at this index.  When solving for "X*X != 5", for example, we
10403     // should not accept a root of 2.
10404     if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
10405       const auto *R = cast<SCEVConstant>(getConstant(*S));
10406       return ExitLimit(R, R, R, false, Predicates);
10407     }
10408     return getCouldNotCompute();
10409   }
10410 
10411   // Otherwise we can only handle this if it is affine.
10412   if (!AddRec->isAffine())
10413     return getCouldNotCompute();
10414 
10415   // If this is an affine expression, the execution count of this branch is
10416   // the minimum unsigned root of the following equation:
10417   //
10418   //     Start + Step*N = 0 (mod 2^BW)
10419   //
10420   // equivalent to:
10421   //
10422   //     Step*N = -Start (mod 2^BW)
10423   //
10424   // where BW is the common bit width of Start and Step.
10425 
10426   // Get the initial value for the loop.
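  // (Start and Step are computed at the parent loop's scope below, since they
  // may themselves be expressions that vary in an enclosing loop.)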
10427 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 10428 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 10429 10430 // For now we handle only constant steps. 10431 // 10432 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 10433 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 10434 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 10435 // We have not yet seen any such cases. 10436 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 10437 if (!StepC || StepC->getValue()->isZero()) 10438 return getCouldNotCompute(); 10439 10440 // For positive steps (counting up until unsigned overflow): 10441 // N = -Start/Step (as unsigned) 10442 // For negative steps (counting down to zero): 10443 // N = Start/-Step 10444 // First compute the unsigned distance from zero in the direction of Step. 10445 bool CountDown = StepC->getAPInt().isNegative(); 10446 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 10447 10448 // Handle unitary steps, which cannot wrap around. 10449 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 10450 // N = Distance (as unsigned) 10451 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 10452 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L)); 10453 MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance)); 10454 10455 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 10456 // we end up with a loop whose backedge-taken count is n - 1. Detect this 10457 // case, and see if we can improve the bound. 10458 // 10459 // Explicitly handling this here is necessary because getUnsignedRange 10460 // isn't context-sensitive; it doesn't know that we only care about the 10461 // range inside the loop. 10462 const SCEV *Zero = getZero(Distance->getType()); 10463 const SCEV *One = getOne(Distance->getType()); 10464 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 10465 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 10466 // If Distance + 1 doesn't overflow, we can compute the maximum distance 10467 // as "unsigned_max(Distance + 1) - 1". 10468 ConstantRange CR = getUnsignedRange(DistancePlusOne); 10469 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 10470 } 10471 return ExitLimit(Distance, getConstant(MaxBECount), Distance, false, 10472 Predicates); 10473 } 10474 10475 // If the condition controls loop exit (the loop exits only if the expression 10476 // is true) and the addition is no-wrap, we can use unsigned divide to 10477 // compute the backedge count. In this case, the step may not divide the 10478 // distance, but we don't care because if the condition is "missed" the loop 10479 // will have undefined behavior due to wrapping. 10480 if (ControlsOnlyExit && AddRec->hasNoSelfWrap() && 10481 loopHasNoAbnormalExits(AddRec->getLoop())) { 10482 const SCEV *Exact = 10483 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 10484 const SCEV *ConstantMax = getCouldNotCompute(); 10485 if (Exact != getCouldNotCompute()) { 10486 APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L)); 10487 ConstantMax = 10488 getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact))); 10489 } 10490 const SCEV *SymbolicMax = 10491 isa<SCEVCouldNotCompute>(Exact) ? ConstantMax : Exact; 10492 return ExitLimit(Exact, ConstantMax, SymbolicMax, false, Predicates); 10493 } 10494 10495 // Solve the general equation.
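// Illustrative sketch (hypothetical values, not drawn from the code above):
// for the i8 recurrence {-8,+,4}, the equation is 4*N == 8 (mod 2^8), whose
// solutions are N == 2 (mod 64). The minimum unsigned root N = 2 matches the
// sequence -8, -4, 0, which reaches zero after two backedges.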
10496 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 10497 getNegativeSCEV(Start), *this); 10498 10499 const SCEV *M = E; 10500 if (E != getCouldNotCompute()) { 10501 APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L)); 10502 M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E))); 10503 } 10504 auto *S = isa<SCEVCouldNotCompute>(E) ? M : E; 10505 return ExitLimit(E, M, S, false, Predicates); 10506 } 10507 10508 ScalarEvolution::ExitLimit 10509 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 10510 // Loops that look like: while (X == 0) are very strange indeed. We don't 10511 // handle them yet except for the trivial case. This could be expanded in the 10512 // future as needed. 10513 10514 // If the value is a constant, check to see if it is known to be non-zero 10515 // already. If so, the backedge will execute zero times. 10516 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 10517 if (!C->getValue()->isZero()) 10518 return getZero(C->getType()); 10519 return getCouldNotCompute(); // Otherwise it will loop infinitely. 10520 } 10521 10522 // We could implement others, but I really doubt anyone writes loops like 10523 // this, and if they did, they would already be constant folded. 10524 return getCouldNotCompute(); 10525 } 10526 10527 std::pair<const BasicBlock *, const BasicBlock *> 10528 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 10529 const { 10530 // If the block has a unique predecessor, then there is no path from the 10531 // predecessor to the block that does not go through the direct edge 10532 // from the predecessor to the block. 10533 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 10534 return {Pred, BB}; 10535 10536 // A loop's header is defined to be a block that dominates the loop. 10537 // If the header has a unique predecessor outside the loop, it must be 10538 // a block that has exactly one successor that can reach the loop. 10539 if (const Loop *L = LI.getLoopFor(BB)) 10540 return {L->getLoopPredecessor(), L->getHeader()}; 10541 10542 return {nullptr, nullptr}; 10543 } 10544 10545 /// SCEV structural equivalence is usually sufficient for testing whether two 10546 /// expressions are equal, however for the purposes of looking for a condition 10547 /// guarding a loop, it can be useful to be a little more general, since a 10548 /// front-end may have replicated the controlling expression. 10549 static bool HasSameValue(const SCEV *A, const SCEV *B) { 10550 // Quick check to see if they are the same SCEV. 10551 if (A == B) return true; 10552 10553 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 10554 // Not all instructions that are "identical" compute the same value. For 10555 // instance, two distinct alloca instructions allocating the same type are 10556 // identical and do not read memory; but compute distinct values. 10557 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 10558 }; 10559 10560 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 10561 // two different instructions with the same value. Check for this case. 
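// For example (hypothetical IR), a front-end that replicated a bounds check
// may emit two identical `getelementptr i32, ptr %p, i64 %i` instructions;
// they map to distinct SCEVUnknowns yet compute the same address, which is
// why the check below accepts identical GEPs and binary operators.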
10562 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 10563 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 10564 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 10565 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 10566 if (ComputesEqualValues(AI, BI)) 10567 return true; 10568 10569 // Otherwise assume they may have a different value. 10570 return false; 10571 } 10572 10573 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 10574 const SCEV *&LHS, const SCEV *&RHS, 10575 unsigned Depth) { 10576 bool Changed = false; 10577 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 10578 // '0 != 0'. 10579 auto TrivialCase = [&](bool TriviallyTrue) { 10580 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 10581 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 10582 return true; 10583 }; 10584 // If we hit the max recursion limit bail out. 10585 if (Depth >= 3) 10586 return false; 10587 10588 // Canonicalize a constant to the right side. 10589 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 10590 // Check for both operands constant. 10591 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 10592 if (ConstantExpr::getICmp(Pred, 10593 LHSC->getValue(), 10594 RHSC->getValue())->isNullValue()) 10595 return TrivialCase(false); 10596 return TrivialCase(true); 10597 } 10598 // Otherwise swap the operands to put the constant on the right. 10599 std::swap(LHS, RHS); 10600 Pred = ICmpInst::getSwappedPredicate(Pred); 10601 Changed = true; 10602 } 10603 10604 // If we're comparing an addrec with a value which is loop-invariant in the 10605 // addrec's loop, put the addrec on the left. Also make a dominance check, 10606 // as both operands could be addrecs loop-invariant in each other's loop. 10607 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 10608 const Loop *L = AR->getLoop(); 10609 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 10610 std::swap(LHS, RHS); 10611 Pred = ICmpInst::getSwappedPredicate(Pred); 10612 Changed = true; 10613 } 10614 } 10615 10616 // If there's a constant operand, canonicalize comparisons with boundary 10617 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 10618 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 10619 const APInt &RA = RC->getAPInt(); 10620 10621 bool SimplifiedByConstantRange = false; 10622 10623 if (!ICmpInst::isEquality(Pred)) { 10624 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 10625 if (ExactCR.isFullSet()) 10626 return TrivialCase(true); 10627 if (ExactCR.isEmptySet()) 10628 return TrivialCase(false); 10629 10630 APInt NewRHS; 10631 CmpInst::Predicate NewPred; 10632 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 10633 ICmpInst::isEquality(NewPred)) { 10634 // We were able to convert an inequality to an equality. 10635 Pred = NewPred; 10636 RHS = getConstant(NewRHS); 10637 Changed = SimplifiedByConstantRange = true; 10638 } 10639 } 10640 10641 if (!SimplifiedByConstantRange) { 10642 switch (Pred) { 10643 default: 10644 break; 10645 case ICmpInst::ICMP_EQ: 10646 case ICmpInst::ICMP_NE: 10647 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 
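// (Here !RA tests the constant RHS for zero, so the fold below applies only
// to comparisons against 0, matching the "== 0" form above.)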
10648 if (!RA) 10649 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 10650 if (const SCEVMulExpr *ME = 10651 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 10652 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 10653 ME->getOperand(0)->isAllOnesValue()) { 10654 RHS = AE->getOperand(1); 10655 LHS = ME->getOperand(1); 10656 Changed = true; 10657 } 10658 break; 10659 10660 10661 // The "Should have been caught earlier!" messages refer to the fact 10662 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 10663 // should have fired on the corresponding cases, and canonicalized the 10664 // check to trivial case. 10665 10666 case ICmpInst::ICMP_UGE: 10667 assert(!RA.isMinValue() && "Should have been caught earlier!"); 10668 Pred = ICmpInst::ICMP_UGT; 10669 RHS = getConstant(RA - 1); 10670 Changed = true; 10671 break; 10672 case ICmpInst::ICMP_ULE: 10673 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 10674 Pred = ICmpInst::ICMP_ULT; 10675 RHS = getConstant(RA + 1); 10676 Changed = true; 10677 break; 10678 case ICmpInst::ICMP_SGE: 10679 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 10680 Pred = ICmpInst::ICMP_SGT; 10681 RHS = getConstant(RA - 1); 10682 Changed = true; 10683 break; 10684 case ICmpInst::ICMP_SLE: 10685 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 10686 Pred = ICmpInst::ICMP_SLT; 10687 RHS = getConstant(RA + 1); 10688 Changed = true; 10689 break; 10690 } 10691 } 10692 } 10693 10694 // Check for obvious equality. 10695 if (HasSameValue(LHS, RHS)) { 10696 if (ICmpInst::isTrueWhenEqual(Pred)) 10697 return TrivialCase(true); 10698 if (ICmpInst::isFalseWhenEqual(Pred)) 10699 return TrivialCase(false); 10700 } 10701 10702 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 10703 // adding or subtracting 1 from one of the operands. 
10704 switch (Pred) { 10705 case ICmpInst::ICMP_SLE: 10706 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 10707 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 10708 SCEV::FlagNSW); 10709 Pred = ICmpInst::ICMP_SLT; 10710 Changed = true; 10711 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 10712 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 10713 SCEV::FlagNSW); 10714 Pred = ICmpInst::ICMP_SLT; 10715 Changed = true; 10716 } 10717 break; 10718 case ICmpInst::ICMP_SGE: 10719 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 10720 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 10721 SCEV::FlagNSW); 10722 Pred = ICmpInst::ICMP_SGT; 10723 Changed = true; 10724 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 10725 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 10726 SCEV::FlagNSW); 10727 Pred = ICmpInst::ICMP_SGT; 10728 Changed = true; 10729 } 10730 break; 10731 case ICmpInst::ICMP_ULE: 10732 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 10733 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 10734 SCEV::FlagNUW); 10735 Pred = ICmpInst::ICMP_ULT; 10736 Changed = true; 10737 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 10738 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 10739 Pred = ICmpInst::ICMP_ULT; 10740 Changed = true; 10741 } 10742 break; 10743 case ICmpInst::ICMP_UGE: 10744 if (!getUnsignedRangeMin(RHS).isMinValue()) { 10745 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 10746 Pred = ICmpInst::ICMP_UGT; 10747 Changed = true; 10748 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 10749 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 10750 SCEV::FlagNUW); 10751 Pred = ICmpInst::ICMP_UGT; 10752 Changed = true; 10753 } 10754 break; 10755 default: 10756 break; 10757 } 10758 10759 // TODO: More simplifications are possible here. 10760 10761 // Recursively simplify until we either hit a recursion limit or nothing 10762 // changes. 10763 if (Changed) 10764 return SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1); 10765 10766 return Changed; 10767 } 10768 10769 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 10770 return getSignedRangeMax(S).isNegative(); 10771 } 10772 10773 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 10774 return getSignedRangeMin(S).isStrictlyPositive(); 10775 } 10776 10777 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 10778 return !getSignedRangeMin(S).isNegative(); 10779 } 10780 10781 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 10782 return !getSignedRangeMax(S).isStrictlyPositive(); 10783 } 10784 10785 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 10786 // Query push down for cases where the unsigned range is 10787 // less than sufficient. 10788 if (const auto *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 10789 return isKnownNonZero(SExt->getOperand(0)); 10790 return getUnsignedRangeMin(S) != 0; 10791 } 10792 10793 std::pair<const SCEV *, const SCEV *> 10794 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 10795 // Compute SCEV on entry of loop L. 10796 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 10797 if (Start == getCouldNotCompute()) 10798 return { Start, Start }; 10799 // Compute post increment SCEV for loop L. 
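// (Sketch of the intent, not normative: for S = {a,+,b}<L>, the init
// rewriter yields a, and the post-increment rewriter is expected to yield
// {a+b,+,b}<L>, i.e. the value of S after the first increment.)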
10800 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 10801 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 10802 return { Start, PostInc }; 10803 } 10804 10805 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 10806 const SCEV *LHS, const SCEV *RHS) { 10807 // First collect all loops. 10808 SmallPtrSet<const Loop *, 8> LoopsUsed; 10809 getUsedLoops(LHS, LoopsUsed); 10810 getUsedLoops(RHS, LoopsUsed); 10811 10812 if (LoopsUsed.empty()) 10813 return false; 10814 10815 // Domination relationship must be a linear order on collected loops. 10816 #ifndef NDEBUG 10817 for (const auto *L1 : LoopsUsed) 10818 for (const auto *L2 : LoopsUsed) 10819 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 10820 DT.dominates(L2->getHeader(), L1->getHeader())) && 10821 "Domination relationship is not a linear order"); 10822 #endif 10823 10824 const Loop *MDL = 10825 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 10826 [&](const Loop *L1, const Loop *L2) { 10827 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 10828 }); 10829 10830 // Get init and post increment value for LHS. 10831 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 10832 // If LHS contains an unknown non-invariant SCEV, bail out. 10833 if (SplitLHS.first == getCouldNotCompute()) 10834 return false; 10835 assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 10836 // Get init and post increment value for RHS. 10837 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 10838 // If RHS contains an unknown non-invariant SCEV, bail out. 10839 if (SplitRHS.first == getCouldNotCompute()) 10840 return false; 10841 assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 10842 // It is possible that init SCEV contains an invariant load but it does 10843 // not dominate MDL and is not available at MDL loop entry, so we should 10844 // check it here. 10845 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 10846 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 10847 return false; 10848 10849 // The backedge guard check appears to be faster than the entry guard check, 10850 // so checking it first can short-circuit the whole estimation in some cases. 10851 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 10852 SplitRHS.second) && 10853 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first); 10854 } 10855 10856 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 10857 const SCEV *LHS, const SCEV *RHS) { 10858 // Canonicalize the inputs first. 10859 (void)SimplifyICmpOperands(Pred, LHS, RHS); 10860 10861 if (isKnownViaInduction(Pred, LHS, RHS)) 10862 return true; 10863 10864 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 10865 return true; 10866 10867 // Otherwise see what can be done with some simple reasoning. 10868 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 10869 } 10870 10871 std::optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred, 10872 const SCEV *LHS, 10873 const SCEV *RHS) { 10874 if (isKnownPredicate(Pred, LHS, RHS)) 10875 return true; 10876 if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS)) 10877 return false; 10878 return std::nullopt; 10879 } 10880 10881 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred, 10882 const SCEV *LHS, const SCEV *RHS, 10883 const Instruction *CtxI) { 10884 // TODO: Analyze guards and assumes from Context's block.
10885 return isKnownPredicate(Pred, LHS, RHS) || 10886 isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS); 10887 } 10888 10889 std::optional<bool> 10890 ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, 10891 const SCEV *RHS, const Instruction *CtxI) { 10892 std::optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS); 10893 if (KnownWithoutContext) 10894 return KnownWithoutContext; 10895 10896 if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS)) 10897 return true; 10898 if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), 10899 ICmpInst::getInversePredicate(Pred), 10900 LHS, RHS)) 10901 return false; 10902 return std::nullopt; 10903 } 10904 10905 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 10906 const SCEVAddRecExpr *LHS, 10907 const SCEV *RHS) { 10908 const Loop *L = LHS->getLoop(); 10909 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 10910 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 10911 } 10912 10913 std::optional<ScalarEvolution::MonotonicPredicateType> 10914 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, 10915 ICmpInst::Predicate Pred) { 10916 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred); 10917 10918 #ifndef NDEBUG 10919 // Verify an invariant: swapping the predicate should turn a monotonically 10920 // increasing change into a monotonically decreasing one, and vice versa. 10921 if (Result) { 10922 auto ResultSwapped = 10923 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); 10924 10925 assert(*ResultSwapped != *Result && 10926 "monotonicity should flip as we flip the predicate"); 10927 } 10928 #endif 10929 10930 return Result; 10931 } 10932 10933 std::optional<ScalarEvolution::MonotonicPredicateType> 10934 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS, 10935 ICmpInst::Predicate Pred) { 10936 // A zero step value for LHS means the induction variable is essentially a 10937 // loop invariant value. We don't really depend on the predicate actually 10938 // flipping from false to true (for increasing predicates, and the other way 10939 // around for decreasing predicates), all we care about is that *if* the 10940 // predicate changes then it only changes from false to true. 10941 // 10942 // A zero step value in itself is not very useful, but there may be places 10943 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 10944 // as general as possible. 10945 10946 // Only handle LE/LT/GE/GT predicates. 10947 if (!ICmpInst::isRelational(Pred)) 10948 return std::nullopt; 10949 10950 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred); 10951 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) && 10952 "Should be greater or less!"); 10953 10954 // Check that AR does not wrap. 10955 if (ICmpInst::isUnsigned(Pred)) { 10956 if (!LHS->hasNoUnsignedWrap()) 10957 return std::nullopt; 10958 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 10959 } 10960 assert(ICmpInst::isSigned(Pred) && 10961 "Relational predicate is either signed or unsigned!"); 10962 if (!LHS->hasNoSignedWrap()) 10963 return std::nullopt; 10964 10965 const SCEV *Step = LHS->getStepRecurrence(*this); 10966 10967 if (isKnownNonNegative(Step)) 10968 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 10969 10970 if (isKnownNonPositive(Step)) 10971 return !IsGreater ?
MonotonicallyIncreasing : MonotonicallyDecreasing; 10972 10973 return std::nullopt; 10974 } 10975 10976 std::optional<ScalarEvolution::LoopInvariantPredicate> 10977 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, 10978 const SCEV *LHS, const SCEV *RHS, 10979 const Loop *L, 10980 const Instruction *CtxI) { 10981 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 10982 if (!isLoopInvariant(RHS, L)) { 10983 if (!isLoopInvariant(LHS, L)) 10984 return std::nullopt; 10985 10986 std::swap(LHS, RHS); 10987 Pred = ICmpInst::getSwappedPredicate(Pred); 10988 } 10989 10990 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10991 if (!ArLHS || ArLHS->getLoop() != L) 10992 return std::nullopt; 10993 10994 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); 10995 if (!MonotonicType) 10996 return std::nullopt; 10997 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 10998 // true as the loop iterates, and the backedge is control dependent on 10999 // "ArLHS `Pred` RHS" == true then we can reason as follows: 11000 // 11001 // * if the predicate was false in the first iteration then the predicate 11002 // is never evaluated again, since the loop exits without taking the 11003 // backedge. 11004 // * if the predicate was true in the first iteration then it will 11005 // continue to be true for all future iterations since it is 11006 // monotonically increasing. 11007 // 11008 // For both the above possibilities, we can replace the loop varying 11009 // predicate with its value on the first iteration of the loop (which is 11010 // loop invariant). 11011 // 11012 // A similar reasoning applies for a monotonically decreasing predicate, by 11013 // replacing true with false and false with true in the above two bullets. 11014 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; 11015 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 11016 11017 if (isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 11018 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), 11019 RHS); 11020 11021 if (!CtxI) 11022 return std::nullopt; 11023 // Try to prove via context. 11024 // TODO: Support other cases. 11025 switch (Pred) { 11026 default: 11027 break; 11028 case ICmpInst::ICMP_ULE: 11029 case ICmpInst::ICMP_ULT: { 11030 assert(ArLHS->hasNoUnsignedWrap() && "Is a requirement of monotonicity!"); 11031 // Given preconditions 11032 // (1) ArLHS does not cross the border of positive and negative parts of 11033 // range because of: 11034 // - Positive step; (TODO: lift this limitation) 11035 // - nuw - does not cross zero boundary; 11036 // - nsw - does not cross SINT_MAX boundary; 11037 // (2) ArLHS <s RHS 11038 // (3) RHS >=s 0 11039 // we can replace the loop variant ArLHS <u RHS condition with loop 11040 // invariant Start(ArLHS) <u RHS. 11041 // 11042 // Because of (1) there are two options: 11043 // - ArLHS is always negative. It means that ArLHS <u RHS is always false; 11044 // - ArLHS is always non-negative. Because of (3) RHS is also non-negative. 11045 // It means that ArLHS <s RHS <=> ArLHS <u RHS. 11046 // Because of (2) ArLHS <u RHS is trivially true. 11047 // All together it means that ArLHS <u RHS <=> Start(ArLHS) >=s 0. 11048 // We can strengthen this to Start(ArLHS) <u RHS. 
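// A hypothetical instance of this rule: for ArLHS = {0,+,1}<nuw><nsw> and
// RHS = %n with %n >=s 0, proving {0,+,1} <s %n at the context instruction
// lets us replace the loop-varying test ArLHS <u %n with the invariant test
// 0 <u %n.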
11049 auto SignFlippedPred = ICmpInst::getFlippedSignednessPredicate(Pred); 11050 if (ArLHS->hasNoSignedWrap() && ArLHS->isAffine() && 11051 isKnownPositive(ArLHS->getStepRecurrence(*this)) && 11052 isKnownNonNegative(RHS) && 11053 isKnownPredicateAt(SignFlippedPred, ArLHS, RHS, CtxI)) 11054 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), 11055 RHS); 11056 } 11057 } 11058 11059 return std::nullopt; 11060 } 11061 11062 std::optional<ScalarEvolution::LoopInvariantPredicate> 11063 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( 11064 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 11065 const Instruction *CtxI, const SCEV *MaxIter) { 11066 if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl( 11067 Pred, LHS, RHS, L, CtxI, MaxIter)) 11068 return LIP; 11069 if (auto *UMin = dyn_cast<SCEVUMinExpr>(MaxIter)) 11070 // Number of iterations expressed as UMIN isn't always great for expressing 11071 // the value on the last iteration. If the straightforward approach didn't 11072 // work, try the following trick: if the predicate is invariant for X, it 11073 // is also invariant for umin(X, ...). So try to find something that works 11074 // among subexpressions of MaxIter expressed as umin. 11075 for (auto *Op : UMin->operands()) 11076 if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl( 11077 Pred, LHS, RHS, L, CtxI, Op)) 11078 return LIP; 11079 return std::nullopt; 11080 } 11081 11082 std::optional<ScalarEvolution::LoopInvariantPredicate> 11083 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterationsImpl( 11084 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 11085 const Instruction *CtxI, const SCEV *MaxIter) { 11086 // Try to prove the following set of facts: 11087 // - The predicate is monotonic in the iteration space. 11088 // - If the check does not fail on the 1st iteration: 11089 // - No overflow will happen during first MaxIter iterations; 11090 // - It will not fail on the MaxIter'th iteration. 11091 // If the check does fail on the 1st iteration, we leave the loop and no 11092 // other checks matter. 11093 11094 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 11095 if (!isLoopInvariant(RHS, L)) { 11096 if (!isLoopInvariant(LHS, L)) 11097 return std::nullopt; 11098 11099 std::swap(LHS, RHS); 11100 Pred = ICmpInst::getSwappedPredicate(Pred); 11101 } 11102 11103 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS); 11104 if (!AR || AR->getLoop() != L) 11105 return std::nullopt; 11106 11107 // The predicate must be relational (i.e. <, <=, >=, >). 11108 if (!ICmpInst::isRelational(Pred)) 11109 return std::nullopt; 11110 11111 // TODO: Support steps other than +/- 1. 11112 const SCEV *Step = AR->getStepRecurrence(*this); 11113 auto *One = getOne(Step->getType()); 11114 auto *MinusOne = getNegativeSCEV(One); 11115 if (Step != One && Step != MinusOne) 11116 return std::nullopt; 11117 11118 // Type mismatch here means that MaxIter is potentially larger than the max 11119 // unsigned value in the start type, which means we cannot prove no wrap for 11120 // the indvar. 11121 if (AR->getType() != MaxIter->getType()) 11122 return std::nullopt; 11123 11124 // Value of IV on suggested last iteration. 11125 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this); 11126 // Does it still meet the requirement? 11127 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS)) 11128 return std::nullopt; 11129 // Because step is +/- 1 and MaxIter has the same type as Start (i.e.
it does 11130 // not exceed max unsigned value of this type), this effectively proves 11131 // that there is no wrap during the iteration. To prove that there is no 11132 // signed/unsigned wrap, we need to check that 11133 // Start <= Last for step = 1 or Start >= Last for step = -1. 11134 ICmpInst::Predicate NoOverflowPred = 11135 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 11136 if (Step == MinusOne) 11137 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred); 11138 const SCEV *Start = AR->getStart(); 11139 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI)) 11140 return std::nullopt; 11141 11142 // Everything is fine. 11143 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS); 11144 } 11145 11146 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 11147 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 11148 if (HasSameValue(LHS, RHS)) 11149 return ICmpInst::isTrueWhenEqual(Pred); 11150 11151 // This code is split out from isKnownPredicate because it is called from 11152 // within isLoopEntryGuardedByCond. 11153 11154 auto CheckRanges = [&](const ConstantRange &RangeLHS, 11155 const ConstantRange &RangeRHS) { 11156 return RangeLHS.icmp(Pred, RangeRHS); 11157 }; 11158 11159 // The check at the top of the function catches the case where the values are 11160 // known to be equal. 11161 if (Pred == CmpInst::ICMP_EQ) 11162 return false; 11163 11164 if (Pred == CmpInst::ICMP_NE) { 11165 auto SL = getSignedRange(LHS); 11166 auto SR = getSignedRange(RHS); 11167 if (CheckRanges(SL, SR)) 11168 return true; 11169 auto UL = getUnsignedRange(LHS); 11170 auto UR = getUnsignedRange(RHS); 11171 if (CheckRanges(UL, UR)) 11172 return true; 11173 auto *Diff = getMinusSCEV(LHS, RHS); 11174 return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff); 11175 } 11176 11177 if (CmpInst::isSigned(Pred)) { 11178 auto SL = getSignedRange(LHS); 11179 auto SR = getSignedRange(RHS); 11180 return CheckRanges(SL, SR); 11181 } 11182 11183 auto UL = getUnsignedRange(LHS); 11184 auto UR = getUnsignedRange(RHS); 11185 return CheckRanges(UL, UR); 11186 } 11187 11188 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 11189 const SCEV *LHS, 11190 const SCEV *RHS) { 11191 // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where 11192 // C1 and C2 are constant integers. If either X or Y are not add expressions, 11193 // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via 11194 // OutC1 and OutC2. 
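// (Illustration with made-up operands: X = (%a + 20)<nsw> and Y = %a match
// with OutC1 = 20 and OutC2 = 0, Y being treated as (%a + 0).)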
11195 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y, 11196 APInt &OutC1, APInt &OutC2, 11197 SCEV::NoWrapFlags ExpectedFlags) { 11198 const SCEV *XNonConstOp, *XConstOp; 11199 const SCEV *YNonConstOp, *YConstOp; 11200 SCEV::NoWrapFlags XFlagsPresent; 11201 SCEV::NoWrapFlags YFlagsPresent; 11202 11203 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) { 11204 XConstOp = getZero(X->getType()); 11205 XNonConstOp = X; 11206 XFlagsPresent = ExpectedFlags; 11207 } 11208 if (!isa<SCEVConstant>(XConstOp) || 11209 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) 11210 return false; 11211 11212 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) { 11213 YConstOp = getZero(Y->getType()); 11214 YNonConstOp = Y; 11215 YFlagsPresent = ExpectedFlags; 11216 } 11217 11218 if (!isa<SCEVConstant>(YConstOp) || 11219 (YFlagsPresent & ExpectedFlags) != ExpectedFlags) 11220 return false; 11221 11222 if (YNonConstOp != XNonConstOp) 11223 return false; 11224 11225 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt(); 11226 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt(); 11227 11228 return true; 11229 }; 11230 11231 APInt C1; 11232 APInt C2; 11233 11234 switch (Pred) { 11235 default: 11236 break; 11237 11238 case ICmpInst::ICMP_SGE: 11239 std::swap(LHS, RHS); 11240 [[fallthrough]]; 11241 case ICmpInst::ICMP_SLE: 11242 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2. 11243 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2)) 11244 return true; 11245 11246 break; 11247 11248 case ICmpInst::ICMP_SGT: 11249 std::swap(LHS, RHS); 11250 [[fallthrough]]; 11251 case ICmpInst::ICMP_SLT: 11252 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2. 11253 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2)) 11254 return true; 11255 11256 break; 11257 11258 case ICmpInst::ICMP_UGE: 11259 std::swap(LHS, RHS); 11260 [[fallthrough]]; 11261 case ICmpInst::ICMP_ULE: 11262 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2. 11263 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2)) 11264 return true; 11265 11266 break; 11267 11268 case ICmpInst::ICMP_UGT: 11269 std::swap(LHS, RHS); 11270 [[fallthrough]]; 11271 case ICmpInst::ICMP_ULT: 11272 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2. 11273 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2)) 11274 return true; 11275 break; 11276 } 11277 11278 return false; 11279 } 11280 11281 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 11282 const SCEV *LHS, 11283 const SCEV *RHS) { 11284 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 11285 return false; 11286 11287 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 11288 // the stack can result in exponential time complexity. 11289 SaveAndRestore Restore(ProvingSplitPredicate, true); 11290 11291 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 11292 // 11293 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 11294 // isKnownPredicate. isKnownPredicate is more powerful, but also more 11295 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 11296 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 11297 // use isKnownPredicate later if needed. 
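// Worked instance (illustrative i8 values): with L = 100, so L >= 0, any I
// satisfying 0 s<= I s< 100 is also I u< 100; conversely, I = -1 is 255 when
// viewed as unsigned, and both sides of the equivalence correctly fail.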
11298 return isKnownNonNegative(RHS) && 11299 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && 11300 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); 11301 } 11302 11303 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB, 11304 ICmpInst::Predicate Pred, 11305 const SCEV *LHS, const SCEV *RHS) { 11306 // No need to even try if we know the module has no guards. 11307 if (!HasGuards) 11308 return false; 11309 11310 return any_of(*BB, [&](const Instruction &I) { 11311 using namespace llvm::PatternMatch; 11312 11313 Value *Condition; 11314 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>( 11315 m_Value(Condition))) && 11316 isImpliedCond(Pred, LHS, RHS, Condition, false); 11317 }); 11318 } 11319 11320 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 11321 /// protected by a conditional between LHS and RHS. This is used to 11322 /// eliminate casts. 11323 bool 11324 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 11325 ICmpInst::Predicate Pred, 11326 const SCEV *LHS, const SCEV *RHS) { 11327 // Interpret a null as meaning no loop, where there is obviously no guard 11328 // (interprocedural conditions notwithstanding). Do not bother about 11329 // unreachable loops. 11330 if (!L || !DT.isReachableFromEntry(L->getHeader())) 11331 return true; 11332 11333 if (VerifyIR) 11334 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) && 11335 "This cannot be done on broken IR!"); 11336 11337 11338 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 11339 return true; 11340 11341 BasicBlock *Latch = L->getLoopLatch(); 11342 if (!Latch) 11343 return false; 11344 11345 BranchInst *LoopContinuePredicate = 11346 dyn_cast<BranchInst>(Latch->getTerminator()); 11347 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && 11348 isImpliedCond(Pred, LHS, RHS, 11349 LoopContinuePredicate->getCondition(), 11350 LoopContinuePredicate->getSuccessor(0) != L->getHeader())) 11351 return true; 11352 11353 // We don't want more than one activation of the following loops on the stack 11354 // -- that can lead to O(n!) time complexity. 11355 if (WalkingBEDominatingConds) 11356 return false; 11357 11358 SaveAndRestore ClearOnExit(WalkingBEDominatingConds, true); 11359 11360 // See if we can exploit a trip count to prove the predicate. 11361 const auto &BETakenInfo = getBackedgeTakenInfo(L); 11362 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this); 11363 if (LatchBECount != getCouldNotCompute()) { 11364 // We know that Latch branches back to the loop header exactly 11365 // LatchBECount times. This means the backedge condition at Latch is 11366 // equivalent to "{0,+,1} u< LatchBECount". 11367 Type *Ty = LatchBECount->getType(); 11368 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW); 11369 const SCEV *LoopCounter = 11370 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags); 11371 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter, 11372 LatchBECount)) 11373 return true; 11374 } 11375 11376 // Check conditions due to any @llvm.assume intrinsics.
11377 for (auto &AssumeVH : AC.assumptions()) { 11378 if (!AssumeVH) 11379 continue; 11380 auto *CI = cast<CallInst>(AssumeVH); 11381 if (!DT.dominates(CI, Latch->getTerminator())) 11382 continue; 11383 11384 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 11385 return true; 11386 } 11387 11388 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 11389 return true; 11390 11391 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 11392 DTN != HeaderDTN; DTN = DTN->getIDom()) { 11393 assert(DTN && "should reach the loop header before reaching the root!"); 11394 11395 BasicBlock *BB = DTN->getBlock(); 11396 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 11397 return true; 11398 11399 BasicBlock *PBB = BB->getSinglePredecessor(); 11400 if (!PBB) 11401 continue; 11402 11403 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 11404 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 11405 continue; 11406 11407 Value *Condition = ContinuePredicate->getCondition(); 11408 11409 // If we have an edge `E` within the loop body that dominates the only 11410 // latch, the condition guarding `E` also guards the backedge. This 11411 // reasoning works only for loops with a single latch. 11412 11413 BasicBlockEdge DominatingEdge(PBB, BB); 11414 if (DominatingEdge.isSingleEdge()) { 11415 // We're constructively (and conservatively) enumerating edges within the 11416 // loop body that dominate the latch. The dominator tree better agree 11417 // with us on this: 11418 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 11419 11420 if (isImpliedCond(Pred, LHS, RHS, Condition, 11421 BB != ContinuePredicate->getSuccessor(0))) 11422 return true; 11423 } 11424 } 11425 11426 return false; 11427 } 11428 11429 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 11430 ICmpInst::Predicate Pred, 11431 const SCEV *LHS, 11432 const SCEV *RHS) { 11433 // Do not bother proving facts for unreachable code. 11434 if (!DT.isReachableFromEntry(BB)) 11435 return true; 11436 if (VerifyIR) 11437 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 11438 "This cannot be done on broken IR!"); 11439 11440 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 11441 // the facts (a >= b && a != b) separately. A typical situation is when the 11442 // non-strict comparison is known from ranges and non-equality is known from 11443 // dominating predicates. If we are proving strict comparison, we always try 11444 // to prove non-equality and non-strict comparison separately. 11445 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 11446 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 11447 bool ProvedNonStrictComparison = false; 11448 bool ProvedNonEquality = false; 11449 11450 auto SplitAndProve = 11451 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 11452 if (!ProvedNonStrictComparison) 11453 ProvedNonStrictComparison = Fn(NonStrictPredicate); 11454 if (!ProvedNonEquality) 11455 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 11456 if (ProvedNonStrictComparison && ProvedNonEquality) 11457 return true; 11458 return false; 11459 }; 11460 11461 if (ProvingStrictComparison) { 11462 auto ProofFn = [&](ICmpInst::Predicate P) { 11463 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 11464 }; 11465 if (SplitAndProve(ProofFn)) 11466 return true; 11467 } 11468 11469 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 
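// (For instance, proving a u< b directly may fail while a u<= b follows from
// ranges and a != b from a dominating condition; the helper below tries the
// direct proof first and then falls back to this split.)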
11470 auto ProveViaCond = [&](const Value *Condition, bool Inverse) { 11471 const Instruction *CtxI = &BB->front(); 11472 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI)) 11473 return true; 11474 if (ProvingStrictComparison) { 11475 auto ProofFn = [&](ICmpInst::Predicate P) { 11476 return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI); 11477 }; 11478 if (SplitAndProve(ProofFn)) 11479 return true; 11480 } 11481 return false; 11482 }; 11483 11484 // Starting at the block's predecessor, climb up the predecessor chain for as 11485 // long as we can find predecessors that have unique successors 11486 // leading to the original block. 11487 const Loop *ContainingLoop = LI.getLoopFor(BB); 11488 const BasicBlock *PredBB; 11489 if (ContainingLoop && ContainingLoop->getHeader() == BB) 11490 PredBB = ContainingLoop->getLoopPredecessor(); 11491 else 11492 PredBB = BB->getSinglePredecessor(); 11493 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB); 11494 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 11495 const BranchInst *BlockEntryPredicate = 11496 dyn_cast<BranchInst>(Pair.first->getTerminator()); 11497 if (!BlockEntryPredicate || BlockEntryPredicate->isUnconditional()) 11498 continue; 11499 11500 if (ProveViaCond(BlockEntryPredicate->getCondition(), 11501 BlockEntryPredicate->getSuccessor(0) != Pair.second)) 11502 return true; 11503 } 11504 11505 // Check conditions due to any @llvm.assume intrinsics. 11506 for (auto &AssumeVH : AC.assumptions()) { 11507 if (!AssumeVH) 11508 continue; 11509 auto *CI = cast<CallInst>(AssumeVH); 11510 if (!DT.dominates(CI, BB)) 11511 continue; 11512 11513 if (ProveViaCond(CI->getArgOperand(0), false)) 11514 return true; 11515 } 11516 11517 // Check conditions due to any @llvm.experimental.guard intrinsics. 11518 auto *GuardDecl = F.getParent()->getFunction( 11519 Intrinsic::getName(Intrinsic::experimental_guard)); 11520 if (GuardDecl) 11521 for (const auto *GU : GuardDecl->users()) 11522 if (const auto *Guard = dyn_cast<IntrinsicInst>(GU)) 11523 if (Guard->getFunction() == BB->getParent() && DT.dominates(Guard, BB)) 11524 if (ProveViaCond(Guard->getArgOperand(0), false)) 11525 return true; 11526 return false; 11527 } 11528 11529 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 11530 ICmpInst::Predicate Pred, 11531 const SCEV *LHS, 11532 const SCEV *RHS) { 11533 // Interpret a null as meaning no loop, where there is obviously no guard 11534 // (interprocedural conditions notwithstanding). 11535 if (!L) 11536 return false; 11537 11538 // Both LHS and RHS must be available at loop entry. 11539 assert(isAvailableAtLoopEntry(LHS, L) && 11540 "LHS is not available at Loop Entry"); 11541 assert(isAvailableAtLoopEntry(RHS, L) && 11542 "RHS is not available at Loop Entry"); 11543 11544 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 11545 return true; 11546 11547 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS); 11548 } 11549 11550 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 11551 const SCEV *RHS, 11552 const Value *FoundCondValue, bool Inverse, 11553 const Instruction *CtxI) { 11554 // A false condition implies anything. Do not bother analyzing it further.
11555 if (FoundCondValue == 11556 ConstantInt::getBool(FoundCondValue->getContext(), Inverse)) 11557 return true; 11558 11559 if (!PendingLoopPredicates.insert(FoundCondValue).second) 11560 return false; 11561 11562 auto ClearOnExit = 11563 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 11564 11565 // Recursively handle And and Or conditions. 11566 const Value *Op0, *Op1; 11567 if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { 11568 if (!Inverse) 11569 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) || 11570 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI); 11571 } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) { 11572 if (Inverse) 11573 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) || 11574 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI); 11575 } 11576 11577 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); 11578 if (!ICI) return false; 11579 11580 // Now that we have found a conditional branch that dominates the loop or 11581 // controls the loop latch, check to see if it is the comparison we are 11582 // looking for. ICmpInst::Predicate FoundPred; 11583 if (Inverse) 11584 FoundPred = ICI->getInversePredicate(); 11585 else 11586 FoundPred = ICI->getPredicate(); 11587 11588 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); 11589 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); 11590 11591 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI); 11592 } 11593 11594 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 11595 const SCEV *RHS, 11596 ICmpInst::Predicate FoundPred, 11597 const SCEV *FoundLHS, const SCEV *FoundRHS, 11598 const Instruction *CtxI) { 11599 // Balance the types. 11600 if (getTypeSizeInBits(LHS->getType()) < 11601 getTypeSizeInBits(FoundLHS->getType())) { 11602 // For unsigned and equality predicates, try to prove that both found 11603 // operands fit into the narrow unsigned range. If so, try to prove facts in 11604 // narrow types.
11605 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy() && 11606 !FoundRHS->getType()->isPointerTy()) { 11607 auto *NarrowType = LHS->getType(); 11608 auto *WideType = FoundLHS->getType(); 11609 auto BitWidth = getTypeSizeInBits(NarrowType); 11610 const SCEV *MaxValue = getZeroExtendExpr( 11611 getConstant(APInt::getMaxValue(BitWidth)), WideType); 11612 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS, 11613 MaxValue) && 11614 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS, 11615 MaxValue)) { 11616 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); 11617 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); 11618 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, 11619 TruncFoundRHS, CtxI)) 11620 return true; 11621 } 11622 } 11623 11624 if (LHS->getType()->isPointerTy() || RHS->getType()->isPointerTy()) 11625 return false; 11626 if (CmpInst::isSigned(Pred)) { 11627 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 11628 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 11629 } else { 11630 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 11631 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 11632 } 11633 } else if (getTypeSizeInBits(LHS->getType()) > 11634 getTypeSizeInBits(FoundLHS->getType())) { 11635 if (FoundLHS->getType()->isPointerTy() || FoundRHS->getType()->isPointerTy()) 11636 return false; 11637 if (CmpInst::isSigned(FoundPred)) { 11638 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 11639 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 11640 } else { 11641 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 11642 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 11643 } 11644 } 11645 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 11646 FoundRHS, CtxI); 11647 } 11648 11649 bool ScalarEvolution::isImpliedCondBalancedTypes( 11650 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11651 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 11652 const Instruction *CtxI) { 11653 assert(getTypeSizeInBits(LHS->getType()) == 11654 getTypeSizeInBits(FoundLHS->getType()) && 11655 "Types should be balanced!"); 11656 // Canonicalize the query to match the way instcombine will have 11657 // canonicalized the comparison. 11658 if (SimplifyICmpOperands(Pred, LHS, RHS)) 11659 if (LHS == RHS) 11660 return CmpInst::isTrueWhenEqual(Pred); 11661 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 11662 if (FoundLHS == FoundRHS) 11663 return CmpInst::isFalseWhenEqual(FoundPred); 11664 11665 // Check to see if we can make the LHS or RHS match. 11666 if (LHS == FoundRHS || RHS == FoundLHS) { 11667 if (isa<SCEVConstant>(RHS)) { 11668 std::swap(FoundLHS, FoundRHS); 11669 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 11670 } else { 11671 std::swap(LHS, RHS); 11672 Pred = ICmpInst::getSwappedPredicate(Pred); 11673 } 11674 } 11675 11676 // Check whether the found predicate is the same as the desired predicate. 11677 if (FoundPred == Pred) 11678 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 11679 11680 // Check whether swapping the found predicate makes it the same as the 11681 // desired predicate. 11682 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 11683 // We can write the implication 11684 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS 11685 // using one of the following ways: 11686 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS 11687 // 2. 
RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS 11688 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS 11689 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS 11690 // Forms 1. and 2. require swapping the operands of one condition. Don't 11691 // do this if it would break canonical constant/addrec ordering. 11692 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS)) 11693 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS, 11694 CtxI); 11695 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS)) 11696 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI); 11697 11698 // There's no clear preference between forms 3. and 4., try both. Avoid 11699 // forming getNotSCEV of pointer values as the resulting subtract is 11700 // not legal. 11701 if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() && 11702 isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS), 11703 FoundLHS, FoundRHS, CtxI)) 11704 return true; 11705 11706 if (!FoundLHS->getType()->isPointerTy() && 11707 !FoundRHS->getType()->isPointerTy() && 11708 isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS), 11709 getNotSCEV(FoundRHS), CtxI)) 11710 return true; 11711 11712 return false; 11713 } 11714 11715 auto IsSignFlippedPredicate = [](CmpInst::Predicate P1, 11716 CmpInst::Predicate P2) { 11717 assert(P1 != P2 && "Handled earlier!"); 11718 return CmpInst::isRelational(P2) && 11719 P1 == CmpInst::getFlippedSignednessPredicate(P2); 11720 }; 11721 if (IsSignFlippedPredicate(Pred, FoundPred)) { 11722 // Unsigned comparison is the same as signed comparison when both the 11723 // operands are non-negative or negative. 11724 if ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) || 11725 (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS))) 11726 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 11727 // Create local copies that we can freely swap and canonicalize our 11728 // conditions to "le/lt". 11729 ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred; 11730 const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS, 11731 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS; 11732 if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) { 11733 CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred); 11734 CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred); 11735 std::swap(CanonicalLHS, CanonicalRHS); 11736 std::swap(CanonicalFoundLHS, CanonicalFoundRHS); 11737 } 11738 assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) && 11739 "Must be!"); 11740 assert((ICmpInst::isLT(CanonicalFoundPred) || 11741 ICmpInst::isLE(CanonicalFoundPred)) && 11742 "Must be!"); 11743 if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS)) 11744 // Use implication: 11745 // x <u y && y >=s 0 --> x <s y. 11746 // If we can prove the left part, the right part is also proven. 11747 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS, 11748 CanonicalRHS, CanonicalFoundLHS, 11749 CanonicalFoundRHS); 11750 if (ICmpInst::isUnsigned(CanonicalPred) && isKnownNegative(CanonicalRHS)) 11751 // Use implication: 11752 // x <s y && y <s 0 --> x <u y. 11753 // If we can prove the left part, the right part is also proven. 11754 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS, 11755 CanonicalRHS, CanonicalFoundLHS, 11756 CanonicalFoundRHS); 11757 } 11758 11759 // Check if we can make progress by sharpening ranges. 
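// For example (hypothetical ranges): if the unsigned minimum of V is 5 and
// the guarding predicate supplies V != 5, then effectively V u>= 6, which
// may be just enough to complete the proof below.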
11760 if (FoundPred == ICmpInst::ICMP_NE && 11761 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 11762 11763 const SCEVConstant *C = nullptr; 11764 const SCEV *V = nullptr; 11765 11766 if (isa<SCEVConstant>(FoundLHS)) { 11767 C = cast<SCEVConstant>(FoundLHS); 11768 V = FoundRHS; 11769 } else { 11770 C = cast<SCEVConstant>(FoundRHS); 11771 V = FoundLHS; 11772 } 11773 11774 // The guarding predicate tells us that C != V. If the known range 11775 // of V is [C, t), we can sharpen the range to [C + 1, t). The 11776 // range we consider has to correspond to same signedness as the 11777 // predicate we're interested in folding. 11778 11779 APInt Min = ICmpInst::isSigned(Pred) ? 11780 getSignedRangeMin(V) : getUnsignedRangeMin(V); 11781 11782 if (Min == C->getAPInt()) { 11783 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 11784 // This is true even if (Min + 1) wraps around -- in case of 11785 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 11786 11787 APInt SharperMin = Min + 1; 11788 11789 switch (Pred) { 11790 case ICmpInst::ICMP_SGE: 11791 case ICmpInst::ICMP_UGE: 11792 // We know V `Pred` SharperMin. If this implies LHS `Pred` 11793 // RHS, we're done. 11794 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), 11795 CtxI)) 11796 return true; 11797 [[fallthrough]]; 11798 11799 case ICmpInst::ICMP_SGT: 11800 case ICmpInst::ICMP_UGT: 11801 // We know from the range information that (V `Pred` Min || 11802 // V == Min). We know from the guarding condition that !(V 11803 // == Min). This gives us 11804 // 11805 // V `Pred` Min || V == Min && !(V == Min) 11806 // => V `Pred` Min 11807 // 11808 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 11809 11810 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI)) 11811 return true; 11812 break; 11813 11814 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 11815 case ICmpInst::ICMP_SLE: 11816 case ICmpInst::ICMP_ULE: 11817 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 11818 LHS, V, getConstant(SharperMin), CtxI)) 11819 return true; 11820 [[fallthrough]]; 11821 11822 case ICmpInst::ICMP_SLT: 11823 case ICmpInst::ICMP_ULT: 11824 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 11825 LHS, V, getConstant(Min), CtxI)) 11826 return true; 11827 break; 11828 11829 default: 11830 // No change 11831 break; 11832 } 11833 } 11834 } 11835 11836 // Check whether the actual condition is beyond sufficient. 11837 if (FoundPred == ICmpInst::ICMP_EQ) 11838 if (ICmpInst::isTrueWhenEqual(Pred)) 11839 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 11840 return true; 11841 if (Pred == ICmpInst::ICMP_NE) 11842 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 11843 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 11844 return true; 11845 11846 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS)) 11847 return true; 11848 11849 // Otherwise assume the worst. 
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

std::optional<APInt>
ScalarEvolution::computeConstantDifference(const SCEV *More, const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).

  // X - X = 0.
  if (More == Less)
    return APInt(getTypeSizeInBits(More->getType()), 0);

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return std::nullopt;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return std::nullopt;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return std::nullopt;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;
  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return std::nullopt;
}

bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) {
  // Try to recognize the following pattern:
  //
  //   FoundRHS = ...
  //   ...
  // loop:
  //   FoundLHS = {Start,+,W}
  // context_bb: // Basic block from the same loop
  //   known(Pred, FoundLHS, FoundRHS)
  //
  // If some predicate is known in the context of a loop, it is also known on
  // each iteration of this loop, including the first iteration. Therefore, in
  // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
  // prove the original pred using this fact.
  if (!CtxI)
    return false;
  const BasicBlock *ContextBB = CtxI->getParent();
  // Make sure AR varies in the context block.
  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that context belongs to the loop and executes on 1st iteration
    // (if it ever executes at all).
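    // For example (illustrative): if {0,+,1} <u N is known each time the
    // context block executes, it held on the first iteration in particular,
    // so the entry fact 0 <u N (i.e. Start Pred FoundRHS) follows.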
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
  }

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that context belongs to the loop and executes on 1st iteration
    // (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
  }

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop. This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  // FoundLHS u< FoundRHS u< -C  =>  (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  // FoundLHS s< FoundRHS s< INT_MIN - C  =>  (FoundLHS + C) s< (FoundRHS + C)
  //                                                          ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  //     <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C  [ using (3) ]
  //     <=>  (FoundLHS + INT_MIN + C) u<
  //          (FoundRHS + INT_MIN + C)                            [ using (1) ]
  //     <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //          (FoundRHS + INT_MIN + C + INT_MIN)                  [ using (3) ]
  //     <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".
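  //
  // A concrete illustration of (1) in i8, with C = 10 (so -C = 246): from
  // 5 u< 100 u< 246 we get 15 u< 110. Neither addition wraps, because both
  // sides are u< -C and adding C therefore stays below 2^8.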

  std::optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  std::optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return the
      // conservative answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If neither LHS nor RHS is a Phi, there is nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, Pred, FoundLHS,
                                          FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each entry block
    // the predicate is true for incoming values from this block, then the
    // predicate is also true for the Phis.
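    // For example (illustrative):
    //   %a = phi i32 [ 0, %bb1 ], [ 5, %bb2 ]
    //   %b = phi i32 [ 1, %bb1 ], [ 7, %bb2 ]
    // Since 0 < 1 and 5 < 7, we may conclude %a < %b.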
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is an AddRec whose loop header is LBB. This means the
    // loop has both an AddRec and an Unknown PHI in its header; for such a
    // loop we can compare the AddRec's incoming values from above the loop
    // and from the latch against LPhi's respective incoming values.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over the inputs of LHS and compare each of them
    // to RHS: the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      // Make sure L does not refer to a value from a potentially previous
      // iteration of a loop.
      if (!properlyDominates(L, LBB))
        return false;
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS,
                                                    const SCEV *FoundLHS,
                                                    const SCEV *FoundRHS) {
  // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue). First, make
  // sure that we are dealing with the same LHS.
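  // For example (illustrative): from X <u (Y >> 2) together with Y <=u Z we
  // can conclude X <u Z, because (Y >> 2) <=u Y <=u Z.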
  if (RHS == FoundRHS) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }
  if (LHS != FoundLHS)
    return false;

  auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS);
  if (!SUFoundRHS)
    return false;

  Value *Shiftee, *ShiftValue;

  using namespace PatternMatch;
  if (match(SUFoundRHS->getValue(),
            m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) {
    auto *ShifteeS = getSCEV(Shiftee);
    // Prove one of the following:
    // LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS
    // LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS
    // LHS <s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
    //   ---> LHS <s RHS
    // LHS <=s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
    //   ---> LHS <=s RHS
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
      return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS);
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
      if (isKnownNonNegative(ShifteeS))
        return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS);
  }

  return false;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS,
                                            const Instruction *CtxI) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, Pred, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
                                          CtxI))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS);
}

/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return is_contained(MinMaxExpr->operands(), Candidate);
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
      SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        // FIXME: what about umin_seq?
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting compile time with the analysis of overly large
  // trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // We only want to work with GT comparison so far.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned, try to reduce it to the corresponding signed comparison.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace the unsigned predicate with its signed counterpart if
    // all involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
      // use this fact to prove that LHS and RHS are non-negative.
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Check whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getMinusOne(RHS->getType());

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop, which would
      // be cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid
      // this, we only want to create SCEVs that are constants in this section.
      // So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given FoundLHS > -3, FoundLHS is at least -2. If we
      // divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
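    // For example (illustrative), widening i8 x = -1 to i16: zext x = 255,
    // while sext x = 65535 when the bits are read unsigned, so
    // zext x u<= sext x.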
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     ICmpInst::Predicate FoundPred,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  std::optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `FoundPred` `FoundRHS`".
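  // For example (illustrative): if the antecedent is `x u< 5` (so
  // FoundLHSRange = [0, 5)) and LHS differs from x by the constant 10, then
  // LHSRange = [10, 15), and a consequent such as `LHS u< 15` holds for every
  // value in that range.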
  ConstantRange FoundLHSRange =
      ConstantRange::makeExactICmpRegion(FoundPred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return LHSRange.icmp(Pred, ConstRHS);
}

bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                        bool IsSigned) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}

bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                        bool IsSigned) {

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}

const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) {
  // umin(N, 1) + floor((N - umin(N, 1)) / D)
  // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
  // expression fixes the case of N = 0.
  const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
  const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
  return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
}

const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {
  // The logic in this function assumes we can represent a positive stride.
  // If we can't, the backedge-taken count must be zero.
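  // (For a 1-bit signed type there is no representable positive stride: the
  // only non-zero i1 value is -1 when interpreted as signed; hence the early
  // return below.)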
  if (IsSigned && BitWidth == 1)
    return getZero(Stride->getType());

  // The code below has only been closely audited for negative strides in the
  // unsigned comparison case; it may be correct for signed comparison, but
  // that needs to be established.
  if (IsSigned && isKnownNegative(Stride))
    return getCouldNotCompute();

  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt MinStride =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We assume either the stride is positive, or the backedge-taken count
  // is zero. So force StrideForMaxBECount to be at least one.
  APInt One(BitWidth, 1);
  APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
                                       : APIntOps::umax(One, MinStride);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum backedge
  // taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
  MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
                    : APIntOps::umax(MaxEnd, MinStart);

  return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
                         getConstant(StrideForMaxBECount) /* Step */);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsOnlyExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  auto canAssumeNoSelfWrap = [&](const SCEVAddRecExpr *AR) {
    // Can we prove this loop *must* be UB if overflow of IV occurs?
    // Reasoning goes as follows:
    // * Suppose the IV did self wrap.
    // * If Stride evenly divides the iteration space, then once wrap
    //   occurs, the loop must revisit the same values.
    // * We know that RHS is invariant, and that none of those values
    //   caused this exit to be taken previously. Thus, this exit is
    //   dynamically dead.
    // * If this is the sole exit, then a dead exit implies the loop
    //   must be infinite if there are no abnormal exits.
    // * If the loop were infinite, then it must either not be mustprogress
    //   or have side effects. Otherwise, it must be UB.
    // * It can't (by assumption) be UB, so we have contradicted our
    //   premise and can conclude the IV did not in fact self-wrap.
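    // (Illustrative: an i8 IV with power-of-two stride 4 revisits exactly the
    // same 64 values after a self-wrap, so no new exit outcomes can appear.)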
    if (!isLoopInvariant(RHS, L))
      return false;

    auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this));
    if (!StrideC || !StrideC->getAPInt().isPowerOf2())
      return false;

    if (!ControlsOnlyExit || !loopHasNoAbnormalExits(L))
      return false;

    return loopIsFiniteByAssumption(L);
  };

  if (!IV) {
    if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand());
      if (AR && AR->getLoop() == L && AR->isAffine()) {
        auto canProveNUW = [&]() {
          // We can use the comparison to infer no-wrap flags only if it fully
          // controls the loop exit.
          if (!ControlsOnlyExit)
            return false;

          if (!isLoopInvariant(RHS, L))
            return false;

          if (!isKnownNonZero(AR->getStepRecurrence(*this)))
            // We need the sequence defined by AR to strictly increase in the
            // unsigned integer domain for the logic below to hold.
            return false;

          const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType());
          const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType());
          // If RHS <=u Limit, then there must exist a value V in the sequence
          // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and
          // V <=u UINT_MAX. Thus, we must exit the loop before unsigned
          // overflow occurs. This limit also implies that a signed comparison
          // (in the wide bitwidth) is equivalent to an unsigned comparison as
          // the high bits on both sides must be zero.
          APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this));
          APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1);
          Limit = Limit.zext(OuterBitWidth);
          return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit);
        };
        auto Flags = AR->getNoWrapFlags();
        if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW())
          Flags = setFlags(Flags, SCEV::FlagNUW);

        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
        if (AR->hasNoUnsignedWrap()) {
          // Emulate what getZeroExtendExpr would have done during construction
          // if we'd been able to infer the fact just above at that time.
          const SCEV *Step = AR->getStepRecurrence(*this);
          Type *Ty = ZExt->getType();
          auto *S = getAddRecExpr(
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0),
              getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags());
          IV = dyn_cast<SCEVAddRecExpr>(S);
        }
      }
    }
  }

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  // A precondition of this method is that the condition being analyzed
  // reaches an exiting branch which dominates the latch. Given that, we can
  // assume that an increment which violates the nowrap specification and
  // produces poison must cause undefined behavior when the resulting poison
  // value is branched upon and thus we can conclude that the backedge is
  // taken no more often than would be required to produce that poison value.
  // Note that a well defined loop can exit on the iteration which violates
  // the nowrap specification if there is another exit (either explicit or
  // implicit/exceptional) which causes the loop to execute before the
  // exiting instruction we're analyzing would trigger UB.
  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsOnlyExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove the
    // correctness of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is guaranteed to be finite (e.g. is mustprogress and has
    //    no side effects within the loop)
    // c) loop has a single static exit (with no abnormal exits)
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this
    // case.
    //
    // Preconditions b) and c) combine to imply that if rhs is invariant in L,
    // then a zero stride means the backedge can't be taken without executing
    // undefined behavior.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) ||
        !loopHasNoAbnormalExits(L))
      return getCouldNotCompute();

    if (!isKnownNonZero(Stride)) {
      // If we have a step of zero, and RHS isn't invariant in L, we don't know
      // if it might eventually be greater than start and if so, on which
      // iteration. We can't even produce a useful upper bound.
      if (!isLoopInvariant(RHS, L))
        return getCouldNotCompute();

      // We allow a potentially zero stride, but we need to divide by stride
      // below. Since the loop can't be infinite and this check must control
      // the sole exit, we can infer the exit must be taken on the first
      // iteration (e.g. backedge count = 0) if the stride is zero. Given that,
      // we know the numerator in the divides below must be zero, so we can
      // pick an arbitrary non-zero value for the denominator (e.g. stride)
      // and produce the right result.
      // FIXME: Handle the case where Stride is poison?
      auto wouldZeroStrideBeUB = [&]() {
        // Proof by contradiction. Suppose the stride were zero. If we can
        // prove that the backedge *is* taken on the first iteration, then
        // since we know this condition controls the sole exit, we must have
        // an infinite loop. We can't have a (well defined) infinite loop per
        // the check just above.
        // Note: The (Start - Stride) term is used to get the start' term from
        // (start' + stride,+,stride). Remember that we only care about the
        // result of this expression when stride == 0 at runtime.
        auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
        return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
      };
      if (!wouldZeroStrideBeUB()) {
        Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
      }
    }
  } else if (!Stride->isOne() && !NoWrap) {
    auto isUBOnWrap = [&]() {
      // From no-self-wrap, we need to then prove no-(un)signed-wrap. This
      // follows trivially from the fact that every (un)signed-wrapped, but
      // not self-wrapped value must be LT than the last value before
      // (un)signed wrap. Since we know that last value didn't exit, nor
      // will any smaller one.
      return canAssumeNoSelfWrap(IV);
    };

    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing to optimize in presence of
    // undefined behaviors like the case of C language.
    if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
      return getCouldNotCompute();
  }

  // On all paths just preceding, we established the following invariant:
  // IV can be assumed not to overflow up to and including the exiting
  // iteration. We proved this in one of two ways:
  // 1) We can show overflow doesn't occur before the exiting iteration,
  //    via 1a) canIVOverflowOnLT or 1b) a step of one.
  // 2) We can show that if overflow occurs, the loop must execute UB
  //    before any possible exit.
  // Note that we have not yet proved RHS invariant (in general).

  const SCEV *Start = IV->getStart();

  // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
  // If we convert to integers, isLoopEntryGuardedByCond will miss some cases.
  // Use integer-typed versions for the actual computation; we can't subtract
  // pointers in general.
  const SCEV *OrigStart = Start;
  const SCEV *OrigRHS = RHS;
  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (RHS->getType()->isPointerTy()) {
    RHS = getLosslessPtrToIntExpr(RHS);
    if (isa<SCEVCouldNotCompute>(RHS))
      return RHS;
  }

  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we
  // can calculate the MaxBECount, given the start, stride and max value for
  // the end bound of the loop (RHS), and the fact that IV does not overflow
  // (which is checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     MaxBECount, false /*MaxOrZero*/, Predicates);
  }

  // We use the expression (max(End,Start)-Start)/Stride to describe the
  // backedge count, as if the backedge is taken at least once max(End,Start)
  // is End and so the result is as above, and if not max(End,Start) is Start
  // so we get a backedge count of zero.
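  // For example (illustrative), with Start = 0, RHS = 7 and Stride = 2 the
  // backedge is taken for the IV values 0, 2, 4 and 6, and
  // ceil((max(7,0) - 0) / 2) = 4; with Start = 10 and RHS = 7 we get
  // ceil((max(7,10) - 10) / 2) = 0.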
  const SCEV *BECount = nullptr;
  auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
  assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!");
  assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!");
  assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!");
  // Can we prove max(RHS,Start) > Start - Stride?
  if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) &&
      isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) {
    // In this case, we can use a refined formula for computing backedge
    // taken count. The general formula remains:
    //   "End-Start /uceiling Stride" where "End = max(RHS,Start)"
    // We want to use the alternate formula:
    //   "((End - 1) - (Start - Stride)) /u Stride"
    // Let's do a quick case analysis to show these are equivalent under
    // our precondition that max(RHS,Start) > Start - Stride.
    // * For RHS <= Start, the backedge-taken count must be zero.
    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
    //   "((Start - 1) - (Start - Stride)) /u Stride" which simplifies to
    //   "(Stride - 1) /u Stride", which is indeed zero for all non-zero
    //   values of Stride. For a stride of 0, we've used umax(1,Stride)
    //   above, reducing this to the stride of 1 case.
    // * For RHS >= Start, the backedge count must be
    //   "RHS-Start /uceil Stride".
    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
    //   "((RHS - 1) - (Start - Stride)) /u Stride", which reassociates to
    //   "(RHS - (Start - Stride) - 1) /u Stride".
    //   Our preconditions trivially imply no overflow in that form.
    const SCEV *MinusOne = getMinusOne(Stride->getType());
    const SCEV *Numerator =
        getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride));
    BECount = getUDivExpr(Numerator, Stride);
  }

  const SCEV *BECountIfBackedgeTaken = nullptr;
  if (!BECount) {
    auto canProveRHSGreaterThanEqualStart = [&]() {
      auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      const SCEV *GuardedRHS = applyLoopGuards(OrigRHS, L);
      const SCEV *GuardedStart = applyLoopGuards(OrigStart, L);

      if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart) ||
          isKnownPredicate(CondGE, GuardedRHS, GuardedStart))
        return true;

      // (RHS > Start - 1) implies RHS >= Start.
      // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
      //   "Start - 1" doesn't overflow.
      // * For signed comparison, if Start - 1 does overflow, it's equal
      //   to INT_MAX, and "RHS >s INT_MAX" is trivially false.
      // * For unsigned comparison, if Start - 1 does overflow, it's equal
      //   to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
      //
      // FIXME: Should isLoopEntryGuardedByCond do this for us?
      auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      auto *StartMinusOne =
          getAddExpr(OrigStart, getMinusOne(OrigStart->getType()));
      return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
    };

    // If we know that RHS >= Start in the context of the loop, then we know
    // that max(RHS, Start) = RHS at this point.
    const SCEV *End;
    if (canProveRHSGreaterThanEqualStart()) {
      End = RHS;
    } else {
      // If RHS < Start, the backedge will be taken zero times. So in
      // general, we can write the backedge-taken count as:
      //
      //   RHS >= Start ?
      //       ceil(RHS - Start) / Stride : 0
      //
      // We convert it to the following to make it more convenient for SCEV:
      //
      //   ceil(max(RHS, Start) - Start) / Stride
      End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);

      // See what would happen if we assume the backedge is taken. This is
      // used to compute MaxBECount.
      BECountIfBackedgeTaken =
          getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
    }

    // At this point, we know:
    //
    // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
    // 2. The index variable doesn't overflow.
    //
    // Therefore, we know N exists such that
    // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
    // doesn't overflow.
    //
    // Using this information, try to prove whether the addition in
    // "(Start - End) + (Stride - 1)" has unsigned overflow.
    const SCEV *One = getOne(Stride->getType());
    bool MayAddOverflow = [&] {
      if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
        if (StrideC->getAPInt().isPowerOf2()) {
          // Suppose Stride is a power of two, and Start/End are unsigned
          // integers. Let UMAX be the largest representable unsigned
          // integer.
          //
          // By the preconditions of this function, we know
          // "(Start + Stride * N) >= End", and this doesn't overflow.
          // As a formula:
          //
          //   End <= (Start + Stride * N) <= UMAX
          //
          // Subtracting Start from all the terms:
          //
          //   End - Start <= Stride * N <= UMAX - Start
          //
          // Since Start is unsigned, UMAX - Start <= UMAX. Therefore:
          //
          //   End - Start <= Stride * N <= UMAX
          //
          // Stride * N is a multiple of Stride. Therefore,
          //
          //   End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
          //
          // Since Stride is a power of two, UMAX + 1 is divisible by
          // Stride. Therefore, UMAX mod Stride == Stride - 1. So we can
          // write:
          //
          //   End - Start <= Stride * N <= UMAX - Stride + 1
          //
          // Dropping the middle term:
          //
          //   End - Start <= UMAX - Stride + 1
          //
          // Adding Stride - 1 to both sides:
          //
          //   (End - Start) + (Stride - 1) <= UMAX
          //
          // In other words, the addition doesn't have unsigned overflow.
          //
          // A similar proof works if we treat Start/End as signed values.
          // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to
          // use signed max instead of unsigned max. Note that we're trying
          // to prove a lack of unsigned overflow in either case.
          return false;
        }
      }
      if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
        // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1.
        // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End.
        // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End.
        //
        // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End.
        return false;
      }
      return true;
    }();

    const SCEV *Delta = getMinusSCEV(End, Start);
    if (!MayAddOverflow) {
      // floor((D + (S - 1)) / S)
      // We prefer this formulation if it's legal because it's fewer
      // operations.
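      // For example (illustrative), with D = 7 and S = 2:
      // floor((7 + (2 - 1)) / 2) = 4, which matches ceil(7 / 2).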
      BECount =
          getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
    } else {
      BECount = getUDivCeilSCEV(Delta, Stride);
    }
  }

  const SCEV *ConstantMaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount)) {
    ConstantMaxBECount = BECount;
  } else if (BECountIfBackedgeTaken &&
             isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    ConstantMaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    ConstantMaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount));

  const SCEV *SymbolicMaxBECount =
      isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
  return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
                   Predicates);
}

ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
    const SCEV *LHS, const SCEV *RHS, const Loop *L, bool IsSigned,
    bool ControlsOnlyExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsOnlyExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values.
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing to optimize in presence of
  // undefined behaviors like the case of C language.
  if (!Stride->isOne() && !NoWrap)
    if (canIVOverflowOnGT(RHS, Stride, IsSigned))
      return getCouldNotCompute();

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of the loop, then we know
    // that min(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ?
          getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (End->getType()->isPointerTy()) {
    End = getLosslessPtrToIntExpr(End);
    if (isa<SCEVCouldNotCompute>(End))
      return End;
  }

  // Compute ((Start - End) + (Stride - 1)) / Stride.
  // FIXME: This can overflow. Holding off on fixing this for now;
  // howManyGreaterThans will hopefully be gone soon.
  const SCEV *One = getOne(Stride->getType());
  const SCEV *BECount = getUDivExpr(
      getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *ConstantMaxBECount =
      isa<SCEVConstant>(BECount)
          ? BECount
          : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
                            getConstant(MinStride));

  if (isa<SCEVCouldNotCompute>(ConstantMaxBECount))
    ConstantMaxBECount = BECount;
  const SCEV *SymbolicMaxBECount =
      isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;

  return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false,
                   Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet()) // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(operands());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay, at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
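  // (Illustrative affine case for the code below: solving {0,+,3} against
  // the range [0, 10) stays in range for the values 0, 3, 6 and 9, and first
  // leaves it at iteration (9 + 3) / 3 = 4, where the value is 12.)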
13258 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 13259 if (!Range.contains(APInt(BitWidth, 0))) 13260 return SE.getZero(getType()); 13261 13262 if (isAffine()) { 13263 // If this is an affine expression then we have this situation: 13264 // Solve {0,+,A} in Range === Ax in Range 13265 13266 // We know that zero is in the range. If A is positive then we know that 13267 // the upper value of the range must be the first possible exit value. 13268 // If A is negative then the lower of the range is the last possible loop 13269 // value. Also note that we already checked for a full range. 13270 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 13271 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 13272 13273 // The exit value should be (End+A)/A. 13274 APInt ExitVal = (End + A).udiv(A); 13275 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 13276 13277 // Evaluate at the exit value. If we really did fall out of the valid 13278 // range, then we computed our trip count, otherwise wrap around or other 13279 // things must have happened. 13280 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 13281 if (Range.contains(Val->getValue())) 13282 return SE.getCouldNotCompute(); // Something strange happened 13283 13284 // Ensure that the previous value is in the range. 13285 assert(Range.contains( 13286 EvaluateConstantChrecAtConstant(this, 13287 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 13288 "Linear scev computation is off in a bad way!"); 13289 return SE.getConstant(ExitValue); 13290 } 13291 13292 if (isQuadratic()) { 13293 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 13294 return SE.getConstant(*S); 13295 } 13296 13297 return SE.getCouldNotCompute(); 13298 } 13299 13300 const SCEVAddRecExpr * 13301 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 13302 assert(getNumOperands() > 1 && "AddRec with zero step?"); 13303 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 13304 // but in this case we cannot guarantee that the value returned will be an 13305 // AddRec because SCEV does not have a fixed point where it stops 13306 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 13307 // may happen if we reach arithmetic depth limit while simplifying. So we 13308 // construct the returned value explicitly. 13309 SmallVector<const SCEV *, 3> Ops; 13310 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 13311 // (this + Step) is {A+B,+,B+C,+...,+,N}. 13312 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 13313 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 13314 // We know that the last operand is not a constant zero (otherwise it would 13315 // have been popped out earlier). This guarantees us that if the result has 13316 // the same last operand, then it will also not be popped out, meaning that 13317 // the returned value will be an AddRec. 13318 const SCEV *Last = getOperand(getNumOperands() - 1); 13319 assert(!Last->isZero() && "Recurrency with zero step?"); 13320 Ops.push_back(Last); 13321 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 13322 SCEV::FlagAnyWrap)); 13323 } 13324 13325 // Return true when S contains at least an undef value. 
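// An undef can only enter an expression through a SCEVUnknown wrapping an
// UndefValue, so scanning the SCEVUnknown leaves suffices. A minimal sketch
// of an equivalent standalone query using the SCEVExprContains helper:
//   bool HasUndef = SCEVExprContains(S, [](const SCEV *X) {
//     auto *U = dyn_cast<SCEVUnknown>(X);
//     return U && isa<UndefValue>(U->getValue());
//   });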
13326 bool ScalarEvolution::containsUndefs(const SCEV *S) const { 13327 return SCEVExprContains(S, [](const SCEV *S) { 13328 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 13329 return isa<UndefValue>(SU->getValue()); 13330 return false; 13331 }); 13332 } 13333 13334 // Return true when S contains a value that is a nullptr. 13335 bool ScalarEvolution::containsErasedValue(const SCEV *S) const { 13336 return SCEVExprContains(S, [](const SCEV *S) { 13337 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 13338 return SU->getValue() == nullptr; 13339 return false; 13340 }); 13341 } 13342 13343 /// Return the size of an element read or written by Inst. 13344 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 13345 Type *Ty; 13346 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 13347 Ty = Store->getValueOperand()->getType(); 13348 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 13349 Ty = Load->getType(); 13350 else 13351 return nullptr; 13352 13353 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 13354 return getSizeOfExpr(ETy, Ty); 13355 } 13356 13357 //===----------------------------------------------------------------------===// 13358 // SCEVCallbackVH Class Implementation 13359 //===----------------------------------------------------------------------===// 13360 13361 void ScalarEvolution::SCEVCallbackVH::deleted() { 13362 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 13363 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 13364 SE->ConstantEvolutionLoopExitValue.erase(PN); 13365 SE->eraseValueFromMap(getValPtr()); 13366 // this now dangles! 13367 } 13368 13369 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 13370 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 13371 13372 // Forget all the expressions associated with users of the old value, 13373 // so that future queries will recompute the expressions using the new 13374 // value. 13375 SE->forgetValue(getValPtr()); 13376 // this now dangles! 13377 } 13378 13379 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 13380 : CallbackVH(V), SE(se) {} 13381 13382 //===----------------------------------------------------------------------===// 13383 // ScalarEvolution Class Implementation 13384 //===----------------------------------------------------------------------===// 13385 13386 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 13387 AssumptionCache &AC, DominatorTree &DT, 13388 LoopInfo &LI) 13389 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 13390 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 13391 LoopDispositions(64), BlockDispositions(64) { 13392 // To use guards for proving predicates, we need to scan every instruction in 13393 // relevant basic blocks, and not just terminators. Doing this is a waste of 13394 // time if the IR does not actually contain any calls to 13395 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 13396 // 13397 // This pessimizes the case where a pass that preserves ScalarEvolution wants 13398 // to _add_ guards to the module when there weren't any before, and wants 13399 // ScalarEvolution to optimize based on those guards. For now we prefer to be 13400 // efficient in lieu of being smart in that rather obscure case. 
13401 13402 auto *GuardDecl = F.getParent()->getFunction( 13403 Intrinsic::getName(Intrinsic::experimental_guard)); 13404 HasGuards = GuardDecl && !GuardDecl->use_empty(); 13405 } 13406 13407 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 13408 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 13409 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 13410 ValueExprMap(std::move(Arg.ValueExprMap)), 13411 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 13412 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 13413 PendingMerges(std::move(Arg.PendingMerges)), 13414 ConstantMultipleCache(std::move(Arg.ConstantMultipleCache)), 13415 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 13416 PredicatedBackedgeTakenCounts( 13417 std::move(Arg.PredicatedBackedgeTakenCounts)), 13418 BECountUsers(std::move(Arg.BECountUsers)), 13419 ConstantEvolutionLoopExitValue( 13420 std::move(Arg.ConstantEvolutionLoopExitValue)), 13421 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 13422 ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)), 13423 LoopDispositions(std::move(Arg.LoopDispositions)), 13424 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 13425 BlockDispositions(std::move(Arg.BlockDispositions)), 13426 SCEVUsers(std::move(Arg.SCEVUsers)), 13427 UnsignedRanges(std::move(Arg.UnsignedRanges)), 13428 SignedRanges(std::move(Arg.SignedRanges)), 13429 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 13430 UniquePreds(std::move(Arg.UniquePreds)), 13431 SCEVAllocator(std::move(Arg.SCEVAllocator)), 13432 LoopUsers(std::move(Arg.LoopUsers)), 13433 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 13434 FirstUnknown(Arg.FirstUnknown) { 13435 Arg.FirstUnknown = nullptr; 13436 } 13437 13438 ScalarEvolution::~ScalarEvolution() { 13439 // Iterate through all the SCEVUnknown instances and call their 13440 // destructors, so that they release their references to their values. 
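  // Note that U->Next must be read before the node is destroyed: once
  // ~SCEVUnknown() has run, the node's fields may no longer be accessed,
  // even though its storage remains owned by SCEVAllocator.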
13441 for (SCEVUnknown *U = FirstUnknown; U;) { 13442 SCEVUnknown *Tmp = U; 13443 U = U->Next; 13444 Tmp->~SCEVUnknown(); 13445 } 13446 FirstUnknown = nullptr; 13447 13448 ExprValueMap.clear(); 13449 ValueExprMap.clear(); 13450 HasRecMap.clear(); 13451 BackedgeTakenCounts.clear(); 13452 PredicatedBackedgeTakenCounts.clear(); 13453 13454 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 13455 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 13456 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 13457 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 13458 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 13459 } 13460 13461 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 13462 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 13463 } 13464 13465 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 13466 const Loop *L) { 13467 // Print all inner loops first 13468 for (Loop *I : *L) 13469 PrintLoopInfo(OS, SE, I); 13470 13471 OS << "Loop "; 13472 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13473 OS << ": "; 13474 13475 SmallVector<BasicBlock *, 8> ExitingBlocks; 13476 L->getExitingBlocks(ExitingBlocks); 13477 if (ExitingBlocks.size() != 1) 13478 OS << "<multiple exits> "; 13479 13480 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 13481 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 13482 else 13483 OS << "Unpredictable backedge-taken count.\n"; 13484 13485 if (ExitingBlocks.size() > 1) 13486 for (BasicBlock *ExitingBlock : ExitingBlocks) { 13487 OS << " exit count for " << ExitingBlock->getName() << ": " 13488 << *SE->getExitCount(L, ExitingBlock) << "\n"; 13489 } 13490 13491 OS << "Loop "; 13492 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13493 OS << ": "; 13494 13495 auto *ConstantBTC = SE->getConstantMaxBackedgeTakenCount(L); 13496 if (!isa<SCEVCouldNotCompute>(ConstantBTC)) { 13497 OS << "constant max backedge-taken count is " << *ConstantBTC; 13498 if (SE->isBackedgeTakenCountMaxOrZero(L)) 13499 OS << ", actual taken count either this or zero."; 13500 } else { 13501 OS << "Unpredictable constant max backedge-taken count. "; 13502 } 13503 13504 OS << "\n" 13505 "Loop "; 13506 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13507 OS << ": "; 13508 13509 auto *SymbolicBTC = SE->getSymbolicMaxBackedgeTakenCount(L); 13510 if (!isa<SCEVCouldNotCompute>(SymbolicBTC)) { 13511 OS << "symbolic max backedge-taken count is " << *SymbolicBTC; 13512 if (SE->isBackedgeTakenCountMaxOrZero(L)) 13513 OS << ", actual taken count either this or zero."; 13514 } else { 13515 OS << "Unpredictable symbolic max backedge-taken count. 
"; 13516 } 13517 13518 OS << "\n"; 13519 if (ExitingBlocks.size() > 1) 13520 for (BasicBlock *ExitingBlock : ExitingBlocks) { 13521 OS << " symbolic max exit count for " << ExitingBlock->getName() << ": " 13522 << *SE->getExitCount(L, ExitingBlock, ScalarEvolution::SymbolicMaximum) 13523 << "\n"; 13524 } 13525 13526 OS << "Loop "; 13527 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13528 OS << ": "; 13529 13530 SmallVector<const SCEVPredicate *, 4> Preds; 13531 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Preds); 13532 if (!isa<SCEVCouldNotCompute>(PBT)) { 13533 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 13534 OS << " Predicates:\n"; 13535 for (const auto *P : Preds) 13536 P->print(OS, 4); 13537 } else { 13538 OS << "Unpredictable predicated backedge-taken count.\n"; 13539 } 13540 13541 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 13542 OS << "Loop "; 13543 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13544 OS << ": "; 13545 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 13546 } 13547 } 13548 13549 namespace llvm { 13550 raw_ostream &operator<<(raw_ostream &OS, ScalarEvolution::LoopDisposition LD) { 13551 switch (LD) { 13552 case ScalarEvolution::LoopVariant: 13553 OS << "Variant"; 13554 break; 13555 case ScalarEvolution::LoopInvariant: 13556 OS << "Invariant"; 13557 break; 13558 case ScalarEvolution::LoopComputable: 13559 OS << "Computable"; 13560 break; 13561 } 13562 return OS; 13563 } 13564 13565 raw_ostream &operator<<(raw_ostream &OS, ScalarEvolution::BlockDisposition BD) { 13566 switch (BD) { 13567 case ScalarEvolution::DoesNotDominateBlock: 13568 OS << "DoesNotDominate"; 13569 break; 13570 case ScalarEvolution::DominatesBlock: 13571 OS << "Dominates"; 13572 break; 13573 case ScalarEvolution::ProperlyDominatesBlock: 13574 OS << "ProperlyDominates"; 13575 break; 13576 } 13577 return OS; 13578 } 13579 } 13580 13581 void ScalarEvolution::print(raw_ostream &OS) const { 13582 // ScalarEvolution's implementation of the print method is to print 13583 // out SCEV values of all instructions that are interesting. Doing 13584 // this potentially causes it to create new SCEV objects though, 13585 // which technically conflicts with the const qualifier. This isn't 13586 // observable from outside the class though, so casting away the 13587 // const isn't dangerous. 
13588 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 13589 13590 if (ClassifyExpressions) { 13591 OS << "Classifying expressions for: "; 13592 F.printAsOperand(OS, /*PrintType=*/false); 13593 OS << "\n"; 13594 for (Instruction &I : instructions(F)) 13595 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 13596 OS << I << '\n'; 13597 OS << " --> "; 13598 const SCEV *SV = SE.getSCEV(&I); 13599 SV->print(OS); 13600 if (!isa<SCEVCouldNotCompute>(SV)) { 13601 OS << " U: "; 13602 SE.getUnsignedRange(SV).print(OS); 13603 OS << " S: "; 13604 SE.getSignedRange(SV).print(OS); 13605 } 13606 13607 const Loop *L = LI.getLoopFor(I.getParent()); 13608 13609 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 13610 if (AtUse != SV) { 13611 OS << " --> "; 13612 AtUse->print(OS); 13613 if (!isa<SCEVCouldNotCompute>(AtUse)) { 13614 OS << " U: "; 13615 SE.getUnsignedRange(AtUse).print(OS); 13616 OS << " S: "; 13617 SE.getSignedRange(AtUse).print(OS); 13618 } 13619 } 13620 13621 if (L) { 13622 OS << "\t\t" "Exits: "; 13623 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 13624 if (!SE.isLoopInvariant(ExitValue, L)) { 13625 OS << "<<Unknown>>"; 13626 } else { 13627 OS << *ExitValue; 13628 } 13629 13630 bool First = true; 13631 for (const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 13632 if (First) { 13633 OS << "\t\t" "LoopDispositions: { "; 13634 First = false; 13635 } else { 13636 OS << ", "; 13637 } 13638 13639 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13640 OS << ": " << SE.getLoopDisposition(SV, Iter); 13641 } 13642 13643 for (const auto *InnerL : depth_first(L)) { 13644 if (InnerL == L) 13645 continue; 13646 if (First) { 13647 OS << "\t\t" "LoopDispositions: { "; 13648 First = false; 13649 } else { 13650 OS << ", "; 13651 } 13652 13653 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13654 OS << ": " << SE.getLoopDisposition(SV, InnerL); 13655 } 13656 13657 OS << " }"; 13658 } 13659 13660 OS << "\n"; 13661 } 13662 } 13663 13664 OS << "Determining loop execution counts for: "; 13665 F.printAsOperand(OS, /*PrintType=*/false); 13666 OS << "\n"; 13667 for (Loop *I : LI) 13668 PrintLoopInfo(OS, &SE, I); 13669 } 13670 13671 ScalarEvolution::LoopDisposition 13672 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 13673 auto &Values = LoopDispositions[S]; 13674 for (auto &V : Values) { 13675 if (V.getPointer() == L) 13676 return V.getInt(); 13677 } 13678 Values.emplace_back(L, LoopVariant); 13679 LoopDisposition D = computeLoopDisposition(S, L); 13680 auto &Values2 = LoopDispositions[S]; 13681 for (auto &V : llvm::reverse(Values2)) { 13682 if (V.getPointer() == L) { 13683 V.setInt(D); 13684 break; 13685 } 13686 } 13687 return D; 13688 } 13689 13690 ScalarEvolution::LoopDisposition 13691 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 13692 switch (S->getSCEVType()) { 13693 case scConstant: 13694 case scVScale: 13695 return LoopInvariant; 13696 case scAddRecExpr: { 13697 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 13698 13699 // If L is the addrec's loop, it's computable. 13700 if (AR->getLoop() == L) 13701 return LoopComputable; 13702 13703 // Add recurrences are never invariant in the function-body (null loop). 13704 if (!L) 13705 return LoopVariant; 13706 13707 // Everything that is not defined at loop entry is variant. 
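    // E.g. if L is an outer loop containing AR's loop, then L's header
    // dominates the inner header, and the inner-loop AddRec takes different
    // values across iterations of L, so it is variant with respect to L.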
13708 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 13709 return LoopVariant; 13710 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 13711 " dominate the contained loop's header?"); 13712 13713 // This recurrence is invariant w.r.t. L if AR's loop contains L. 13714 if (AR->getLoop()->contains(L)) 13715 return LoopInvariant; 13716 13717 // This recurrence is variant w.r.t. L if any of its operands 13718 // are variant. 13719 for (const auto *Op : AR->operands()) 13720 if (!isLoopInvariant(Op, L)) 13721 return LoopVariant; 13722 13723 // Otherwise it's loop-invariant. 13724 return LoopInvariant; 13725 } 13726 case scTruncate: 13727 case scZeroExtend: 13728 case scSignExtend: 13729 case scPtrToInt: 13730 case scAddExpr: 13731 case scMulExpr: 13732 case scUDivExpr: 13733 case scUMaxExpr: 13734 case scSMaxExpr: 13735 case scUMinExpr: 13736 case scSMinExpr: 13737 case scSequentialUMinExpr: { 13738 bool HasVarying = false; 13739 for (const auto *Op : S->operands()) { 13740 LoopDisposition D = getLoopDisposition(Op, L); 13741 if (D == LoopVariant) 13742 return LoopVariant; 13743 if (D == LoopComputable) 13744 HasVarying = true; 13745 } 13746 return HasVarying ? LoopComputable : LoopInvariant; 13747 } 13748 case scUnknown: 13749 // All non-instruction values are loop invariant. All instructions are loop 13750 // invariant if they are not contained in the specified loop. 13751 // Instructions are never considered invariant in the function body 13752 // (null loop) because they are defined within the "loop". 13753 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 13754 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; 13755 return LoopInvariant; 13756 case scCouldNotCompute: 13757 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 13758 } 13759 llvm_unreachable("Unknown SCEV kind!"); 13760 } 13761 13762 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 13763 return getLoopDisposition(S, L) == LoopInvariant; 13764 } 13765 13766 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 13767 return getLoopDisposition(S, L) == LoopComputable; 13768 } 13769 13770 ScalarEvolution::BlockDisposition 13771 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 13772 auto &Values = BlockDispositions[S]; 13773 for (auto &V : Values) { 13774 if (V.getPointer() == BB) 13775 return V.getInt(); 13776 } 13777 Values.emplace_back(BB, DoesNotDominateBlock); 13778 BlockDisposition D = computeBlockDisposition(S, BB); 13779 auto &Values2 = BlockDispositions[S]; 13780 for (auto &V : llvm::reverse(Values2)) { 13781 if (V.getPointer() == BB) { 13782 V.setInt(D); 13783 break; 13784 } 13785 } 13786 return D; 13787 } 13788 13789 ScalarEvolution::BlockDisposition 13790 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 13791 switch (S->getSCEVType()) { 13792 case scConstant: 13793 case scVScale: 13794 return ProperlyDominatesBlock; 13795 case scAddRecExpr: { 13796 // This uses a "dominates" query instead of "properly dominates" query 13797 // to test for proper dominance too, because the instruction which 13798 // produces the addrec's value is a PHI, and a PHI effectively properly 13799 // dominates its entire containing block. 13800 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 13801 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 13802 return DoesNotDominateBlock; 13803 13804 // Fall through into SCEVNAryExpr handling. 
13805 [[fallthrough]]; 13806 } 13807 case scTruncate: 13808 case scZeroExtend: 13809 case scSignExtend: 13810 case scPtrToInt: 13811 case scAddExpr: 13812 case scMulExpr: 13813 case scUDivExpr: 13814 case scUMaxExpr: 13815 case scSMaxExpr: 13816 case scUMinExpr: 13817 case scSMinExpr: 13818 case scSequentialUMinExpr: { 13819 bool Proper = true; 13820 for (const SCEV *NAryOp : S->operands()) { 13821 BlockDisposition D = getBlockDisposition(NAryOp, BB); 13822 if (D == DoesNotDominateBlock) 13823 return DoesNotDominateBlock; 13824 if (D == DominatesBlock) 13825 Proper = false; 13826 } 13827 return Proper ? ProperlyDominatesBlock : DominatesBlock; 13828 } 13829 case scUnknown: 13830 if (Instruction *I = 13831 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 13832 if (I->getParent() == BB) 13833 return DominatesBlock; 13834 if (DT.properlyDominates(I->getParent(), BB)) 13835 return ProperlyDominatesBlock; 13836 return DoesNotDominateBlock; 13837 } 13838 return ProperlyDominatesBlock; 13839 case scCouldNotCompute: 13840 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 13841 } 13842 llvm_unreachable("Unknown SCEV kind!"); 13843 } 13844 13845 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 13846 return getBlockDisposition(S, BB) >= DominatesBlock; 13847 } 13848 13849 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 13850 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 13851 } 13852 13853 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 13854 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 13855 } 13856 13857 void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L, 13858 bool Predicated) { 13859 auto &BECounts = 13860 Predicated ? 
PredicatedBackedgeTakenCounts : BackedgeTakenCounts; 13861 auto It = BECounts.find(L); 13862 if (It != BECounts.end()) { 13863 for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) { 13864 for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) { 13865 if (!isa<SCEVConstant>(S)) { 13866 auto UserIt = BECountUsers.find(S); 13867 assert(UserIt != BECountUsers.end()); 13868 UserIt->second.erase({L, Predicated}); 13869 } 13870 } 13871 } 13872 BECounts.erase(It); 13873 } 13874 } 13875 13876 void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) { 13877 SmallPtrSet<const SCEV *, 8> ToForget(SCEVs.begin(), SCEVs.end()); 13878 SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end()); 13879 13880 while (!Worklist.empty()) { 13881 const SCEV *Curr = Worklist.pop_back_val(); 13882 auto Users = SCEVUsers.find(Curr); 13883 if (Users != SCEVUsers.end()) 13884 for (const auto *User : Users->second) 13885 if (ToForget.insert(User).second) 13886 Worklist.push_back(User); 13887 } 13888 13889 for (const auto *S : ToForget) 13890 forgetMemoizedResultsImpl(S); 13891 13892 for (auto I = PredicatedSCEVRewrites.begin(); 13893 I != PredicatedSCEVRewrites.end();) { 13894 std::pair<const SCEV *, const Loop *> Entry = I->first; 13895 if (ToForget.count(Entry.first)) 13896 PredicatedSCEVRewrites.erase(I++); 13897 else 13898 ++I; 13899 } 13900 } 13901 13902 void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) { 13903 LoopDispositions.erase(S); 13904 BlockDispositions.erase(S); 13905 UnsignedRanges.erase(S); 13906 SignedRanges.erase(S); 13907 HasRecMap.erase(S); 13908 ConstantMultipleCache.erase(S); 13909 13910 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) { 13911 UnsignedWrapViaInductionTried.erase(AR); 13912 SignedWrapViaInductionTried.erase(AR); 13913 } 13914 13915 auto ExprIt = ExprValueMap.find(S); 13916 if (ExprIt != ExprValueMap.end()) { 13917 for (Value *V : ExprIt->second) { 13918 auto ValueIt = ValueExprMap.find_as(V); 13919 if (ValueIt != ValueExprMap.end()) 13920 ValueExprMap.erase(ValueIt); 13921 } 13922 ExprValueMap.erase(ExprIt); 13923 } 13924 13925 auto ScopeIt = ValuesAtScopes.find(S); 13926 if (ScopeIt != ValuesAtScopes.end()) { 13927 for (const auto &Pair : ScopeIt->second) 13928 if (!isa_and_nonnull<SCEVConstant>(Pair.second)) 13929 llvm::erase(ValuesAtScopesUsers[Pair.second], 13930 std::make_pair(Pair.first, S)); 13931 ValuesAtScopes.erase(ScopeIt); 13932 } 13933 13934 auto ScopeUserIt = ValuesAtScopesUsers.find(S); 13935 if (ScopeUserIt != ValuesAtScopesUsers.end()) { 13936 for (const auto &Pair : ScopeUserIt->second) 13937 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S)); 13938 ValuesAtScopesUsers.erase(ScopeUserIt); 13939 } 13940 13941 auto BEUsersIt = BECountUsers.find(S); 13942 if (BEUsersIt != BECountUsers.end()) { 13943 // Work on a copy, as forgetBackedgeTakenCounts() will modify the original. 
    auto Copy = BEUsersIt->second;
    for (const auto &Pair : Copy)
      forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
    BECountUsers.erase(BEUsersIt);
  }

  auto FoldUser = FoldCacheUser.find(S);
  if (FoldUser != FoldCacheUser.end())
    for (auto &KV : FoldUser->second)
      FoldCache.erase(KV);
  FoldCacheUser.erase(S);
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::getReachableBlocks(
    SmallPtrSetImpl<BasicBlock *> &Reachable, Function &F) {
  SmallVector<BasicBlock *> Worklist;
  Worklist.push_back(&F.getEntryBlock());
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();
    if (!Reachable.insert(BB).second)
      continue;

    Value *Cond;
    BasicBlock *TrueBB, *FalseBB;
    if (match(BB->getTerminator(), m_Br(m_Value(Cond), m_BasicBlock(TrueBB),
                                        m_BasicBlock(FalseBB)))) {
      if (auto *C = dyn_cast<ConstantInt>(Cond)) {
        Worklist.push_back(C->isOne() ? TrueBB : FalseBB);
        continue;
      }

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        const SCEV *L = getSCEV(Cmp->getOperand(0));
        const SCEV *R = getSCEV(Cmp->getOperand(1));
        if (isKnownPredicateViaConstantRanges(Cmp->getPredicate(), L, R)) {
          Worklist.push_back(TrueBB);
          continue;
        }
        if (isKnownPredicateViaConstantRanges(Cmp->getInversePredicate(), L,
                                              R)) {
          Worklist.push_back(FalseBB);
          continue;
        }
      }
    }

    append_range(Worklist, successors(BB));
  }
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);
  SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
  SE2.getReachableBlocks(ReachableBlocks, F);

  auto GetDelta = [&](const SCEV *Old, const SCEV *New) -> const SCEV * {
    if (containsUndefs(Old) || containsUndefs(New)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes a value go from "undef"
      // to "undef+1" (say). The transform is fine, since in both cases the
      // result is "undef", but SCEV thinks the value increased by 1.
      return nullptr;
    }

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    const SCEV *Delta = SE2.getMinusSCEV(Old, New);
    if (!VerifySCEVStrict && !isa<SCEVConstant>(Delta))
      return nullptr;

    return Delta;
  };

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    llvm::append_range(LoopStack, *L);

    // Only verify BECounts in reachable loops. For an unreachable loop,
    // any BECount is legal.
    if (!ReachableBlocks.contains(L->getHeader()))
      continue;

    // Only verify cached BECounts. Computing new BECounts may change the
    // results of subsequent SCEV uses.
    auto It = BackedgeTakenCounts.find(L);
    if (It == BackedgeTakenCounts.end())
      continue;

    auto *CurBECount =
        SCM.visit(It->second.getExact(L, const_cast<ScalarEvolution *>(this)));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = GetDelta(CurBECount, NewBECount);
    if (Delta && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }

  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.insert(L).second)
      Worklist.append(L->begin(), L->end());
  }
  for (const auto &KV : ValueExprMap) {
#ifndef NDEBUG
    // Check for SCEV expressions referencing invalid/deleted loops.
    if (auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
      assert(ValidLoops.contains(AR->getLoop()) &&
             "AddRec references invalid loop");
    }
#endif

    // Check that the value is also part of the reverse map.
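    // The two maps are maintained as inverses: whenever ValueExprMap maps a
    // value V to an expression S, ExprValueMap[S] must contain V. E.g. if
    // %a and %b both fold to the same SCEV S, ExprValueMap[S] holds both.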
14123 auto It = ExprValueMap.find(KV.second); 14124 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) { 14125 dbgs() << "Value " << *KV.first 14126 << " is in ValueExprMap but not in ExprValueMap\n"; 14127 std::abort(); 14128 } 14129 14130 if (auto *I = dyn_cast<Instruction>(&*KV.first)) { 14131 if (!ReachableBlocks.contains(I->getParent())) 14132 continue; 14133 const SCEV *OldSCEV = SCM.visit(KV.second); 14134 const SCEV *NewSCEV = SE2.getSCEV(I); 14135 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV); 14136 if (Delta && !Delta->isZero()) { 14137 dbgs() << "SCEV for value " << *I << " changed!\n" 14138 << "Old: " << *OldSCEV << "\n" 14139 << "New: " << *NewSCEV << "\n" 14140 << "Delta: " << *Delta << "\n"; 14141 std::abort(); 14142 } 14143 } 14144 } 14145 14146 for (const auto &KV : ExprValueMap) { 14147 for (Value *V : KV.second) { 14148 auto It = ValueExprMap.find_as(V); 14149 if (It == ValueExprMap.end()) { 14150 dbgs() << "Value " << *V 14151 << " is in ExprValueMap but not in ValueExprMap\n"; 14152 std::abort(); 14153 } 14154 if (It->second != KV.first) { 14155 dbgs() << "Value " << *V << " mapped to " << *It->second 14156 << " rather than " << *KV.first << "\n"; 14157 std::abort(); 14158 } 14159 } 14160 } 14161 14162 // Verify integrity of SCEV users. 14163 for (const auto &S : UniqueSCEVs) { 14164 for (const auto *Op : S.operands()) { 14165 // We do not store dependencies of constants. 14166 if (isa<SCEVConstant>(Op)) 14167 continue; 14168 auto It = SCEVUsers.find(Op); 14169 if (It != SCEVUsers.end() && It->second.count(&S)) 14170 continue; 14171 dbgs() << "Use of operand " << *Op << " by user " << S 14172 << " is not being tracked!\n"; 14173 std::abort(); 14174 } 14175 } 14176 14177 // Verify integrity of ValuesAtScopes users. 14178 for (const auto &ValueAndVec : ValuesAtScopes) { 14179 const SCEV *Value = ValueAndVec.first; 14180 for (const auto &LoopAndValueAtScope : ValueAndVec.second) { 14181 const Loop *L = LoopAndValueAtScope.first; 14182 const SCEV *ValueAtScope = LoopAndValueAtScope.second; 14183 if (!isa<SCEVConstant>(ValueAtScope)) { 14184 auto It = ValuesAtScopesUsers.find(ValueAtScope); 14185 if (It != ValuesAtScopesUsers.end() && 14186 is_contained(It->second, std::make_pair(L, Value))) 14187 continue; 14188 dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: " 14189 << *ValueAtScope << " missing in ValuesAtScopesUsers\n"; 14190 std::abort(); 14191 } 14192 } 14193 } 14194 14195 for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) { 14196 const SCEV *ValueAtScope = ValueAtScopeAndVec.first; 14197 for (const auto &LoopAndValue : ValueAtScopeAndVec.second) { 14198 const Loop *L = LoopAndValue.first; 14199 const SCEV *Value = LoopAndValue.second; 14200 assert(!isa<SCEVConstant>(Value)); 14201 auto It = ValuesAtScopes.find(Value); 14202 if (It != ValuesAtScopes.end() && 14203 is_contained(It->second, std::make_pair(L, ValueAtScope))) 14204 continue; 14205 dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: " 14206 << *ValueAtScope << " missing in ValuesAtScopes\n"; 14207 std::abort(); 14208 } 14209 } 14210 14211 // Verify integrity of BECountUsers. 14212 auto VerifyBECountUsers = [&](bool Predicated) { 14213 auto &BECounts = 14214 Predicated ? 
        Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
    for (const auto &LoopAndBEInfo : BECounts) {
      for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
        for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
          if (!isa<SCEVConstant>(S)) {
            auto UserIt = BECountUsers.find(S);
            if (UserIt != BECountUsers.end() &&
                UserIt->second.contains({LoopAndBEInfo.first, Predicated}))
              continue;
            dbgs() << "Value " << *S << " for loop " << *LoopAndBEInfo.first
                   << " missing from BECountUsers\n";
            std::abort();
          }
        }
      }
    }
  };
  VerifyBECountUsers(/* Predicated */ false);
  VerifyBECountUsers(/* Predicated */ true);

  // Verify integrity of the loop disposition cache.
  for (auto &[S, Values] : LoopDispositions) {
    for (auto [Loop, CachedDisposition] : Values) {
      const auto RecomputedDisposition = SE2.getLoopDisposition(S, Loop);
      if (CachedDisposition != RecomputedDisposition) {
        dbgs() << "Cached disposition of " << *S << " for loop " << *Loop
               << " is incorrect: cached " << CachedDisposition << ", actual "
               << RecomputedDisposition << "\n";
        std::abort();
      }
    }
  }

  // Verify integrity of the block disposition cache.
  for (auto &[S, Values] : BlockDispositions) {
    for (auto [BB, CachedDisposition] : Values) {
      const auto RecomputedDisposition = SE2.getBlockDisposition(S, BB);
      if (CachedDisposition != RecomputedDisposition) {
        dbgs() << "Cached disposition of " << *S << " for block %"
               << BB->getName() << " is incorrect: cached " << CachedDisposition
               << ", actual " << RecomputedDisposition << "\n";
        std::abort();
      }
    }
  }

  // Verify FoldCache/FoldCacheUser caches.
  for (auto [FoldID, Expr] : FoldCache) {
    auto I = FoldCacheUser.find(Expr);
    if (I == FoldCacheUser.end()) {
      dbgs() << "Missing entry in FoldCacheUser for cached expression " << *Expr
             << "!\n";
      std::abort();
    }
    if (!is_contained(I->second, FoldID)) {
      dbgs() << "Missing FoldID in cached users of " << *Expr << "!\n";
      std::abort();
    }
  }
  for (auto [Expr, IDs] : FoldCacheUser) {
    for (auto &FoldID : IDs) {
      auto I = FoldCache.find(FoldID);
      if (I == FoldCache.end()) {
        dbgs() << "Missing entry in FoldCache for expression " << *Expr
               << "!\n";
        std::abort();
      }
      if (I->second != Expr) {
        dbgs() << "Entry in FoldCache doesn't match FoldCacheUser: "
               << *I->second << " != " << *Expr << "!\n";
        std::abort();
      }
    }
  }

  // Verify that ConstantMultipleCache computations are correct. We check that
  // cached multiples and recomputed multiples are multiples of each other to
  // verify correctness. It is possible that a recomputed multiple differs
  // from the cached multiple due to strengthened no-wrap flags or changes in
  // KnownBits computations.
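  // E.g. a cached multiple of 4 with a recomputed multiple of 8 passes
  // (8 is a multiple of 4), while cached 4 against recomputed 6 (neither
  // divides the other) indicates a real inconsistency and aborts.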
14294 for (auto [S, Multiple] : ConstantMultipleCache) { 14295 APInt RecomputedMultiple = SE2.getConstantMultiple(S); 14296 if ((Multiple != 0 && RecomputedMultiple != 0 && 14297 Multiple.urem(RecomputedMultiple) != 0 && 14298 RecomputedMultiple.urem(Multiple) != 0)) { 14299 dbgs() << "Incorrect cached computation in ConstantMultipleCache for " 14300 << *S << " : Computed " << RecomputedMultiple 14301 << " but cache contains " << Multiple << "!\n"; 14302 std::abort(); 14303 } 14304 } 14305 } 14306 14307 bool ScalarEvolution::invalidate( 14308 Function &F, const PreservedAnalyses &PA, 14309 FunctionAnalysisManager::Invalidator &Inv) { 14310 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 14311 // of its dependencies is invalidated. 14312 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 14313 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 14314 Inv.invalidate<AssumptionAnalysis>(F, PA) || 14315 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 14316 Inv.invalidate<LoopAnalysis>(F, PA); 14317 } 14318 14319 AnalysisKey ScalarEvolutionAnalysis::Key; 14320 14321 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 14322 FunctionAnalysisManager &AM) { 14323 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 14324 auto &AC = AM.getResult<AssumptionAnalysis>(F); 14325 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 14326 auto &LI = AM.getResult<LoopAnalysis>(F); 14327 return ScalarEvolution(F, TLI, AC, DT, LI); 14328 } 14329 14330 PreservedAnalyses 14331 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 14332 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 14333 return PreservedAnalyses::all(); 14334 } 14335 14336 PreservedAnalyses 14337 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 14338 // For compatibility with opt's -analyze feature under legacy pass manager 14339 // which was not ported to NPM. This keeps tests using 14340 // update_analyze_test_checks.py working. 
14341 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 14342 << F.getName() << "':\n"; 14343 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 14344 return PreservedAnalyses::all(); 14345 } 14346 14347 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 14348 "Scalar Evolution Analysis", false, true) 14349 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 14350 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 14351 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 14352 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 14353 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 14354 "Scalar Evolution Analysis", false, true) 14355 14356 char ScalarEvolutionWrapperPass::ID = 0; 14357 14358 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 14359 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 14360 } 14361 14362 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 14363 SE.reset(new ScalarEvolution( 14364 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 14365 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 14366 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 14367 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 14368 return false; 14369 } 14370 14371 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 14372 14373 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 14374 SE->print(OS); 14375 } 14376 14377 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 14378 if (!VerifySCEV) 14379 return; 14380 14381 SE->verify(); 14382 } 14383 14384 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 14385 AU.setPreservesAll(); 14386 AU.addRequiredTransitive<AssumptionCacheTracker>(); 14387 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 14388 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 14389 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 14390 } 14391 14392 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 14393 const SCEV *RHS) { 14394 return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS); 14395 } 14396 14397 const SCEVPredicate * 14398 ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred, 14399 const SCEV *LHS, const SCEV *RHS) { 14400 FoldingSetNodeID ID; 14401 assert(LHS->getType() == RHS->getType() && 14402 "Type mismatch between LHS and RHS"); 14403 // Unique this node based on the arguments 14404 ID.AddInteger(SCEVPredicate::P_Compare); 14405 ID.AddInteger(Pred); 14406 ID.AddPointer(LHS); 14407 ID.AddPointer(RHS); 14408 void *IP = nullptr; 14409 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 14410 return S; 14411 SCEVComparePredicate *Eq = new (SCEVAllocator) 14412 SCEVComparePredicate(ID.Intern(SCEVAllocator), Pred, LHS, RHS); 14413 UniquePreds.InsertNode(Eq, IP); 14414 return Eq; 14415 } 14416 14417 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 14418 const SCEVAddRecExpr *AR, 14419 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 14420 FoldingSetNodeID ID; 14421 // Unique this node based on the arguments 14422 ID.AddInteger(SCEVPredicate::P_Wrap); 14423 ID.AddPointer(AR); 14424 ID.AddInteger(AddedFlags); 14425 void *IP = nullptr; 14426 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 14427 return S; 14428 auto *OF = new (SCEVAllocator) 14429 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 14430 UniquePreds.InsertNode(OF, IP); 14431 return OF; 14432 } 
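// A minimal sketch of how clients typically use the predicate factories
// above (PredicatedScalarEvolution, defined later in this file, follows
// this pattern). Predicates are uniqued in UniquePreds, so pointer equality
// is meaningful, and implies() avoids accumulating redundant checks:
//   const SCEVPredicate *P =
//       SE.getWrapPredicate(AR, SCEVWrapPredicate::IncrementNUSW);
//   if (!KnownPreds->implies(P))   // KnownPreds: client's SCEVUnionPredicate
//     RequiredChecks.push_back(P); // hypothetical client-side list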
14433 14434 namespace { 14435 14436 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 14437 public: 14438 14439 /// Rewrites \p S in the context of a loop L and the SCEV predication 14440 /// infrastructure. 14441 /// 14442 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 14443 /// equivalences present in \p Pred. 14444 /// 14445 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 14446 /// \p NewPreds such that the result will be an AddRecExpr. 14447 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 14448 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 14449 const SCEVPredicate *Pred) { 14450 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 14451 return Rewriter.visit(S); 14452 } 14453 14454 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 14455 if (Pred) { 14456 if (auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) { 14457 for (const auto *Pred : U->getPredicates()) 14458 if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) 14459 if (IPred->getLHS() == Expr && 14460 IPred->getPredicate() == ICmpInst::ICMP_EQ) 14461 return IPred->getRHS(); 14462 } else if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) { 14463 if (IPred->getLHS() == Expr && 14464 IPred->getPredicate() == ICmpInst::ICMP_EQ) 14465 return IPred->getRHS(); 14466 } 14467 } 14468 return convertToAddRecWithPreds(Expr); 14469 } 14470 14471 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 14472 const SCEV *Operand = visit(Expr->getOperand()); 14473 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 14474 if (AR && AR->getLoop() == L && AR->isAffine()) { 14475 // This couldn't be folded because the operand didn't have the nuw 14476 // flag. Add the nusw flag as an assumption that we could make. 14477 const SCEV *Step = AR->getStepRecurrence(SE); 14478 Type *Ty = Expr->getType(); 14479 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 14480 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 14481 SE.getSignExtendExpr(Step, Ty), L, 14482 AR->getNoWrapFlags()); 14483 } 14484 return SE.getZeroExtendExpr(Operand, Expr->getType()); 14485 } 14486 14487 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 14488 const SCEV *Operand = visit(Expr->getOperand()); 14489 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 14490 if (AR && AR->getLoop() == L && AR->isAffine()) { 14491 // This couldn't be folded because the operand didn't have the nsw 14492 // flag. Add the nssw flag as an assumption that we could make. 14493 const SCEV *Step = AR->getStepRecurrence(SE); 14494 Type *Ty = Expr->getType(); 14495 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 14496 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 14497 SE.getSignExtendExpr(Step, Ty), L, 14498 AR->getNoWrapFlags()); 14499 } 14500 return SE.getSignExtendExpr(Operand, Expr->getType()); 14501 } 14502 14503 private: 14504 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 14505 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 14506 const SCEVPredicate *Pred) 14507 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 14508 14509 bool addOverflowAssumption(const SCEVPredicate *P) { 14510 if (!NewPreds) { 14511 // Check if we've already made this assumption. 
14512 return Pred && Pred->implies(P); 14513 } 14514 NewPreds->insert(P); 14515 return true; 14516 } 14517 14518 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 14519 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 14520 auto *A = SE.getWrapPredicate(AR, AddedFlags); 14521 return addOverflowAssumption(A); 14522 } 14523 14524 // If \p Expr represents a PHINode, we try to see if it can be represented 14525 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 14526 // to add this predicate as a runtime overflow check, we return the AddRec. 14527 // If \p Expr does not meet these conditions (is not a PHI node, or we 14528 // couldn't create an AddRec for it, or couldn't add the predicate), we just 14529 // return \p Expr. 14530 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 14531 if (!isa<PHINode>(Expr->getValue())) 14532 return Expr; 14533 std::optional< 14534 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 14535 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 14536 if (!PredicatedRewrite) 14537 return Expr; 14538 for (const auto *P : PredicatedRewrite->second){ 14539 // Wrap predicates from outer loops are not supported. 14540 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 14541 if (L != WP->getExpr()->getLoop()) 14542 return Expr; 14543 } 14544 if (!addOverflowAssumption(P)) 14545 return Expr; 14546 } 14547 return PredicatedRewrite->first; 14548 } 14549 14550 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 14551 const SCEVPredicate *Pred; 14552 const Loop *L; 14553 }; 14554 14555 } // end anonymous namespace 14556 14557 const SCEV * 14558 ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 14559 const SCEVPredicate &Preds) { 14560 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 14561 } 14562 14563 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 14564 const SCEV *S, const Loop *L, 14565 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 14566 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 14567 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 14568 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 14569 14570 if (!AddRec) 14571 return nullptr; 14572 14573 // Since the transformation was successful, we can now transfer the SCEV 14574 // predicates. 
  for (const auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVComparePredicate::SCEVComparePredicate(const FoldingSetNodeIDRef ID,
                                           const ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVComparePredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVComparePredicate>(N);

  if (!Op)
    return false;

  if (Pred != ICmpInst::ICMP_EQ)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVComparePredicate::isAlwaysTrue() const { return false; }

void SCEVComparePredicate::print(raw_ostream &OS, unsigned Depth) const {
  if (Pred == ICmpInst::ICMP_EQ)
    OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
  else
    OS.indent(Depth) << "Compare predicate: " << *LHS << " " << Pred << " "
                     << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEVAddRecExpr *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached so create a dummy set ID for it.
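/// A union is always-true iff every member is; it implies a predicate N if
/// any member implies N, and implies another union only if it implies each
/// of that union's members.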
14671 SCEVUnionPredicate::SCEVUnionPredicate(ArrayRef<const SCEVPredicate *> Preds) 14672 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) { 14673 for (const auto *P : Preds) 14674 add(P); 14675 } 14676 14677 bool SCEVUnionPredicate::isAlwaysTrue() const { 14678 return all_of(Preds, 14679 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 14680 } 14681 14682 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 14683 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 14684 return all_of(Set->Preds, 14685 [this](const SCEVPredicate *I) { return this->implies(I); }); 14686 14687 return any_of(Preds, 14688 [N](const SCEVPredicate *I) { return I->implies(N); }); 14689 } 14690 14691 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 14692 for (const auto *Pred : Preds) 14693 Pred->print(OS, Depth); 14694 } 14695 14696 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 14697 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 14698 for (const auto *Pred : Set->Preds) 14699 add(Pred); 14700 return; 14701 } 14702 14703 Preds.push_back(N); 14704 } 14705 14706 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 14707 Loop &L) 14708 : SE(SE), L(L) { 14709 SmallVector<const SCEVPredicate*, 4> Empty; 14710 Preds = std::make_unique<SCEVUnionPredicate>(Empty); 14711 } 14712 14713 void ScalarEvolution::registerUser(const SCEV *User, 14714 ArrayRef<const SCEV *> Ops) { 14715 for (const auto *Op : Ops) 14716 // We do not expect that forgetting cached data for SCEVConstants will ever 14717 // open any prospects for sharpening or introduce any correctness issues, 14718 // so we don't bother storing their dependencies. 14719 if (!isa<SCEVConstant>(Op)) 14720 SCEVUsers[Op].insert(User); 14721 } 14722 14723 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 14724 const SCEV *Expr = SE.getSCEV(V); 14725 RewriteEntry &Entry = RewriteMap[Expr]; 14726 14727 // If we already have an entry and the version matches, return it. 14728 if (Entry.second && Generation == Entry.first) 14729 return Entry.second; 14730 14731 // We found an entry but it's stale. Rewrite the stale entry 14732 // according to the current predicate. 14733 if (Entry.second) 14734 Expr = Entry.second; 14735 14736 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds); 14737 Entry = {Generation, NewSCEV}; 14738 14739 return NewSCEV; 14740 } 14741 14742 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 14743 if (!BackedgeCount) { 14744 SmallVector<const SCEVPredicate *, 4> Preds; 14745 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds); 14746 for (const auto *P : Preds) 14747 addPredicate(*P); 14748 } 14749 return BackedgeCount; 14750 } 14751 14752 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 14753 if (Preds->implies(&Pred)) 14754 return; 14755 14756 auto &OldPreds = Preds->getPredicates(); 14757 SmallVector<const SCEVPredicate*, 4> NewPreds(OldPreds.begin(), OldPreds.end()); 14758 NewPreds.push_back(&Pred); 14759 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds); 14760 updateGeneration(); 14761 } 14762 14763 const SCEVPredicate &PredicatedScalarEvolution::getPredicate() const { 14764 return *Preds; 14765 } 14766 14767 void PredicatedScalarEvolution::updateGeneration() { 14768 // If the generation number wrapped recompute everything. 
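  // Stale entries (tagged with an older generation) are otherwise rewritten
  // lazily in getSCEV(); only on the unlikely wrap back to 0 must every
  // cached entry be refreshed eagerly, since a stale tag could then collide
  // with the new generation number.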
14769 if (++Generation == 0) { 14770 for (auto &II : RewriteMap) { 14771 const SCEV *Rewritten = II.second.second; 14772 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)}; 14773 } 14774 } 14775 } 14776 14777 void PredicatedScalarEvolution::setNoOverflow( 14778 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 14779 const SCEV *Expr = getSCEV(V); 14780 const auto *AR = cast<SCEVAddRecExpr>(Expr); 14781 14782 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 14783 14784 // Clear the statically implied flags. 14785 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 14786 addPredicate(*SE.getWrapPredicate(AR, Flags)); 14787 14788 auto II = FlagsMap.insert({V, Flags}); 14789 if (!II.second) 14790 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 14791 } 14792 14793 bool PredicatedScalarEvolution::hasNoOverflow( 14794 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 14795 const SCEV *Expr = getSCEV(V); 14796 const auto *AR = cast<SCEVAddRecExpr>(Expr); 14797 14798 Flags = SCEVWrapPredicate::clearFlags( 14799 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 14800 14801 auto II = FlagsMap.find(V); 14802 14803 if (II != FlagsMap.end()) 14804 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 14805 14806 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 14807 } 14808 14809 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 14810 const SCEV *Expr = this->getSCEV(V); 14811 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 14812 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 14813 14814 if (!New) 14815 return nullptr; 14816 14817 for (const auto *P : NewPreds) 14818 addPredicate(*P); 14819 14820 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 14821 return New; 14822 } 14823 14824 PredicatedScalarEvolution::PredicatedScalarEvolution( 14825 const PredicatedScalarEvolution &Init) 14826 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), 14827 Preds(std::make_unique<SCEVUnionPredicate>(Init.Preds->getPredicates())), 14828 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 14829 for (auto I : Init.FlagsMap) 14830 FlagsMap.insert(I); 14831 } 14832 14833 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 14834 // For each block. 14835 for (auto *BB : L.getBlocks()) 14836 for (auto &I : *BB) { 14837 if (!SE.isSCEVable(I.getType())) 14838 continue; 14839 14840 auto *Expr = SE.getSCEV(&I); 14841 auto II = RewriteMap.find(Expr); 14842 14843 if (II == RewriteMap.end()) 14844 continue; 14845 14846 // Don't print things that are not interesting. 14847 if (II->second.second == Expr) 14848 continue; 14849 14850 OS.indent(Depth) << "[PSE]" << I << ":\n"; 14851 OS.indent(Depth + 2) << *Expr << "\n"; 14852 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 14853 } 14854 } 14855 14856 // Match the mathematical pattern A - (A / B) * B, where A and B can be 14857 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used 14858 // for URem with constant power-of-2 second operands. 14859 // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is 14860 // 4, A / B becomes X / 8). 14861 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, 14862 const SCEV *&RHS) { 14863 // Try to match 'zext (trunc A to iB) to iY', which is used 14864 // for URem with constant power-of-2 second operands. Make sure the size of 14865 // the operand A matches the size of the whole expressions. 
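  // E.g. in an i32 expression, (zext i8 (trunc i32 %x to i8) to i32) keeps
  // only the low 8 bits of %x, which is exactly %x urem 256; so LHS becomes
  // (the zero-extension of) %x and RHS becomes 2^8.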
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      // Bail out if the type of the LHS is larger than the type of the
      // expression for now.
      if (getTypeSizeInBits(LHS->getType()) >
          getTypeSizeInBits(Expr->getType()))
        return false;
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}

const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV *, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount =
        getExitCount(L, ExitingBB, ScalarEvolution::SymbolicMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts, /*Sequential*/ true);
}

/// A rewriter to replace SCEV expressions in Map with the corresponding entry
/// in the map. It skips AddRecExpr because we cannot guarantee that the
/// replacement is loop invariant in the loop of the AddRec.
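///
/// For example, if a loop is guarded by (%n != 0), applyLoopGuards below maps
/// %n to umax(%n, 1); this rewriter then substitutes that entry wherever %n
/// occurs in a queried expression, refusing to rewrite inside AddRecs for the
/// reason above.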
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  const DenseMap<const SCEV *, const SCEV *> &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE,
                        DenseMap<const SCEV *, const SCEV *> &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return Expr;
    return I->second;
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end()) {
      // If we didn't find the exact ZExt expr in the map, check if there's an
      // entry for a smaller ZExt we can use instead.
      Type *Ty = Expr->getType();
      const SCEV *Op = Expr->getOperand(0);
      unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
      while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
             Bitwidth > Op->getType()->getScalarSizeInBits()) {
        Type *NarrowTy = IntegerType::get(SE.getContext(), Bitwidth);
        auto *NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
        auto I = Map.find(NarrowExt);
        if (I != Map.end())
          return SE.getZeroExtendExpr(I->second, Ty);
        Bitwidth = Bitwidth / 2;
      }

      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
          Expr);
    }
    return I->second;
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSignExtendExpr(
          Expr);
    return I->second;
  }

  const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitUMinExpr(Expr);
    return I->second;
  }

  const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSMinExpr(Expr);
    return I->second;
  }
};

const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  SmallVector<const SCEV *> ExprsToRewrite;
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS,
                              DenseMap<const SCEV *, const SCEV *>
                                  &RewriteMap) {
    // WARNING: It is generally unsound to apply any wrap flags to the proposed
    // replacement SCEV which isn't directly implied by the structure of that
    // SCEV. In particular, using contextual facts to imply flags is *NOT*
    // legal. See the scoping rules for flags in the header to understand why.

    // If LHS is a constant, apply information to the other expression.
    if (isa<SCEVConstant>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Check for a condition of the form (-C1 + X < C2). InstCombine will
    // create this form when combining two checks of the form (X u< C2 + C1)
    // and (X >=u C1).
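    // For example, (X u>= 1 && X u< 7) gets combined to ((-1 + X) u< 6);
    // makeExactICmpRegion(u<, 6) is [0, 6), and subtracting -1 yields [1, 7),
    // so X is rewritten to umax(1, umin(X, 6)) below.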
    auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap,
                                 &ExprsToRewrite]() {
      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
      if (!AddExpr || AddExpr->getNumOperands() != 2)
        return false;

      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
      auto *C2 = dyn_cast<SCEVConstant>(RHS);
      if (!C1 || !C2 || !LHSUnknown)
        return false;

      auto ExactRegion =
          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
              .sub(C1->getAPInt());

      // Bail out, unless we have a non-wrapping, monotonic range.
      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
        return false;
      auto I = RewriteMap.find(LHSUnknown);
      const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
      RewriteMap[LHSUnknown] = getUMaxExpr(
          getConstant(ExactRegion.getUnsignedMin()),
          getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
      ExprsToRewrite.push_back(LHSUnknown);
      return true;
    };
    if (MatchRangeCheckIdiom())
      return;

    // Return true if \p Expr is a MinMax SCEV expression with a non-negative
    // constant operand. If so, return in \p SCTy the SCEV type, in \p LHS the
    // constant operand, and in \p RHS the non-constant operand.
    auto IsMinMaxSCEVWithNonNegativeConstant =
        [&](const SCEV *Expr, SCEVTypes &SCTy, const SCEV *&LHS,
            const SCEV *&RHS) {
          if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
            if (MinMax->getNumOperands() != 2)
              return false;
            if (auto *C = dyn_cast<SCEVConstant>(MinMax->getOperand(0))) {
              if (C->getAPInt().isNegative())
                return false;
              SCTy = MinMax->getSCEVType();
              LHS = MinMax->getOperand(0);
              RHS = MinMax->getOperand(1);
              return true;
            }
          }
          return false;
        };

    // Checks whether \p Expr is a non-negative constant and \p Divisor is a
    // positive constant, and returns their values in \p ExprVal and
    // \p DivisorVal.
    auto GetNonNegExprAndPosDivisor = [&](const SCEV *Expr, const SCEV *Divisor,
                                          APInt &ExprVal, APInt &DivisorVal) {
      auto *ConstExpr = dyn_cast<SCEVConstant>(Expr);
      auto *ConstDivisor = dyn_cast<SCEVConstant>(Divisor);
      if (!ConstExpr || !ConstDivisor)
        return false;
      ExprVal = ConstExpr->getAPInt();
      DivisorVal = ConstDivisor->getAPInt();
      return ExprVal.isNonNegative() && !DivisorVal.isNonPositive();
    };

    // Return a new SCEV that modifies \p Expr to the closest number that is
    // divisible by \p Divisor and greater than or equal to \p Expr.
    // For now, only handle constant Expr and Divisor.
    auto GetNextSCEVDividesByDivisor = [&](const SCEV *Expr,
                                           const SCEV *Divisor) {
      APInt ExprVal;
      APInt DivisorVal;
      if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
        return Expr;
      APInt Rem = ExprVal.urem(DivisorVal);
      if (!Rem.isZero())
        // Return the SCEV: Expr + Divisor - Expr % Divisor.
        return getConstant(ExprVal + DivisorVal - Rem);
      return Expr;
    };

    // Return a new SCEV that modifies \p Expr to the closest number that is
    // divisible by \p Divisor and less than or equal to \p Expr.
    // For now, only handle constant Expr and Divisor.
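    // E.g. for Expr = 14 and Divisor = 4, 14 urem 4 == 2, so the result is
    // 14 - 2 == 12 (whereas GetNextSCEVDividesByDivisor above yields 16).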
    auto GetPreviousSCEVDividesByDivisor = [&](const SCEV *Expr,
                                               const SCEV *Divisor) {
      APInt ExprVal;
      APInt DivisorVal;
      if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
        return Expr;
      APInt Rem = ExprVal.urem(DivisorVal);
      // Return the SCEV: Expr - Expr % Divisor.
      return getConstant(ExprVal - Rem);
    };

    // Apply divisibility by \p Divisor on MinMaxExpr with constant values,
    // recursively. This is done by aligning up/down the constant value to the
    // Divisor.
    std::function<const SCEV *(const SCEV *, const SCEV *)>
        ApplyDivisibiltyOnMinMaxExpr = [&](const SCEV *MinMaxExpr,
                                           const SCEV *Divisor) {
          const SCEV *MinMaxLHS = nullptr, *MinMaxRHS = nullptr;
          SCEVTypes SCTy;
          if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
                                                   MinMaxRHS))
            return MinMaxExpr;
          auto IsMin =
              isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
          assert(isKnownNonNegative(MinMaxLHS) &&
                 "Expected non-negative operand!");
          auto *DivisibleExpr =
              IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor)
                    : GetNextSCEVDividesByDivisor(MinMaxLHS, Divisor);
          SmallVector<const SCEV *> Ops = {
              ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
          return getMinMaxExpr(SCTy, Ops);
        };

    // If we have LHS == 0, check if LHS is computing a property of some
    // unknown SCEV %v, and if so rewrite %v to express that property
    // explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          auto I = RewriteMap.find(LHSUnknown);
          const SCEV *RewrittenLHS =
              I != RewriteMap.end() ? I->second : LHSUnknown;
          RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
          const auto *Multiple =
              getMulExpr(getUDivExpr(RewrittenLHS, URemRHS), URemRHS);
          RewriteMap[LHSUnknown] = Multiple;
          ExprsToRewrite.push_back(LHSUnknown);
          return;
        }
      }
    }

    // Do not apply information for constants or if RHS contains an AddRec.
    if (isa<SCEVConstant>(LHS) || containsAddRecurrence(RHS))
      return;

    // If RHS is SCEVUnknown, make sure the information is applied to it.
    if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Puts rewrite rule \p From -> \p To into the rewrite map. Also, if
    // \p From and \p FromRewritten are the same (i.e. there has been no
    // rewrite registered for \p From), then puts this value in the list of
    // rewritten expressions.
    auto AddRewrite = [&](const SCEV *From, const SCEV *FromRewritten,
                          const SCEV *To) {
      if (From == FromRewritten)
        ExprsToRewrite.push_back(From);
      RewriteMap[From] = To;
    };

    // Checks whether \p S has already been rewritten. In that case returns the
    // existing rewrite because we want to chain further rewrites onto the
    // already rewritten value. Otherwise returns \p S.
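    // For instance, if a previous condition already rewrote %n to
    // umin(%n, 16) and the current condition is (%n u>= 4), chaining yields
    // umax(umin(%n, 16), 4) instead of dropping the earlier bound.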
    auto GetMaybeRewritten = [&](const SCEV *S) {
      auto I = RewriteMap.find(S);
      return I != RewriteMap.end() ? I->second : S;
    };

    // Check for the SCEV expression (A /u B) * B, where B is a constant,
    // inside \p Expr. The check is done recursively on \p Expr, which is
    // assumed to be a composition of Min/Max SCEVs. Return whether the SCEV
    // expression (A /u B) * B was found, and return the divisor B in
    // \p DividesBy. For example, if Expr = umin (umax ((A /u 8) * 8, 16), 64),
    // return true since (A /u 8) * 8 matched the pattern, and return the
    // constant SCEV 8 in \p DividesBy.
    std::function<bool(const SCEV *, const SCEV *&)> HasDivisibiltyInfo =
        [&](const SCEV *Expr, const SCEV *&DividesBy) {
          if (auto *Mul = dyn_cast<SCEVMulExpr>(Expr)) {
            if (Mul->getNumOperands() != 2)
              return false;
            auto *MulLHS = Mul->getOperand(0);
            auto *MulRHS = Mul->getOperand(1);
            if (isa<SCEVConstant>(MulLHS))
              std::swap(MulLHS, MulRHS);
            if (auto *Div = dyn_cast<SCEVUDivExpr>(MulLHS))
              if (Div->getOperand(1) == MulRHS) {
                DividesBy = MulRHS;
                return true;
              }
          }
          if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
            return HasDivisibiltyInfo(MinMax->getOperand(0), DividesBy) ||
                   HasDivisibiltyInfo(MinMax->getOperand(1), DividesBy);
          return false;
        };

    // Return true if \p Expr is known to be divisible by \p DividesBy.
    std::function<bool(const SCEV *, const SCEV *&)> IsKnownToDivideBy =
        [&](const SCEV *Expr, const SCEV *DividesBy) {
          if (getURemExpr(Expr, DividesBy)->isZero())
            return true;
          if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
            return IsKnownToDivideBy(MinMax->getOperand(0), DividesBy) &&
                   IsKnownToDivideBy(MinMax->getOperand(1), DividesBy);
          return false;
        };

    const SCEV *RewrittenLHS = GetMaybeRewritten(LHS);
    const SCEV *DividesBy = nullptr;
    if (HasDivisibiltyInfo(RewrittenLHS, DividesBy))
      // Check that the whole expression is divisible by DividesBy.
      DividesBy =
          IsKnownToDivideBy(RewrittenLHS, DividesBy) ? DividesBy : nullptr;

    // Collect rewrites for LHS and its transitive operands based on the
    // condition.
    // For min/max expressions, also apply the guard to its operands:
    //   'min(a, b) >= c' -> '(a >= c) and (b >= c)',
    //   'min(a, b) >  c' -> '(a >  c) and (b >  c)',
    //   'max(a, b) <= c' -> '(a <= c) and (b <= c)',
    //   'max(a, b) <  c' -> '(a <  c) and (b <  c)'.

    // We cannot express strict predicates in SCEV, so instead we replace them
    // with non-strict ones against plus or minus one of RHS depending on the
    // predicate.
    const SCEV *One = getOne(RHS->getType());
    switch (Predicate) {
    case CmpInst::ICMP_ULT:
      if (RHS->getType()->isPointerTy())
        return;
      RHS = getUMaxExpr(RHS, One);
      [[fallthrough]];
    case CmpInst::ICMP_SLT: {
      RHS = getMinusSCEV(RHS, One);
      RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
      break;
    }
    case CmpInst::ICMP_UGT:
    case CmpInst::ICMP_SGT:
      RHS = getAddExpr(RHS, One);
      RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
      break;
    case CmpInst::ICMP_ULE:
    case CmpInst::ICMP_SLE:
      RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
      break;
    case CmpInst::ICMP_UGE:
    case CmpInst::ICMP_SGE:
      RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
      break;
    default:
      break;
    }

    SmallVector<const SCEV *, 16> Worklist(1, LHS);
    SmallPtrSet<const SCEV *, 16> Visited;

    auto EnqueueOperands = [&Worklist](const SCEVNAryExpr *S) {
      append_range(Worklist, S->operands());
    };

    while (!Worklist.empty()) {
      const SCEV *From = Worklist.pop_back_val();
      if (isa<SCEVConstant>(From))
        continue;
      if (!Visited.insert(From).second)
        continue;
      const SCEV *FromRewritten = GetMaybeRewritten(From);
      const SCEV *To = nullptr;

      switch (Predicate) {
      case CmpInst::ICMP_ULT:
      case CmpInst::ICMP_ULE:
        To = getUMinExpr(FromRewritten, RHS);
        if (auto *UMax = dyn_cast<SCEVUMaxExpr>(FromRewritten))
          EnqueueOperands(UMax);
        break;
      case CmpInst::ICMP_SLT:
      case CmpInst::ICMP_SLE:
        To = getSMinExpr(FromRewritten, RHS);
        if (auto *SMax = dyn_cast<SCEVSMaxExpr>(FromRewritten))
          EnqueueOperands(SMax);
        break;
      case CmpInst::ICMP_UGT:
      case CmpInst::ICMP_UGE:
        To = getUMaxExpr(FromRewritten, RHS);
        if (auto *UMin = dyn_cast<SCEVUMinExpr>(FromRewritten))
          EnqueueOperands(UMin);
        break;
      case CmpInst::ICMP_SGT:
      case CmpInst::ICMP_SGE:
        To = getSMaxExpr(FromRewritten, RHS);
        if (auto *SMin = dyn_cast<SCEVSMinExpr>(FromRewritten))
          EnqueueOperands(SMin);
        break;
      case CmpInst::ICMP_EQ:
        if (isa<SCEVConstant>(RHS))
          To = RHS;
        break;
      case CmpInst::ICMP_NE:
        if (isa<SCEVConstant>(RHS) &&
            cast<SCEVConstant>(RHS)->getValue()->isNullValue()) {
          const SCEV *OneAlignedUp =
              DividesBy ? GetNextSCEVDividesByDivisor(One, DividesBy) : One;
          To = getUMaxExpr(FromRewritten, OneAlignedUp);
        }
        break;
      default:
        break;
      }

      if (To)
        AddRewrite(From, FromRewritten, To);
    }
  };

  BasicBlock *Header = L->getHeader();
  SmallVector<PointerIntPair<Value *, 1, bool>> Terms;
  // First, collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(AssumeI, Header))
      continue;
    Terms.emplace_back(AssumeI->getOperand(0), true);
  }

  // Second, collect information from llvm.experimental.guards dominating the
  // loop.
  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  if (GuardDecl)
    for (const auto *GU : GuardDecl->users())
      if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
        if (Guard->getFunction() == Header->getParent() &&
            DT.dominates(Guard, Header))
          Terms.emplace_back(Guard->getArgOperand(0), true);

  // Third, collect conditions from dominating branches. Starting at the loop
  // predecessor, climb up the predecessor chain, as long as there are
  // predecessors that can be found that have unique successors leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
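  // For instance, if the header is only reached via
  //   entry: br i1 (%n u> 0), label %ph, label %exit
  //   ph:    br label %header
  // the walk below first visits (ph, header), whose branch is unconditional,
  // and then (entry, ph), recording the term (%n u> 0, /*EnterIfTrue=*/true)
  // because the true successor leads to the loop.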
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), Header);
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    Terms.emplace_back(LoopEntryPredicate->getCondition(),
                       LoopEntryPredicate->getSuccessor(0) == Pair.second);
  }

  // Now apply the information from the collected conditions to RewriteMap.
  // Conditions are processed in reverse order, so the earliest conditions are
  // processed first. This ensures the SCEVs with the shortest dependency
  // chains are constructed first.
  DenseMap<const SCEV *, const SCEV *> RewriteMap;
  for (auto [Term, EnterIfTrue] : reverse(Terms)) {
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(Term);
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        const auto *LHS = getSCEV(Cmp->getOperand(0));
        const auto *RHS = getSCEV(Cmp->getOperand(1));
        CollectCondition(Predicate, LHS, RHS, RewriteMap);
        continue;
      }

      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }

  if (RewriteMap.empty())
    return Expr;

  // Now that all rewrite information has been collected, rewrite the collected
  // expressions with the information in the map. This applies information to
  // sub-expressions.
  if (ExprsToRewrite.size() > 1) {
    for (const SCEV *Expr : ExprsToRewrite) {
      const SCEV *RewriteTo = RewriteMap[Expr];
      RewriteMap.erase(Expr);
      SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
      RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)});
    }
  }

  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}
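// Illustrative end-to-end sketch for applyLoopGuards: if the collected
// conditions are processed in the order ((%n urem 4) == 0) and then
// (%n != 0), the EQ case first maps %n to ((%n /u 4) * 4); the NE case then
// chains onto that rewrite and aligns 1 up to the known divisor, yielding the
// final map entry umax((%n /u 4) * 4, 4) for %n.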