//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
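// For illustration (not itself part of the analysis), the uniquing means that
// structurally identical expressions built through the ScalarEvolution
// factory methods compare equal as pointers. Assuming A and B are previously
// constructed SCEVs:
//
//   const SCEV *S1 = SE.getAddExpr(A, B);
//   const SCEV *S2 = SE.getAddExpr(B, A); // operands canonically ordered
//   assert(S1 == S2 && "uniqued SCEVs compare equal by pointer");
//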
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

#ifdef EXPENSIVE_CHECKS
bool llvm::VerifySCEV = true;
#else
bool llvm::VerifySCEV = false;
#endif

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

static cl::opt<bool, true> VerifySCEVOpt(
    "verify-scev", cl::Hidden, cl::location(VerifySCEV),
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool> ClassifyExpressions(
    "scalar-evolution-classify-expressions", cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

static cl::opt<unsigned> MaxPhiSCCAnalysisSize(
    "scalar-evolution-max-scc-analysis-depth", cl::Hidden,
    cl::desc("Maximum amount of nodes to process while searching SCEVUnknown "
             "Phi strongly connected components"),
    cl::init(8));

static cl::opt<bool>
    EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden,
                            cl::desc("Handle <= and >= in finite loops"),
                            cl::init(true));
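// Usage sketch (the flag values below are illustrative, not recommendations):
// the knobs above are ordinary cl::opts, so they can be set directly on an
// 'opt' invocation, e.g.:
//
//   opt -passes='print<scalar-evolution>' -disable-output \
//       -scalar-evolution-max-arith-depth=16 \
//       -scalar-evolution-classify-expressions=false input.ll
//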
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    case scSequentialUMinExpr:
      OpStr = " umin_seq ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scSequentialUMinExpr:
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Replace the value pointer in case someone is still using this SCEVUnknown.
  setValPtr(New);
}

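// The three predicates below recognize the canonical ConstantExpr idioms that
// express type sizes, alignments, and field offsets as a gep of a null
// pointer. As an illustrative sketch (iN stands for the pointer-sized integer
// type), sizeof(Ty) appears as
//
//   ptrtoint (Ty* getelementptr (Ty, Ty* null, i32 1) to iN)
//
// offsetof(STy, Field) as
//
//   ptrtoint (Ty* getelementptr (STy, STy* null, i32 0, i32 Field) to iN)
//
// and alignof(Ty) indexes the second element of the struct {i1, Ty} applied
// to a null pointer.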
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
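  // As an illustration (with hypothetical values %i and %p): an integer %i
  // sorts before a pointer %p, so an addition canonicalizes as (%i + %p);
  // keeping the pointer last makes it easy for SCEVExpander to emit the sum
  // as a GEP based on %p.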
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to
// be more efficient.
// If the max analysis depth was reached, return None, since we cannot tell for
// sure whether the two are equivalent.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // When two recurrences are used by one SCEV, the header of one loop
    // always dominates the header of the other, so we can safely sort recs
    // by loop header dominance. We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
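/// For example (illustrative operands), an input list such as
///   {%x, 2, (%n + %m), %x}
/// is reordered so complexity increases and equal values become adjacent:
///   {2, (%n + %m), %x, %x}
/// Callers like getAddExpr can then detect duplicate operands by scanning
/// consecutive entries.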
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (an expression whose subtree
/// has at least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
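  // Worked example (illustrative): for K = 4 and W = 32, K! = 24 = 2^3 * 3,
  // so the loop below ends with T = 3 and OddFactorial = 3 (T starts at 1 to
  // account for the factor of two contributed by i == 2). The product
  // It*(It-1)*(It-2)*(It-3) is then formed at W + T = 35 bits, divided by
  // 2^3 via getUDivExpr, truncated back to 32 bits, and multiplied by
  // MultiplyFactor = 3^-1 (mod 2^32) = 0xAAAAAAAB, which performs the exact
  // division by the odd part of K!.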
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with integer-typed operands during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but we lack motivating cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
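  /// For example (schematically), a pointer-typed expression such as
  ///   (%ptr + 4 + (8 * %i))
  /// is rewritten to
  ///   ((ptrtoint %ptr) + 4 + (8 * %i))
  /// i.e., the cast is pushed down onto the SCEVUnknown leaf and all the
  /// surrounding arithmetic becomes integer arithmetic.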
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
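  // For instance (illustrative), trunc((zext %a) + %b) becomes
  // (trunc (zext %a)) + (trunc %b): the truncate of the zext merely replaces
  // a cast and is not counted, so only one new truncate remains and the
  // transform is still considered profitable.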
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that the recursive calls above inserted an expression with this
    // ID into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Op);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
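// For example (hypothetical numbers): with 8-bit values and a step whose
// unsigned range is [1, 4], the limit computed below is 0 - 4 == 252 and the
// predicate is ULT; as long as the recurrence's value is ult 252 before the
// increment, adding the step cannot wrap past 255.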
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
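  // For example (schematic operands): if Start is (%x + %y + Step), the loop
  // below collects DiffOps = {%x, %y}, and PreStart becomes (%x + %y). The
  // size check afterwards catches the case where Step never occurred in the
  // operand list at all.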
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
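// A small worked example (illustrative): suppose C == 37 (0b100101) and every
// other operand is known to be a multiple of 8, so TZ == 3. Then D is the low
// three bits of C, i.e. 5, and C - D == 32. The sum (C - D + x + y + ...)
// keeps all three trailing zero bits, so adding D back only fills bit
// positions that are known to be zero and the top-level addition cannot wrap.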
(3) 1500 // 1501 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)} 1502 // == {Ext(S),+,Ext(X)} == LHS 1503 // 1504 // Thus, if (1), (2) and (3) are true for some T, then 1505 // Ext({S,+,X}) == {Ext(S),+,Ext(X)} 1506 // 1507 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T) 1508 // does not overflow" restricted to the 0th iteration. Therefore we only need 1509 // to check for (1) and (2). 1510 // 1511 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T 1512 // is `Delta` (defined below). 1513 template <typename ExtendOpTy> 1514 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, 1515 const SCEV *Step, 1516 const Loop *L) { 1517 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1518 1519 // We restrict `Start` to a constant to prevent SCEV from spending too much 1520 // time here. It is correct (but more expensive) to continue with a 1521 // non-constant `Start` and do a general SCEV subtraction to compute 1522 // `PreStart` below. 1523 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); 1524 if (!StartC) 1525 return false; 1526 1527 APInt StartAI = StartC->getAPInt(); 1528 1529 for (unsigned Delta : {-2, -1, 1, 2}) { 1530 const SCEV *PreStart = getConstant(StartAI - Delta); 1531 1532 FoldingSetNodeID ID; 1533 ID.AddInteger(scAddRecExpr); 1534 ID.AddPointer(PreStart); 1535 ID.AddPointer(Step); 1536 ID.AddPointer(L); 1537 void *IP = nullptr; 1538 const auto *PreAR = 1539 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1540 1541 // Give up if we don't already have the add recurrence we need because 1542 // actually constructing an add recurrence is relatively expensive. 1543 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) 1544 const SCEV *DeltaS = getConstant(StartC->getType(), Delta); 1545 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1546 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( 1547 DeltaS, &Pred, this); 1548 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) 1549 return true; 1550 } 1551 } 1552 1553 return false; 1554 } 1555 1556 // Finds an integer D for an expression (C + x + y + ...) such that the top 1557 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or 1558 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is 1559 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and 1560 // the (C + x + y + ...) expression is \p WholeAddExpr. 1561 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1562 const SCEVConstant *ConstantTerm, 1563 const SCEVAddExpr *WholeAddExpr) { 1564 const APInt &C = ConstantTerm->getAPInt(); 1565 const unsigned BitWidth = C.getBitWidth(); 1566 // Find number of trailing zeros of (x + y + ...) w/o the C first: 1567 uint32_t TZ = BitWidth; 1568 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) 1569 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I))); 1570 if (TZ) { 1571 // Set D to be as many least significant bits of C as possible while still 1572 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: 1573 return TZ < BitWidth ? 
C.trunc(TZ).zext(BitWidth) : C; 1574 } 1575 return APInt(BitWidth, 0); 1576 } 1577 1578 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1579 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1580 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1581 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1582 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1583 const APInt &ConstantStart, 1584 const SCEV *Step) { 1585 const unsigned BitWidth = ConstantStart.getBitWidth(); 1586 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1587 if (TZ) 1588 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1589 : ConstantStart; 1590 return APInt(BitWidth, 0); 1591 } 1592 1593 const SCEV * 1594 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1595 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1596 "This is not an extending conversion!"); 1597 assert(isSCEVable(Ty) && 1598 "This is not a conversion to a SCEVable type!"); 1599 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1600 Ty = getEffectiveSCEVType(Ty); 1601 1602 // Fold if the operand is constant. 1603 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1604 return getConstant( 1605 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1606 1607 // zext(zext(x)) --> zext(x) 1608 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1609 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1610 1611 // Before doing any expensive analysis, check to see if we've already 1612 // computed a SCEV for this Op and Ty. 1613 FoldingSetNodeID ID; 1614 ID.AddInteger(scZeroExtend); 1615 ID.AddPointer(Op); 1616 ID.AddPointer(Ty); 1617 void *IP = nullptr; 1618 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1619 if (Depth > MaxCastDepth) { 1620 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1621 Op, Ty); 1622 UniqueSCEVs.InsertNode(S, IP); 1623 registerUser(S, Op); 1624 return S; 1625 } 1626 1627 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1628 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1629 // It's possible the bits taken off by the truncate were all zero bits. If 1630 // so, we should be able to simplify this further. 1631 const SCEV *X = ST->getOperand(); 1632 ConstantRange CR = getUnsignedRange(X); 1633 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1634 unsigned NewBits = getTypeSizeInBits(Ty); 1635 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1636 CR.zextOrTrunc(NewBits))) 1637 return getTruncateOrZeroExtend(X, Ty, Depth); 1638 } 1639 1640 // If the input value is a chrec scev, and we can prove that the value 1641 // did not overflow the old, smaller, value, we can zero extend all of the 1642 // operands (often constants). 
This allows analysis of something like
1643 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1644 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1645 if (AR->isAffine()) {
1646 const SCEV *Start = AR->getStart();
1647 const SCEV *Step = AR->getStepRecurrence(*this);
1648 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1649 const Loop *L = AR->getLoop();
1650
1651 if (!AR->hasNoUnsignedWrap()) {
1652 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1653 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1654 }
1655
1656 // If we have special knowledge that this addrec won't overflow,
1657 // we don't need to do any further analysis.
1658 if (AR->hasNoUnsignedWrap()) {
1659 Start =
1660 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
1661 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1662 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1663 }
1664
1665 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1666 // Note that this serves two purposes: It filters out loops that are
1667 // simply not analyzable, and it covers the case where this code is
1668 // being called from within backedge-taken count analysis, such that
1669 // attempting to ask for the backedge-taken count would likely result
1670 // in infinite recursion. In the latter case, the analysis code will
1671 // cope with a conservative value, and it will take care to purge
1672 // that value once it has finished.
1673 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1674 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1675 // Manually compute the final value for AR, checking for overflow.
1676
1677 // Check whether the backedge-taken count can be losslessly cast to
1678 // the addrec's type. The count is always unsigned.
1679 const SCEV *CastedMaxBECount =
1680 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1681 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1682 CastedMaxBECount, MaxBECount->getType(), Depth);
1683 if (MaxBECount == RecastedMaxBECount) {
1684 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1685 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1686 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1687 SCEV::FlagAnyWrap, Depth + 1);
1688 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1689 SCEV::FlagAnyWrap,
1690 Depth + 1),
1691 WideTy, Depth + 1);
1692 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1693 const SCEV *WideMaxBECount =
1694 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1695 const SCEV *OperandExtendedAdd =
1696 getAddExpr(WideStart,
1697 getMulExpr(WideMaxBECount,
1698 getZeroExtendExpr(Step, WideTy, Depth + 1),
1699 SCEV::FlagAnyWrap, Depth + 1),
1700 SCEV::FlagAnyWrap, Depth + 1);
1701 if (ZAdd == OperandExtendedAdd) {
1702 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1703 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1704 // Return the expression with the addrec on the outside.
1705 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1706 Depth + 1);
1707 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1708 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1709 }
1710 // Similar to above, only this time treat the step value as signed.
1711 // This covers loops that count down.
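// A concrete instance of the count-down case (illustrative, not from the
// original source): for the i8 recurrence {10,+,-1} with a constant max
// backedge-taken count of 10, both sides below evaluate to 0:
//   zext(10 + 10 * (-1))            == 0   (computed in i8, then extended)
//   zext(10) + zext(10) * sext(-1)  == 0   (computed directly in i16)
// so the recurrence gets FlagNW and is rewritten with a sign-extended step.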
1712 OperandExtendedAdd =
1713 getAddExpr(WideStart,
1714 getMulExpr(WideMaxBECount,
1715 getSignExtendExpr(Step, WideTy, Depth + 1),
1716 SCEV::FlagAnyWrap, Depth + 1),
1717 SCEV::FlagAnyWrap, Depth + 1);
1718 if (ZAdd == OperandExtendedAdd) {
1719 // Cache knowledge of AR NW, which is propagated to this AddRec.
1720 // Negative step causes unsigned wrap, but it still can't self-wrap.
1721 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1722 // Return the expression with the addrec on the outside.
1723 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1724 Depth + 1);
1725 Step = getSignExtendExpr(Step, Ty, Depth + 1);
1726 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1727 }
1728 }
1729 }
1730
1731 // Normally, in the cases we can prove no-overflow via a
1732 // backedge guarding condition, we can also compute a backedge
1733 // taken count for the loop. The exceptions are assumptions and
1734 // guards present in the loop -- SCEV is not great at exploiting
1735 // these to compute max backedge taken counts, but can still use
1736 // these to prove lack of overflow. Use this fact to avoid
1737 // doing extra work that may not pay off.
1738 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1739 !AC.assumptions().empty()) {
1740
1741 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1742 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1743 if (AR->hasNoUnsignedWrap()) {
1744 // Same as nuw case above - duplicated here to avoid a compile-time
1745 // issue. It's not clear that the order of checks matters, but it's
1746 // one of two possible causes of an issue with a change that was
1747 // reverted. Be conservative for the moment.
1748 Start =
1749 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
1750 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1751 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1752 }
1753
1754 // For a negative step, we can extend the operands iff doing so only
1755 // traverses values in the range zext([0,UINT_MAX]).
1756 if (isKnownNegative(Step)) {
1757 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1758 getSignedRangeMin(Step));
1759 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1760 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1761 // Cache knowledge of AR NW, which is propagated to this
1762 // AddRec. Negative step causes unsigned wrap, but it
1763 // still can't self-wrap.
1764 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1765 // Return the expression with the addrec on the outside.
1766 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1767 Depth + 1);
1768 Step = getSignExtendExpr(Step, Ty, Depth + 1);
1769 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1770 }
1771 }
1772 }
1773
1774 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1775 // if D + (C - D + Step * n) could be proven to not unsigned wrap
1776 // where D maximizes the number of trailing zeros of (C - D + Step * n)
1777 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1778 const APInt &C = SC->getAPInt();
1779 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1780 if (D != 0) {
1781 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1782 const SCEV *SResidual =
1783 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1784 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1785 return getAddExpr(SZExtD, SZExtR,
1786 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1787 Depth + 1);
1788 }
1789 }
1790
1791 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1792 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1793 Start =
1794 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
1795 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1796 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1797 }
1798 }
1799
1800 // zext(A % B) --> zext(A) % zext(B)
1801 {
1802 const SCEV *LHS;
1803 const SCEV *RHS;
1804 if (matchURem(Op, LHS, RHS))
1805 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1806 getZeroExtendExpr(RHS, Ty, Depth + 1));
1807 }
1808
1809 // zext(A / B) --> zext(A) / zext(B).
1810 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1811 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1812 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1813
1814 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1815 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1816 if (SA->hasNoUnsignedWrap()) {
1817 // If the addition does not unsign overflow then we can, by definition,
1818 // commute the zero extension with the addition operation.
1819 SmallVector<const SCEV *, 4> Ops;
1820 for (const auto *Op : SA->operands())
1821 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1822 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1823 }
1824
1825 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1826 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1827 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1828 //
1829 // Address arithmetic often contains expressions like
1830 // (zext (add (shl X, C1), C2)); for instance, (zext (5 + (4 * X))).
1831 // This transformation is useful when proving that such expressions are
1832 // equal or differ by a small constant amount; see the LoadStoreVectorizer pass.
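// Working through that instance (illustrative): for zext(5 + 4*X), C is 5
// and the remaining operands have at least two trailing zero bits, so D
// becomes 5 mod 4 == 1 and the result is zext(1) + zext(4 + 4*X), which
// exposes the common aligned part of nearby addresses.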
1833 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1834 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1835 if (D != 0) { 1836 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1837 const SCEV *SResidual = 1838 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1839 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1840 return getAddExpr(SZExtD, SZExtR, 1841 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1842 Depth + 1); 1843 } 1844 } 1845 } 1846 1847 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1848 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1849 if (SM->hasNoUnsignedWrap()) { 1850 // If the multiply does not unsign overflow then we can, by definition, 1851 // commute the zero extension with the multiply operation. 1852 SmallVector<const SCEV *, 4> Ops; 1853 for (const auto *Op : SM->operands()) 1854 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1855 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1856 } 1857 1858 // zext(2^K * (trunc X to iN)) to iM -> 1859 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1860 // 1861 // Proof: 1862 // 1863 // zext(2^K * (trunc X to iN)) to iM 1864 // = zext((trunc X to iN) << K) to iM 1865 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1866 // (because shl removes the top K bits) 1867 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1868 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1869 // 1870 if (SM->getNumOperands() == 2) 1871 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1872 if (MulLHS->getAPInt().isPowerOf2()) 1873 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1874 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1875 MulLHS->getAPInt().logBase2(); 1876 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1877 return getMulExpr( 1878 getZeroExtendExpr(MulLHS, Ty), 1879 getZeroExtendExpr( 1880 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1881 SCEV::FlagNUW, Depth + 1); 1882 } 1883 } 1884 1885 // The cast wasn't folded; create an explicit cast node. 1886 // Recompute the insert position, as it may have been invalidated. 1887 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1888 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1889 Op, Ty); 1890 UniqueSCEVs.InsertNode(S, IP); 1891 registerUser(S, Op); 1892 return S; 1893 } 1894 1895 const SCEV * 1896 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1897 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1898 "This is not an extending conversion!"); 1899 assert(isSCEVable(Ty) && 1900 "This is not a conversion to a SCEVable type!"); 1901 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1902 Ty = getEffectiveSCEVType(Ty); 1903 1904 // Fold if the operand is constant. 1905 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1906 return getConstant( 1907 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1908 1909 // sext(sext(x)) --> sext(x) 1910 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1911 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1912 1913 // sext(zext(x)) --> zext(x) 1914 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1915 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1916 1917 // Before doing any expensive analysis, check to see if we've already 1918 // computed a SCEV for this Op and Ty. 
1919 FoldingSetNodeID ID; 1920 ID.AddInteger(scSignExtend); 1921 ID.AddPointer(Op); 1922 ID.AddPointer(Ty); 1923 void *IP = nullptr; 1924 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1925 // Limit recursion depth. 1926 if (Depth > MaxCastDepth) { 1927 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1928 Op, Ty); 1929 UniqueSCEVs.InsertNode(S, IP); 1930 registerUser(S, Op); 1931 return S; 1932 } 1933 1934 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1935 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1936 // It's possible the bits taken off by the truncate were all sign bits. If 1937 // so, we should be able to simplify this further. 1938 const SCEV *X = ST->getOperand(); 1939 ConstantRange CR = getSignedRange(X); 1940 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1941 unsigned NewBits = getTypeSizeInBits(Ty); 1942 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1943 CR.sextOrTrunc(NewBits))) 1944 return getTruncateOrSignExtend(X, Ty, Depth); 1945 } 1946 1947 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1948 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1949 if (SA->hasNoSignedWrap()) { 1950 // If the addition does not sign overflow then we can, by definition, 1951 // commute the sign extension with the addition operation. 1952 SmallVector<const SCEV *, 4> Ops; 1953 for (const auto *Op : SA->operands()) 1954 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1955 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1956 } 1957 1958 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1959 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1960 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1961 // 1962 // For instance, this will bring two seemingly different expressions: 1963 // 1 + sext(5 + 20 * %x + 24 * %y) and 1964 // sext(6 + 20 * %x + 24 * %y) 1965 // to the same form: 1966 // 2 + sext(4 + 20 * %x + 24 * %y) 1967 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1968 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1969 if (D != 0) { 1970 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1971 const SCEV *SResidual = 1972 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1973 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1974 return getAddExpr(SSExtD, SSExtR, 1975 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1976 Depth + 1); 1977 } 1978 } 1979 } 1980 // If the input value is a chrec scev, and we can prove that the value 1981 // did not overflow the old, smaller, value, we can sign extend all of the 1982 // operands (often constants). This allows analysis of something like 1983 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1984 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1985 if (AR->isAffine()) { 1986 const SCEV *Start = AR->getStart(); 1987 const SCEV *Step = AR->getStepRecurrence(*this); 1988 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1989 const Loop *L = AR->getLoop(); 1990 1991 if (!AR->hasNoSignedWrap()) { 1992 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1993 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1994 } 1995 1996 // If we have special knowledge that this addrec won't overflow, 1997 // we don't need to do any further analysis. 
1998 if (AR->hasNoSignedWrap()) {
1999 Start =
2000 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
2001 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2002 return getAddRecExpr(Start, Step, L, SCEV::FlagNSW);
2003 }
2004
2005 // Check whether the backedge-taken count is SCEVCouldNotCompute.
2006 // Note that this serves two purposes: It filters out loops that are
2007 // simply not analyzable, and it covers the case where this code is
2008 // being called from within backedge-taken count analysis, such that
2009 // attempting to ask for the backedge-taken count would likely result
2010 // in infinite recursion. In the latter case, the analysis code will
2011 // cope with a conservative value, and it will take care to purge
2012 // that value once it has finished.
2013 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
2014 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2015 // Manually compute the final value for AR, checking for
2016 // overflow.
2017
2018 // Check whether the backedge-taken count can be losslessly cast to
2019 // the addrec's type. The count is always unsigned.
2020 const SCEV *CastedMaxBECount =
2021 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2022 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2023 CastedMaxBECount, MaxBECount->getType(), Depth);
2024 if (MaxBECount == RecastedMaxBECount) {
2025 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2026 // Check whether Start+Step*MaxBECount has no signed overflow.
2027 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2028 SCEV::FlagAnyWrap, Depth + 1);
2029 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2030 SCEV::FlagAnyWrap,
2031 Depth + 1),
2032 WideTy, Depth + 1);
2033 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2034 const SCEV *WideMaxBECount =
2035 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2036 const SCEV *OperandExtendedAdd =
2037 getAddExpr(WideStart,
2038 getMulExpr(WideMaxBECount,
2039 getSignExtendExpr(Step, WideTy, Depth + 1),
2040 SCEV::FlagAnyWrap, Depth + 1),
2041 SCEV::FlagAnyWrap, Depth + 1);
2042 if (SAdd == OperandExtendedAdd) {
2043 // Cache knowledge of AR NSW, which is propagated to this AddRec.
2044 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2045 // Return the expression with the addrec on the outside.
2046 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2047 Depth + 1);
2048 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2049 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2050 }
2051 // Similar to above, only this time treat the step value as unsigned.
2052 // This covers loops that count up with an unsigned step.
2053 OperandExtendedAdd =
2054 getAddExpr(WideStart,
2055 getMulExpr(WideMaxBECount,
2056 getZeroExtendExpr(Step, WideTy, Depth + 1),
2057 SCEV::FlagAnyWrap, Depth + 1),
2058 SCEV::FlagAnyWrap, Depth + 1);
2059 if (SAdd == OperandExtendedAdd) {
2060 // If AR wraps around then
2061 //
2062 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2063 // => SAdd != OperandExtendedAdd
2064 //
2065 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2066 // (SAdd == OperandExtendedAdd => AR is NW)
2067
2068 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2069
2070 // Return the expression with the addrec on the outside.
2071 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2072 Depth + 1);
2073 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
2074 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2075 }
2076 }
2077 }
2078
2079 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2080 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2081 if (AR->hasNoSignedWrap()) {
2082 // Same as nsw case above - duplicated here to avoid a compile-time
2083 // issue. It's not clear that the order of checks matters, but it's
2084 // one of two possible causes of an issue with a change that was
2085 // reverted. Be conservative for the moment.
2086 Start =
2087 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
2088 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2089 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2090 }
2091
2092 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2093 // if D + (C - D + Step * n) could be proven to not signed wrap
2094 // where D maximizes the number of trailing zeros of (C - D + Step * n)
2095 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2096 const APInt &C = SC->getAPInt();
2097 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2098 if (D != 0) {
2099 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2100 const SCEV *SResidual =
2101 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2102 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2103 return getAddExpr(SSExtD, SSExtR,
2104 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2105 Depth + 1);
2106 }
2107 }
2108
2109 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2110 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2111 Start =
2112 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
2113 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2114 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2115 }
2116 }
2117
2118 // If the input value is provably non-negative and we could not simplify
2119 // away the sext, build a zext instead.
2120 if (isKnownNonNegative(Op))
2121 return getZeroExtendExpr(Op, Ty, Depth + 1);
2122
2123 // The cast wasn't folded; create an explicit cast node.
2124 // Recompute the insert position, as it may have been invalidated.
2125 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2126 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2127 Op, Ty);
2128 UniqueSCEVs.InsertNode(S, IP);
2129 registerUser(S, { Op });
2130 return S;
2131 }
2132
2133 const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op,
2134 Type *Ty) {
2135 switch (Kind) {
2136 case scTruncate:
2137 return getTruncateExpr(Op, Ty);
2138 case scZeroExtend:
2139 return getZeroExtendExpr(Op, Ty);
2140 case scSignExtend:
2141 return getSignExtendExpr(Op, Ty);
2142 case scPtrToInt:
2143 return getPtrToIntExpr(Op, Ty);
2144 default:
2145 llvm_unreachable("Not a SCEV cast expression!");
2146 }
2147 }
2148
2149 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2150 /// unspecified bits out to the given type.
2151 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2152 Type *Ty) {
2153 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2154 "This is not an extending conversion!");
2155 assert(isSCEVable(Ty) &&
2156 "This is not a conversion to a SCEVable type!");
2157 Ty = getEffectiveSCEVType(Ty);
2158
2159 // Sign-extend negative constants.
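// (Either extension is sound for an "any extend"; sext is preferred for
// negative constants because, e.g., treating i8 -1 as -1 rather than 255
// in the wider type tends to fold better with surrounding arithmetic.
// Illustrative rationale, not a correctness requirement.)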
2160 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2161 if (SC->getAPInt().isNegative())
2162 return getSignExtendExpr(Op, Ty);
2163
2164 // Peel off a truncate cast.
2165 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2166 const SCEV *NewOp = T->getOperand();
2167 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2168 return getAnyExtendExpr(NewOp, Ty);
2169 return getTruncateOrNoop(NewOp, Ty);
2170 }
2171
2172 // Next try a zext cast. If the cast is folded, use it.
2173 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2174 if (!isa<SCEVZeroExtendExpr>(ZExt))
2175 return ZExt;
2176
2177 // Next try a sext cast. If the cast is folded, use it.
2178 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2179 if (!isa<SCEVSignExtendExpr>(SExt))
2180 return SExt;
2181
2182 // Force the cast to be folded into the operands of an addrec.
2183 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2184 SmallVector<const SCEV *, 4> Ops;
2185 for (const SCEV *Op : AR->operands())
2186 Ops.push_back(getAnyExtendExpr(Op, Ty));
2187 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2188 }
2189
2190 // If the expression is obviously signed, use the sext cast value.
2191 if (isa<SCEVSMaxExpr>(Op))
2192 return SExt;
2193
2194 // Absent any other information, use the zext cast value.
2195 return ZExt;
2196 }
2197
2198 /// Process the given Ops list, which is a list of operands to be added under
2199 /// the given scale, and update the given map. This is a helper function for
2200 /// getAddExpr. As an example of what it does, given a sequence of operands
2201 /// that would form an add expression like this:
2202 ///
2203 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2204 ///
2205 /// where A and B are constants, update the map with these values:
2206 ///
2207 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2208 ///
2209 /// and add 13 + A*B*29 to AccumulatedConstant.
2210 /// This will allow getAddExpr to produce this:
2211 ///
2212 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2213 ///
2214 /// This form often exposes folding opportunities that are hidden in
2215 /// the original operand list.
2216 ///
2217 /// Return true iff it appears that any interesting folding opportunities
2218 /// may be exposed. This helps getAddExpr short-circuit extra work in
2219 /// the common case where no interesting opportunities are present, and
2220 /// is also used as a check to avoid infinite recursion.
2221 static bool
2222 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2223 SmallVectorImpl<const SCEV *> &NewOps,
2224 APInt &AccumulatedConstant,
2225 const SCEV *const *Ops, size_t NumOperands,
2226 const APInt &Scale,
2227 ScalarEvolution &SE) {
2228 bool Interesting = false;
2229
2230 // Iterate over the add operands. They are sorted, with constants first.
2231 unsigned i = 0;
2232 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2233 ++i;
2234 // Pull a buried constant out to the outside.
2235 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2236 Interesting = true;
2237 AccumulatedConstant += Scale * C->getAPInt();
2238 }
2239
2240 // Next comes everything else. We're especially interested in multiplies
2241 // here, but they're in the middle, so just visit the rest with one loop.
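// For example (illustrative): visiting the operand 2 * (x + 3 * y) under
// Scale == 5 recurses with NewScale == 10 and records (x, 10) and (y, 30)
// in the map, while a plain operand z is simply recorded as (z, Scale).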
2242 for (; i != NumOperands; ++i) { 2243 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2244 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2245 APInt NewScale = 2246 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2247 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2248 // A multiplication of a constant with another add; recurse. 2249 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2250 Interesting |= 2251 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2252 Add->op_begin(), Add->getNumOperands(), 2253 NewScale, SE); 2254 } else { 2255 // A multiplication of a constant with some other value. Update 2256 // the map. 2257 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); 2258 const SCEV *Key = SE.getMulExpr(MulOps); 2259 auto Pair = M.insert({Key, NewScale}); 2260 if (Pair.second) { 2261 NewOps.push_back(Pair.first->first); 2262 } else { 2263 Pair.first->second += NewScale; 2264 // The map already had an entry for this value, which may indicate 2265 // a folding opportunity. 2266 Interesting = true; 2267 } 2268 } 2269 } else { 2270 // An ordinary operand. Update the map. 2271 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2272 M.insert({Ops[i], Scale}); 2273 if (Pair.second) { 2274 NewOps.push_back(Pair.first->first); 2275 } else { 2276 Pair.first->second += Scale; 2277 // The map already had an entry for this value, which may indicate 2278 // a folding opportunity. 2279 Interesting = true; 2280 } 2281 } 2282 } 2283 2284 return Interesting; 2285 } 2286 2287 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, 2288 const SCEV *LHS, const SCEV *RHS) { 2289 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, 2290 SCEV::NoWrapFlags, unsigned); 2291 switch (BinOp) { 2292 default: 2293 llvm_unreachable("Unsupported binary op"); 2294 case Instruction::Add: 2295 Operation = &ScalarEvolution::getAddExpr; 2296 break; 2297 case Instruction::Sub: 2298 Operation = &ScalarEvolution::getMinusSCEV; 2299 break; 2300 case Instruction::Mul: 2301 Operation = &ScalarEvolution::getMulExpr; 2302 break; 2303 } 2304 2305 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = 2306 Signed ? &ScalarEvolution::getSignExtendExpr 2307 : &ScalarEvolution::getZeroExtendExpr; 2308 2309 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) 2310 auto *NarrowTy = cast<IntegerType>(LHS->getType()); 2311 auto *WideTy = 2312 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); 2313 2314 const SCEV *A = (this->*Extension)( 2315 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); 2316 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0); 2317 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0); 2318 const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0); 2319 return A == B; 2320 } 2321 2322 Optional<SCEV::NoWrapFlags> 2323 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( 2324 const OverflowingBinaryOperator *OBO) { 2325 // It cannot be done any better. 
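// (That is, if the IR operation already carries both nuw and nsw there is
// nothing left to strengthen, so the check below returns None immediately.)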
2326 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) 2327 return None; 2328 2329 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; 2330 2331 if (OBO->hasNoUnsignedWrap()) 2332 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2333 if (OBO->hasNoSignedWrap()) 2334 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2335 2336 bool Deduced = false; 2337 2338 if (OBO->getOpcode() != Instruction::Add && 2339 OBO->getOpcode() != Instruction::Sub && 2340 OBO->getOpcode() != Instruction::Mul) 2341 return None; 2342 2343 const SCEV *LHS = getSCEV(OBO->getOperand(0)); 2344 const SCEV *RHS = getSCEV(OBO->getOperand(1)); 2345 2346 if (!OBO->hasNoUnsignedWrap() && 2347 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2348 /* Signed */ false, LHS, RHS)) { 2349 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2350 Deduced = true; 2351 } 2352 2353 if (!OBO->hasNoSignedWrap() && 2354 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2355 /* Signed */ true, LHS, RHS)) { 2356 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2357 Deduced = true; 2358 } 2359 2360 if (Deduced) 2361 return Flags; 2362 return None; 2363 } 2364 2365 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2366 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2367 // can't-overflow flags for the operation if possible. 2368 static SCEV::NoWrapFlags 2369 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2370 const ArrayRef<const SCEV *> Ops, 2371 SCEV::NoWrapFlags Flags) { 2372 using namespace std::placeholders; 2373 2374 using OBO = OverflowingBinaryOperator; 2375 2376 bool CanAnalyze = 2377 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2378 (void)CanAnalyze; 2379 assert(CanAnalyze && "don't call from other places!"); 2380 2381 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2382 SCEV::NoWrapFlags SignOrUnsignWrap = 2383 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2384 2385 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2386 auto IsKnownNonNegative = [&](const SCEV *S) { 2387 return SE->isKnownNonNegative(S); 2388 }; 2389 2390 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2391 Flags = 2392 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2393 2394 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2395 2396 if (SignOrUnsignWrap != SignOrUnsignMask && 2397 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2398 isa<SCEVConstant>(Ops[0])) { 2399 2400 auto Opcode = [&] { 2401 switch (Type) { 2402 case scAddExpr: 2403 return Instruction::Add; 2404 case scMulExpr: 2405 return Instruction::Mul; 2406 default: 2407 llvm_unreachable("Unexpected SCEV op."); 2408 } 2409 }(); 2410 2411 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2412 2413 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2414 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2415 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2416 Opcode, C, OBO::NoSignedWrap); 2417 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2418 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2419 } 2420 2421 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 
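// For instance (illustrative): for an add with C == 1 on i8, the guaranteed
// no-unsigned-wrap region is [0, 255), so nuw can be inferred whenever the
// unsigned range of the other operand cannot reach 255.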
2422 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2423 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2424 Opcode, C, OBO::NoUnsignedWrap); 2425 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2426 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2427 } 2428 } 2429 2430 // <0,+,nonnegative><nw> is also nuw 2431 // TODO: Add corresponding nsw case 2432 if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) && 2433 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 && 2434 Ops[0]->isZero() && IsKnownNonNegative(Ops[1])) 2435 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2436 2437 // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW 2438 if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && 2439 Ops.size() == 2) { 2440 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0])) 2441 if (UDiv->getOperand(1) == Ops[1]) 2442 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2443 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1])) 2444 if (UDiv->getOperand(1) == Ops[0]) 2445 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2446 } 2447 2448 return Flags; 2449 } 2450 2451 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2452 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2453 } 2454 2455 /// Get a canonical add expression, or something simpler if possible. 2456 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2457 SCEV::NoWrapFlags OrigFlags, 2458 unsigned Depth) { 2459 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2460 "only nuw or nsw allowed"); 2461 assert(!Ops.empty() && "Cannot get empty add!"); 2462 if (Ops.size() == 1) return Ops[0]; 2463 #ifndef NDEBUG 2464 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2465 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2466 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2467 "SCEVAddExpr operand types don't match!"); 2468 unsigned NumPtrs = count_if( 2469 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); }); 2470 assert(NumPtrs <= 1 && "add has at most one pointer operand"); 2471 #endif 2472 2473 // Sort by complexity, this groups all similar expression types together. 2474 GroupByComplexity(Ops, &LI, DT); 2475 2476 // If there are any constants, fold them together. 2477 unsigned Idx = 0; 2478 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2479 ++Idx; 2480 assert(Idx < Ops.size()); 2481 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2482 // We found two constants, fold them together! 2483 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2484 if (Ops.size() == 2) return Ops[0]; 2485 Ops.erase(Ops.begin()+1); // Erase the folded element 2486 LHSC = cast<SCEVConstant>(Ops[0]); 2487 } 2488 2489 // If we are left with a constant zero being added, strip it off. 2490 if (LHSC->getValue()->isZero()) { 2491 Ops.erase(Ops.begin()); 2492 --Idx; 2493 } 2494 2495 if (Ops.size() == 1) return Ops[0]; 2496 } 2497 2498 // Delay expensive flag strengthening until necessary. 2499 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { 2500 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags); 2501 }; 2502 2503 // Limit recursion calls depth. 
2504 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2505 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2506
2507 if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
2508 // Don't strengthen flags if we have no new information.
2509 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2510 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2511 Add->setNoWrapFlags(ComputeFlags(Ops));
2512 return S;
2513 }
2514
2515 // Okay, check to see if the same value occurs in the operand list more than
2516 // once. If so, merge them together into a multiply expression. Since we
2517 // sorted the list, these values are required to be adjacent.
2518 Type *Ty = Ops[0]->getType();
2519 bool FoundMatch = false;
2520 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2521 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2522 // Scan ahead to count how many equal operands there are.
2523 unsigned Count = 2;
2524 while (i+Count != e && Ops[i+Count] == Ops[i])
2525 ++Count;
2526 // Merge the values into a multiply.
2527 const SCEV *Scale = getConstant(Ty, Count);
2528 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2529 if (Ops.size() == Count)
2530 return Mul;
2531 Ops[i] = Mul;
2532 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2533 --i; e -= Count - 1;
2534 FoundMatch = true;
2535 }
2536 if (FoundMatch)
2537 return getAddExpr(Ops, OrigFlags, Depth + 1);
2538
2539 // Check for truncates. If all the operands are truncated from the same
2540 // type, see if factoring out the truncate would permit the result to be
2541 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(anyext(n)*x + anyext(m)*y)
2542 // if the contents of the resulting outer trunc fold to something simple.
2543 auto FindTruncSrcType = [&]() -> Type * {
2544 // We're ultimately looking to fold an addrec of truncs and muls of only
2545 // constants and truncs, so if we find any other types of SCEV
2546 // as operands of the addrec then we bail and return nullptr here.
2547 // Otherwise, we return the type of the operand of a trunc that we find.
2548 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2549 return T->getOperand()->getType();
2550 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2551 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2552 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2553 return T->getOperand()->getType();
2554 }
2555 return nullptr;
2556 };
2557 if (auto *SrcType = FindTruncSrcType()) {
2558 SmallVector<const SCEV *, 8> LargeOps;
2559 bool Ok = true;
2560 // Check all the operands to see if they can be represented in the
2561 // source type of the truncate.
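// e.g. (illustrative): with SrcType == i64 and i32 operands trunc(x) and
// 3 * trunc(y), LargeOps becomes { x, anyext(3) * y }; if their i64 sum
// folds to a constant or a SCEVUnknown, we return the truncate of that fold.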
2562 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2563 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2564 if (T->getOperand()->getType() != SrcType) { 2565 Ok = false; 2566 break; 2567 } 2568 LargeOps.push_back(T->getOperand()); 2569 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2570 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2571 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2572 SmallVector<const SCEV *, 8> LargeMulOps; 2573 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2574 if (const SCEVTruncateExpr *T = 2575 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2576 if (T->getOperand()->getType() != SrcType) { 2577 Ok = false; 2578 break; 2579 } 2580 LargeMulOps.push_back(T->getOperand()); 2581 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2582 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2583 } else { 2584 Ok = false; 2585 break; 2586 } 2587 } 2588 if (Ok) 2589 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2590 } else { 2591 Ok = false; 2592 break; 2593 } 2594 } 2595 if (Ok) { 2596 // Evaluate the expression in the larger type. 2597 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); 2598 // If it folds to something simple, use it. Otherwise, don't. 2599 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2600 return getTruncateExpr(Fold, Ty); 2601 } 2602 } 2603 2604 if (Ops.size() == 2) { 2605 // Check if we have an expression of the form ((X + C1) - C2), where C1 and 2606 // C2 can be folded in a way that allows retaining wrapping flags of (X + 2607 // C1). 2608 const SCEV *A = Ops[0]; 2609 const SCEV *B = Ops[1]; 2610 auto *AddExpr = dyn_cast<SCEVAddExpr>(B); 2611 auto *C = dyn_cast<SCEVConstant>(A); 2612 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) { 2613 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt(); 2614 auto C2 = C->getAPInt(); 2615 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap; 2616 2617 APInt ConstAdd = C1 + C2; 2618 auto AddFlags = AddExpr->getNoWrapFlags(); 2619 // Adding a smaller constant is NUW if the original AddExpr was NUW. 2620 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) && 2621 ConstAdd.ule(C1)) { 2622 PreservedFlags = 2623 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); 2624 } 2625 2626 // Adding a constant with the same sign and small magnitude is NSW, if the 2627 // original AddExpr was NSW. 2628 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) && 2629 C1.isSignBitSet() == ConstAdd.isSignBitSet() && 2630 ConstAdd.abs().ule(C1.abs())) { 2631 PreservedFlags = 2632 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW); 2633 } 2634 2635 if (PreservedFlags != SCEV::FlagAnyWrap) { 2636 SmallVector<const SCEV *, 4> NewOps(AddExpr->operands()); 2637 NewOps[0] = getConstant(ConstAdd); 2638 return getAddExpr(NewOps, PreservedFlags); 2639 } 2640 } 2641 } 2642 2643 // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y) 2644 if (Ops.size() == 2) { 2645 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]); 2646 if (Mul && Mul->getNumOperands() == 2 && 2647 Mul->getOperand(0)->isAllOnesValue()) { 2648 const SCEV *X; 2649 const SCEV *Y; 2650 if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) { 2651 return getMulExpr(Y, getUDivExpr(X, Y)); 2652 } 2653 } 2654 } 2655 2656 // Skip past any other cast SCEVs. 
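// (Ops was sorted by GroupByComplexity above, so everything that compares
// below scAddExpr sits contiguously at the front and a linear skip is
// enough; illustrative note on why no full scan is needed.)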
2657 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2658 ++Idx;
2659
2660 // If there are add operands, they would be next.
2661 if (Idx < Ops.size()) {
2662 bool DeletedAdd = false;
2663 // If the original flags and all inlined SCEVAddExprs are NUW, use the
2664 // common NUW flag for the expression after inlining. Other flags cannot be
2665 // preserved, because they may depend on the original order of operations.
2666 SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2667 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2668 if (Ops.size() > AddOpsInlineThreshold ||
2669 Add->getNumOperands() > AddOpsInlineThreshold)
2670 break;
2671 // If we have an add, expand the add operands onto the end of the operands
2672 // list.
2673 Ops.erase(Ops.begin()+Idx);
2674 Ops.append(Add->op_begin(), Add->op_end());
2675 DeletedAdd = true;
2676 CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2677 }
2678
2679 // If we deleted at least one add, we added operands to the end of the list,
2680 // and they are not necessarily sorted. Recurse to resort and resimplify
2681 // any operands we just acquired.
2682 if (DeletedAdd)
2683 return getAddExpr(Ops, CommonFlags, Depth + 1);
2684 }
2685
2686 // Skip over the add expressions until we get to a multiply.
2687 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2688 ++Idx;
2689
2690 // Check to see if there are any folding opportunities present with
2691 // operands multiplied by constant values.
2692 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2693 uint64_t BitWidth = getTypeSizeInBits(Ty);
2694 DenseMap<const SCEV *, APInt> M;
2695 SmallVector<const SCEV *, 8> NewOps;
2696 APInt AccumulatedConstant(BitWidth, 0);
2697 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2698 Ops.data(), Ops.size(),
2699 APInt(BitWidth, 1), *this)) {
2700 struct APIntCompare {
2701 bool operator()(const APInt &LHS, const APInt &RHS) const {
2702 return LHS.ult(RHS);
2703 }
2704 };
2705
2706 // Some interesting folding opportunity is present, so it's worthwhile to
2707 // re-generate the operands list. Group the operands by constant scale,
2708 // to avoid multiplying by the same constant scale multiple times.
2709 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2710 for (const SCEV *NewOp : NewOps)
2711 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2712 // Re-generate the operands list.
2713 Ops.clear();
2714 if (AccumulatedConstant != 0)
2715 Ops.push_back(getConstant(AccumulatedConstant));
2716 for (auto &MulOp : MulOpLists) {
2717 if (MulOp.first == 1) {
2718 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
2719 } else if (MulOp.first != 0) {
2720 Ops.push_back(getMulExpr(
2721 getConstant(MulOp.first),
2722 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2723 SCEV::FlagAnyWrap, Depth + 1));
2724 }
2725 }
2726 if (Ops.empty())
2727 return getZero(Ty);
2728 if (Ops.size() == 1)
2729 return Ops[0];
2730 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2731 }
2732 }
2733
2734 // If we are adding something to a multiply expression, make sure the
2735 // something is not already an operand of the multiply. If so, merge it into
2736 // the multiply.
2737 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2738 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2739 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2740 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2741 if (isa<SCEVConstant>(MulOpSCEV)) 2742 continue; 2743 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2744 if (MulOpSCEV == Ops[AddOp]) { 2745 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2746 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2747 if (Mul->getNumOperands() != 2) { 2748 // If the multiply has more than two operands, we must get the 2749 // Y*Z term. 2750 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2751 Mul->op_begin()+MulOp); 2752 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2753 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2754 } 2755 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2756 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2757 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2758 SCEV::FlagAnyWrap, Depth + 1); 2759 if (Ops.size() == 2) return OuterMul; 2760 if (AddOp < Idx) { 2761 Ops.erase(Ops.begin()+AddOp); 2762 Ops.erase(Ops.begin()+Idx-1); 2763 } else { 2764 Ops.erase(Ops.begin()+Idx); 2765 Ops.erase(Ops.begin()+AddOp-1); 2766 } 2767 Ops.push_back(OuterMul); 2768 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2769 } 2770 2771 // Check this multiply against other multiplies being added together. 2772 for (unsigned OtherMulIdx = Idx+1; 2773 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2774 ++OtherMulIdx) { 2775 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2776 // If MulOp occurs in OtherMul, we can fold the two multiplies 2777 // together. 2778 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2779 OMulOp != e; ++OMulOp) 2780 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2781 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2782 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2783 if (Mul->getNumOperands() != 2) { 2784 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2785 Mul->op_begin()+MulOp); 2786 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2787 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2788 } 2789 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2790 if (OtherMul->getNumOperands() != 2) { 2791 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2792 OtherMul->op_begin()+OMulOp); 2793 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2794 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2795 } 2796 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2797 const SCEV *InnerMulSum = 2798 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2799 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2800 SCEV::FlagAnyWrap, Depth + 1); 2801 if (Ops.size() == 2) return OuterMul; 2802 Ops.erase(Ops.begin()+Idx); 2803 Ops.erase(Ops.begin()+OtherMulIdx-1); 2804 Ops.push_back(OuterMul); 2805 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2806 } 2807 } 2808 } 2809 } 2810 2811 // If there are any add recurrences in the operands list, see if any other 2812 // added values are loop invariant. If so, we can fold them into the 2813 // recurrence. 2814 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2815 ++Idx; 2816 2817 // Scan over all recurrences, trying to fold loop invariants into them. 
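// e.g. (illustrative): 5 + {3,+,1}<L> folds to {8,+,1}<L>, and more generally
// n + {S,+,X}<L> with loop-invariant n folds to {n+S,+,X}<L>, subject to the
// flag handling below.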
2818 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2819 // Scan all of the other operands to this add and add them to the vector if
2820 // they are loop invariant w.r.t. the recurrence.
2821 SmallVector<const SCEV *, 8> LIOps;
2822 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2823 const Loop *AddRecLoop = AddRec->getLoop();
2824 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2825 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2826 LIOps.push_back(Ops[i]);
2827 Ops.erase(Ops.begin()+i);
2828 --i; --e;
2829 }
2830
2831 // If we found some loop invariants, fold them into the recurrence.
2832 if (!LIOps.empty()) {
2833 // Compute nowrap flags for the addition of the loop-invariant ops and
2834 // the addrec. Temporarily push it as an operand for that purpose. These
2835 // flags are valid in the scope of the addrec only.
2836 LIOps.push_back(AddRec);
2837 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
2838 LIOps.pop_back();
2839
2840 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2841 LIOps.push_back(AddRec->getStart());
2842
2843 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2844
2845 // It is not in general safe to propagate flags valid on an add within
2846 // the addrec scope to one outside it. We must prove that the inner
2847 // scope is guaranteed to execute if the outer one does to be able to
2848 // safely propagate. We know the program is undefined if poison is
2849 // produced on the inner scoped addrec. We also know that *for this use*
2850 // the outer scoped add can't overflow (because of the flags we just
2851 // computed for the inner scoped add) without the program being undefined.
2852 // Proving that entry to the outer scope necessitates entry to the inner
2853 // scope thus proves the program undefined if the flags would be violated
2854 // in the outer scope.
2855 SCEV::NoWrapFlags AddFlags = Flags;
2856 if (AddFlags != SCEV::FlagAnyWrap) {
2857 auto *DefI = getDefiningScopeBound(LIOps);
2858 auto *ReachI = &*AddRecLoop->getHeader()->begin();
2859 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2860 AddFlags = SCEV::FlagAnyWrap;
2861 }
2862 AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1);
2863
2864 // Build the new addrec. Propagate the NUW and NSW flags if both the
2865 // outer add and the inner addrec are guaranteed to have no overflow.
2866 // Always propagate NW.
2867 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2868 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2869
2870 // If all of the other operands were loop invariant, we are done.
2871 if (Ops.size() == 1) return NewRec;
2872
2873 // Otherwise, add the folded AddRec to the non-invariant parts.
2874 for (unsigned i = 0;; ++i)
2875 if (Ops[i] == AddRec) {
2876 Ops[i] = NewRec;
2877 break;
2878 }
2879 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2880 }
2881
2882 // Okay, if there weren't any loop invariants to be folded, check to see if
2883 // there are multiple AddRec's with the same loop induction variable being
2884 // added together. If so, we can fold them.
2885 for (unsigned OtherIdx = Idx+1;
2886 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2887 ++OtherIdx) {
2888 // We expect the AddRecExpr's to be sorted in reverse dominance order,
2889 // so that the 1st found AddRecExpr is dominated by all others.
2890 assert(DT.dominates( 2891 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2892 AddRec->getLoop()->getHeader()) && 2893 "AddRecExprs are not sorted in reverse dominance order?"); 2894 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2895 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2896 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2897 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2898 ++OtherIdx) { 2899 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2900 if (OtherAddRec->getLoop() == AddRecLoop) { 2901 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2902 i != e; ++i) { 2903 if (i >= AddRecOps.size()) { 2904 AddRecOps.append(OtherAddRec->op_begin()+i, 2905 OtherAddRec->op_end()); 2906 break; 2907 } 2908 SmallVector<const SCEV *, 2> TwoOps = { 2909 AddRecOps[i], OtherAddRec->getOperand(i)}; 2910 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2911 } 2912 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2913 } 2914 } 2915 // Step size has changed, so we cannot guarantee no self-wraparound. 2916 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2917 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2918 } 2919 } 2920 2921 // Otherwise couldn't fold anything into this recurrence. Move onto the 2922 // next one. 2923 } 2924 2925 // Okay, it looks like we really DO need an add expr. Check to see if we 2926 // already have one, otherwise create a new one. 2927 return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); 2928 } 2929 2930 const SCEV * 2931 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, 2932 SCEV::NoWrapFlags Flags) { 2933 FoldingSetNodeID ID; 2934 ID.AddInteger(scAddExpr); 2935 for (const SCEV *Op : Ops) 2936 ID.AddPointer(Op); 2937 void *IP = nullptr; 2938 SCEVAddExpr *S = 2939 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2940 if (!S) { 2941 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2942 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2943 S = new (SCEVAllocator) 2944 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2945 UniqueSCEVs.InsertNode(S, IP); 2946 registerUser(S, Ops); 2947 } 2948 S->setNoWrapFlags(Flags); 2949 return S; 2950 } 2951 2952 const SCEV * 2953 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, 2954 const Loop *L, SCEV::NoWrapFlags Flags) { 2955 FoldingSetNodeID ID; 2956 ID.AddInteger(scAddRecExpr); 2957 for (const SCEV *Op : Ops) 2958 ID.AddPointer(Op); 2959 ID.AddPointer(L); 2960 void *IP = nullptr; 2961 SCEVAddRecExpr *S = 2962 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2963 if (!S) { 2964 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2965 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2966 S = new (SCEVAllocator) 2967 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); 2968 UniqueSCEVs.InsertNode(S, IP); 2969 LoopUsers[L].push_back(S); 2970 registerUser(S, Ops); 2971 } 2972 setNoWrapFlags(S, Flags); 2973 return S; 2974 } 2975 2976 const SCEV * 2977 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, 2978 SCEV::NoWrapFlags Flags) { 2979 FoldingSetNodeID ID; 2980 ID.AddInteger(scMulExpr); 2981 for (const SCEV *Op : Ops) 2982 ID.AddPointer(Op); 2983 void *IP = nullptr; 2984 SCEVMulExpr *S = 2985 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2986 if (!S) { 2987 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2988 
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2989     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2990                                         O, Ops.size());
2991     UniqueSCEVs.InsertNode(S, IP);
2992     registerUser(S, Ops);
2993   }
2994   S->setNoWrapFlags(Flags);
2995   return S;
2996 }
2997
2998 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2999   uint64_t k = i*j;
3000   if (j > 1 && k / j != i) Overflow = true;
3001   return k;
3002 }
3003
3004 /// Compute the result of "n choose k", the binomial coefficient. If an
3005 /// intermediate computation overflows, Overflow will be set and the return
3006 /// value will be garbage. Overflow is not cleared in the absence of overflow.
3007 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
3008   // We use the multiplicative formula:
3009   //   n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
3010   // At each step we multiply the running result by the next numerator term,
3011   // n-(i-1), and then divide by i; the intermediate value is then C(n, i),
3012   // so the division always produces an integral result and helps reduce the
3013   // chance of overflow in the intermediate computations. However, we can
3014   // still overflow even when the final result would fit.
3015
3016   if (n == 0 || n == k) return 1;
3017   if (k > n) return 0;
3018
3019   if (k > n/2)
3020     k = n-k;
3021
3022   uint64_t r = 1;
3023   for (uint64_t i = 1; i <= k; ++i) {
3024     r = umul_ov(r, n-(i-1), Overflow);
3025     r /= i;
3026   }
3027   return r;
3028 }
3029
3030 /// Determine if any of the operands in this SCEV are a constant or if
3031 /// any of the add or multiply expressions in this SCEV contain a constant.
3032 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
3033   struct FindConstantInAddMulChain {
3034     bool FoundConstant = false;
3035
3036     bool follow(const SCEV *S) {
3037       FoundConstant |= isa<SCEVConstant>(S);
3038       return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3039     }
3040
3041     bool isDone() const {
3042       return FoundConstant;
3043     }
3044   };
3045
3046   FindConstantInAddMulChain F;
3047   SCEVTraversal<FindConstantInAddMulChain> ST(F);
3048   ST.visitAll(StartExpr);
3049   return F.FoundConstant;
3050 }
3051
3052 /// Get a canonical multiply expression, or something simpler if possible.
3053 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
3054                                         SCEV::NoWrapFlags OrigFlags,
3055                                         unsigned Depth) {
3056   assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
3057          "only nuw or nsw allowed");
3058   assert(!Ops.empty() && "Cannot get empty mul!");
3059   if (Ops.size() == 1) return Ops[0];
3060 #ifndef NDEBUG
3061   Type *ETy = Ops[0]->getType();
3062   assert(!ETy->isPointerTy());
3063   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3064     assert(Ops[i]->getType() == ETy &&
3065            "SCEVMulExpr operand types don't match!");
3066 #endif
3067
3068   // Sort by complexity, this groups all similar expression types together.
3069   GroupByComplexity(Ops, &LI, DT);
3070
3071   // If there are any constants, fold them together.
3072   unsigned Idx = 0;
3073   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3074     ++Idx;
3075     assert(Idx < Ops.size());
3076     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3077       // We found two constants, fold them together!
3078       Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3079       if (Ops.size() == 2) return Ops[0];
3080       Ops.erase(Ops.begin()+1); // Erase the folded element
3081       LHSC = cast<SCEVConstant>(Ops[0]);
3082     }
3083
3084     // If we have a multiply of zero, it will always be zero.
3085     if (LHSC->getValue()->isZero())
3086       return LHSC;
3087
3088     // If we are left with a constant one being multiplied, strip it off.
3089     if (LHSC->getValue()->isOne()) {
3090       Ops.erase(Ops.begin());
3091       --Idx;
3092     }
3093
3094     if (Ops.size() == 1)
3095       return Ops[0];
3096   }
3097
3098   // Delay expensive flag strengthening until necessary.
3099   auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3100     return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3101   };
3102
3103   // Limit the depth of recursive calls.
3104   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3105     return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3106
3107   if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
3108     // Don't strengthen flags if we have no new information.
3109     SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3110     if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3111       Mul->setNoWrapFlags(ComputeFlags(Ops));
3112     return S;
3113   }
3114
3115   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3116     if (Ops.size() == 2) {
3117       // C1*(C2+V) -> C1*C2 + C1*V
3118       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3119         // If any of Add's ops are Adds or Muls with a constant, apply this
3120         // transformation as well.
3121         //
3122         // TODO: There are some cases where this transformation is not
3123         // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
3124         // this transformation should be narrowed down.
3125         if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) {
3126           const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0),
3127                                        SCEV::FlagAnyWrap, Depth + 1);
3128           const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1),
3129                                        SCEV::FlagAnyWrap, Depth + 1);
3130           return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
3131         }
3132
3133       if (Ops[0]->isAllOnesValue()) {
3134         // If we have a mul by -1 of an add, try distributing the -1 among the
3135         // add operands.
3136         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3137           SmallVector<const SCEV *, 4> NewOps;
3138           bool AnyFolded = false;
3139           for (const SCEV *AddOp : Add->operands()) {
3140             const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3141                                          Depth + 1);
3142             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3143             NewOps.push_back(Mul);
3144           }
3145           if (AnyFolded)
3146             return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3147         } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3148           // Negation preserves a recurrence's no self-wrap property.
3149           SmallVector<const SCEV *, 4> Operands;
3150           for (const SCEV *AddRecOp : AddRec->operands())
3151             Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3152                                           Depth + 1));
3153
3154           return getAddRecExpr(Operands, AddRec->getLoop(),
3155                                AddRec->getNoWrapFlags(SCEV::FlagNW));
3156         }
3157       }
3158     }
3159   }
3160
3161   // Skip over the add expressions until we get to a multiply.
3162   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3163     ++Idx;
3164
3165   // If there are mul operands, inline them all into this expression.
3166   if (Idx < Ops.size()) {
3167     bool DeletedMul = false;
3168     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3169       if (Ops.size() > MulOpsInlineThreshold)
3170         break;
3171       // If we have a mul, expand the mul operands onto the end of the
3172       // operands list.
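      // As a hand-worked illustration (not drawn from a specific test case):
      // inlining turns (2 * (4 * %x)) into the flat operand list {2, 4, %x};
      // the recursive call below then re-sorts the list and folds the
      // constants, yielding (8 * %x).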
3173 Ops.erase(Ops.begin()+Idx); 3174 Ops.append(Mul->op_begin(), Mul->op_end()); 3175 DeletedMul = true; 3176 } 3177 3178 // If we deleted at least one mul, we added operands to the end of the 3179 // list, and they are not necessarily sorted. Recurse to resort and 3180 // resimplify any operands we just acquired. 3181 if (DeletedMul) 3182 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3183 } 3184 3185 // If there are any add recurrences in the operands list, see if any other 3186 // added values are loop invariant. If so, we can fold them into the 3187 // recurrence. 3188 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 3189 ++Idx; 3190 3191 // Scan over all recurrences, trying to fold loop invariants into them. 3192 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 3193 // Scan all of the other operands to this mul and add them to the vector 3194 // if they are loop invariant w.r.t. the recurrence. 3195 SmallVector<const SCEV *, 8> LIOps; 3196 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 3197 const Loop *AddRecLoop = AddRec->getLoop(); 3198 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3199 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 3200 LIOps.push_back(Ops[i]); 3201 Ops.erase(Ops.begin()+i); 3202 --i; --e; 3203 } 3204 3205 // If we found some loop invariants, fold them into the recurrence. 3206 if (!LIOps.empty()) { 3207 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 3208 SmallVector<const SCEV *, 4> NewOps; 3209 NewOps.reserve(AddRec->getNumOperands()); 3210 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 3211 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 3212 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 3213 SCEV::FlagAnyWrap, Depth + 1)); 3214 3215 // Build the new addrec. Propagate the NUW and NSW flags if both the 3216 // outer mul and the inner addrec are guaranteed to have no overflow. 3217 // 3218 // No self-wrap cannot be guaranteed after changing the step size, but 3219 // will be inferred if either NUW or NSW is true. 3220 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); 3221 const SCEV *NewRec = getAddRecExpr( 3222 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); 3223 3224 // If all of the other operands were loop invariant, we are done. 3225 if (Ops.size() == 1) return NewRec; 3226 3227 // Otherwise, multiply the folded AddRec by the non-invariant parts. 3228 for (unsigned i = 0;; ++i) 3229 if (Ops[i] == AddRec) { 3230 Ops[i] = NewRec; 3231 break; 3232 } 3233 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3234 } 3235 3236 // Okay, if there weren't any loop invariants to be folded, check to see 3237 // if there are multiple AddRec's with the same loop induction variable 3238 // being multiplied together. If so, we can fold them. 3239 3240 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 3241 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 3242 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 3243 // ]]],+,...up to x=2n}. 3244 // Note that the arguments to choose() are always integers with values 3245 // known at compile time, never SCEV objects. 3246 // 3247 // The implementation avoids pointless extra computations when the two 3248 // addrec's are of different length (mathematically, it's equivalent to 3249 // an infinite stream of zeros on the right). 
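    // As a hand-worked sanity check of the formula above (not taken from the
    // cited references): for two affine recurrences,
    //   {a,+,b}<L> * {c,+,d}<L> = {a*c,+,a*d + b*c + b*d,+,2*b*d}<L>
    // because (a + b*n)*(c + d*n) = a*c + (a*d + b*c)*n + b*d*n^2, and in
    // chains-of-recurrences form n = {0,+,1} and n^2 = {0,+,1,+,2}.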
3250     bool OpsModified = false;
3251     for (unsigned OtherIdx = Idx+1;
3252          OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3253          ++OtherIdx) {
3254       const SCEVAddRecExpr *OtherAddRec =
3255           dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3256       if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3257         continue;
3258
3259       // Limit max number of arguments to avoid creation of unreasonably big
3260       // SCEVAddRecs with very complex operands.
3261       if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3262           MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3263         continue;
3264
3265       bool Overflow = false;
3266       Type *Ty = AddRec->getType();
3267       bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3268       SmallVector<const SCEV*, 7> AddRecOps;
3269       for (int x = 0, xe = AddRec->getNumOperands() +
3270              OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3271         SmallVector<const SCEV *, 7> SumOps;
3272         for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3273           uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3274           for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3275                  ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3276                z < ze && !Overflow; ++z) {
3277             uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3278             uint64_t Coeff;
3279             if (LargerThan64Bits)
3280               Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3281             else
3282               Coeff = Coeff1*Coeff2;
3283             const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3284             const SCEV *Term1 = AddRec->getOperand(y-z);
3285             const SCEV *Term2 = OtherAddRec->getOperand(z);
3286             SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3287                                         SCEV::FlagAnyWrap, Depth + 1));
3288           }
3289         }
3290         if (SumOps.empty())
3291           SumOps.push_back(getZero(Ty));
3292         AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3293       }
3294       if (!Overflow) {
3295         const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3296                                               SCEV::FlagAnyWrap);
3297         if (Ops.size() == 2) return NewAddRec;
3298         Ops[Idx] = NewAddRec;
3299         Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3300         OpsModified = true;
3301         AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3302         if (!AddRec)
3303           break;
3304       }
3305     }
3306     if (OpsModified)
3307       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3308
3309     // Otherwise couldn't fold anything into this recurrence. Move on to the
3310     // next one.
3311   }
3312
3313   // Okay, it looks like we really DO need a mul expr. Check to see if we
3314   // already have one, otherwise create a new one.
3315   return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3316 }
3317
3318 /// Represents an unsigned remainder expression based on unsigned division.
3319 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3320                                          const SCEV *RHS) {
3321   assert(getEffectiveSCEVType(LHS->getType()) ==
3322              getEffectiveSCEVType(RHS->getType()) &&
3323          "SCEVURemExpr operand types don't match!");
3324
3325   // Short-circuit easy cases.
3326   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3327     // If the constant is one, the result is trivial.
3328     if (RHSC->getValue()->isOne())
3329       return getZero(LHS->getType()); // X urem 1 --> 0
3330
3331     // If the constant is a power of two, fold into a zext(trunc(LHS)).
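    // A hand-worked instance of this fold: for an i64 %x,
    //   %x urem 8 --> (zext i3 (trunc i64 %x to i3) to i64)
    // since the remainder modulo 2^3 is exactly the low three bits of %x.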
3332     if (RHSC->getAPInt().isPowerOf2()) {
3333       Type *FullTy = LHS->getType();
3334       Type *TruncTy =
3335           IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3336       return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3337     }
3338   }
3339
3340   // Fall back to the general expansion: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
3341   const SCEV *UDiv = getUDivExpr(LHS, RHS);
3342   const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3343   return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3344 }
3345
3346 /// Get a canonical unsigned division expression, or something simpler if
3347 /// possible.
3348 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3349                                          const SCEV *RHS) {
3350   assert(!LHS->getType()->isPointerTy() &&
3351          "SCEVUDivExpr operand can't be pointer!");
3352   assert(LHS->getType() == RHS->getType() &&
3353          "SCEVUDivExpr operand types don't match!");
3354
3355   FoldingSetNodeID ID;
3356   ID.AddInteger(scUDivExpr);
3357   ID.AddPointer(LHS);
3358   ID.AddPointer(RHS);
3359   void *IP = nullptr;
3360   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3361     return S;
3362
3363   // 0 udiv Y == 0
3364   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3365     if (LHSC->getValue()->isZero())
3366       return LHS;
3367
3368   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3369     if (RHSC->getValue()->isOne())
3370       return LHS; // X udiv 1 --> X
3371     // If the denominator is zero, the result of the udiv is undefined. Don't
3372     // try to analyze it, because the resolution chosen here may differ from
3373     // the resolution chosen in other parts of the compiler.
3374     if (!RHSC->getValue()->isZero()) {
3375       // Determine if the division can be folded into the operands of
3376       // the dividend.
3377       // TODO: Generalize this to non-constants by using known-bits information.
3378       Type *Ty = LHS->getType();
3379       unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3380       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3381       // For non-power-of-two values, effectively round the value up to the
3382       // nearest power of two.
3383       if (!RHSC->getAPInt().isPowerOf2())
3384         ++MaxShiftAmt;
3385       IntegerType *ExtTy =
3386           IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3387       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3388         if (const SCEVConstant *Step =
3389                 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3390           // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3391           const APInt &StepInt = Step->getAPInt();
3392           const APInt &DivInt = RHSC->getAPInt();
3393           if (!StepInt.urem(DivInt) &&
3394               getZeroExtendExpr(AR, ExtTy) ==
3395                   getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3396                                 getZeroExtendExpr(Step, ExtTy),
3397                                 AR->getLoop(), SCEV::FlagAnyWrap)) {
3398             SmallVector<const SCEV *, 4> Operands;
3399             for (const SCEV *Op : AR->operands())
3400               Operands.push_back(getUDivExpr(Op, RHS));
3401             return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3402           }
3403           // Get a canonical UDivExpr for a recurrence:
3404           // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3405           // We can currently only fold X%N if X is constant.
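          // A hand-worked instance (illustration only): for {5,+,2}/4, the
          // step 2 divides the divisor 4 and 5 % 2 == 1, so the start is
          // lowered to 4 and the expression is canonicalized to {4,+,2}/4.
          // Both spellings produce 1,1,2,2,3,..., so structurally different
          // forms of the same udiv now unify in the cache.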
3406 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3407 if (StartC && !DivInt.urem(StepInt) && 3408 getZeroExtendExpr(AR, ExtTy) == 3409 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3410 getZeroExtendExpr(Step, ExtTy), 3411 AR->getLoop(), SCEV::FlagAnyWrap)) { 3412 const APInt &StartInt = StartC->getAPInt(); 3413 const APInt &StartRem = StartInt.urem(StepInt); 3414 if (StartRem != 0) { 3415 const SCEV *NewLHS = 3416 getAddRecExpr(getConstant(StartInt - StartRem), Step, 3417 AR->getLoop(), SCEV::FlagNW); 3418 if (LHS != NewLHS) { 3419 LHS = NewLHS; 3420 3421 // Reset the ID to include the new LHS, and check if it is 3422 // already cached. 3423 ID.clear(); 3424 ID.AddInteger(scUDivExpr); 3425 ID.AddPointer(LHS); 3426 ID.AddPointer(RHS); 3427 IP = nullptr; 3428 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 3429 return S; 3430 } 3431 } 3432 } 3433 } 3434 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3435 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3436 SmallVector<const SCEV *, 4> Operands; 3437 for (const SCEV *Op : M->operands()) 3438 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3439 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3440 // Find an operand that's safely divisible. 3441 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3442 const SCEV *Op = M->getOperand(i); 3443 const SCEV *Div = getUDivExpr(Op, RHSC); 3444 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3445 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3446 Operands[i] = Div; 3447 return getMulExpr(Operands); 3448 } 3449 } 3450 } 3451 3452 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3453 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3454 if (auto *DivisorConstant = 3455 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3456 bool Overflow = false; 3457 APInt NewRHS = 3458 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3459 if (Overflow) { 3460 return getConstant(RHSC->getType(), 0, false); 3461 } 3462 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3463 } 3464 } 3465 3466 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3467 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3468 SmallVector<const SCEV *, 4> Operands; 3469 for (const SCEV *Op : A->operands()) 3470 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3471 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3472 Operands.clear(); 3473 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3474 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3475 if (isa<SCEVUDivExpr>(Op) || 3476 getMulExpr(Op, RHS) != A->getOperand(i)) 3477 break; 3478 Operands.push_back(Op); 3479 } 3480 if (Operands.size() == A->getNumOperands()) 3481 return getAddExpr(Operands); 3482 } 3483 } 3484 3485 // Fold if both operands are constant. 3486 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) 3487 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt())); 3488 } 3489 } 3490 3491 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3492 // changes). Make sure we get a new one. 
3493 IP = nullptr; 3494 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3495 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3496 LHS, RHS); 3497 UniqueSCEVs.InsertNode(S, IP); 3498 registerUser(S, {LHS, RHS}); 3499 return S; 3500 } 3501 3502 APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3503 APInt A = C1->getAPInt().abs(); 3504 APInt B = C2->getAPInt().abs(); 3505 uint32_t ABW = A.getBitWidth(); 3506 uint32_t BBW = B.getBitWidth(); 3507 3508 if (ABW > BBW) 3509 B = B.zext(ABW); 3510 else if (ABW < BBW) 3511 A = A.zext(BBW); 3512 3513 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3514 } 3515 3516 /// Get a canonical unsigned division expression, or something simpler if 3517 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3518 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3519 /// it's not exact because the udiv may be clearing bits. 3520 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3521 const SCEV *RHS) { 3522 // TODO: we could try to find factors in all sorts of things, but for now we 3523 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3524 // end of this file for inspiration. 3525 3526 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3527 if (!Mul || !Mul->hasNoUnsignedWrap()) 3528 return getUDivExpr(LHS, RHS); 3529 3530 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3531 // If the mulexpr multiplies by a constant, then that constant must be the 3532 // first element of the mulexpr. 3533 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3534 if (LHSCst == RHSCst) { 3535 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands())); 3536 return getMulExpr(Operands); 3537 } 3538 3539 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3540 // that there's a factor provided by one of the other terms. We need to 3541 // check. 3542 APInt Factor = gcd(LHSCst, RHSCst); 3543 if (!Factor.isIntN(1)) { 3544 LHSCst = 3545 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3546 RHSCst = 3547 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3548 SmallVector<const SCEV *, 2> Operands; 3549 Operands.push_back(LHSCst); 3550 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3551 LHS = getMulExpr(Operands); 3552 RHS = RHSCst; 3553 Mul = dyn_cast<SCEVMulExpr>(LHS); 3554 if (!Mul) 3555 return getUDivExactExpr(LHS, RHS); 3556 } 3557 } 3558 } 3559 3560 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3561 if (Mul->getOperand(i) == RHS) { 3562 SmallVector<const SCEV *, 2> Operands; 3563 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3564 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3565 return getMulExpr(Operands); 3566 } 3567 } 3568 3569 return getUDivExpr(LHS, RHS); 3570 } 3571 3572 /// Get an add recurrence expression for the specified loop. Simplify the 3573 /// expression as much as possible. 
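/// For example (an illustrative call, not taken from a caller in this file),
/// getAddRecExpr(Start, Step, L, SCEV::FlagAnyWrap) builds {Start,+,Step}<L>,
/// while a zero step folds the recurrence away entirely:
/// {Start,+,0}<L> --> Start.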
3574 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3575                                            const Loop *L,
3576                                            SCEV::NoWrapFlags Flags) {
3577   SmallVector<const SCEV *, 4> Operands;
3578   Operands.push_back(Start);
3579   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3580     if (StepChrec->getLoop() == L) {
3581       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3582       return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3583     }
3584
3585   Operands.push_back(Step);
3586   return getAddRecExpr(Operands, L, Flags);
3587 }
3588
3589 /// Get an add recurrence expression for the specified loop. Simplify the
3590 /// expression as much as possible.
3591 const SCEV *
3592 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3593                                const Loop *L, SCEV::NoWrapFlags Flags) {
3594   if (Operands.size() == 1) return Operands[0];
3595 #ifndef NDEBUG
3596   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3597   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3598     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3599            "SCEVAddRecExpr operand types don't match!");
3600     assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3601   }
3602   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3603     assert(isLoopInvariant(Operands[i], L) &&
3604            "SCEVAddRecExpr operand is not loop-invariant!");
3605 #endif
3606
3607   if (Operands.back()->isZero()) {
3608     Operands.pop_back();
3609     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3610   }
3611
3612   // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
3613   // use that information to infer NUW and NSW flags. However, computing a
3614   // BE count requires calling getAddRecExpr, so we may not yet have a
3615   // meaningful BE count at this point (and if we don't, we'd be stuck
3616   // with a SCEVCouldNotCompute as the cached BE count).
3617
3618   Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3619
3620   // Canonicalize nested AddRecs by nesting them in order of loop depth.
3621   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3622     const Loop *NestedLoop = NestedAR->getLoop();
3623     if (L->contains(NestedLoop)
3624             ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3625             : (!NestedLoop->contains(L) &&
3626                DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3627       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3628       Operands[0] = NestedAR->getStart();
3629       // AddRecs require their operands be loop-invariant with respect to their
3630       // loops. Don't perform this transformation if it would break this
3631       // requirement.
3632       bool AllInvariant = all_of(
3633           Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3634
3635       if (AllInvariant) {
3636         // Create a recurrence for the outer loop with the same step size.
3637         //
3638         // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3639         // inner recurrence has the same property.
3640         SCEV::NoWrapFlags OuterFlags =
3641             maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3642
3643         NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3644         AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3645           return isLoopInvariant(Op, NestedLoop);
3646         });
3647
3648         if (AllInvariant) {
3649           // Ok, both add recurrences are valid after the transformation.
3650 // 3651 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3652 // the outer recurrence has the same property. 3653 SCEV::NoWrapFlags InnerFlags = 3654 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3655 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3656 } 3657 } 3658 // Reset Operands to its original state. 3659 Operands[0] = NestedAR; 3660 } 3661 } 3662 3663 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3664 // already have one, otherwise create a new one. 3665 return getOrCreateAddRecExpr(Operands, L, Flags); 3666 } 3667 3668 const SCEV * 3669 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3670 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3671 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3672 // getSCEV(Base)->getType() has the same address space as Base->getType() 3673 // because SCEV::getType() preserves the address space. 3674 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); 3675 const bool AssumeInBoundsFlags = [&]() { 3676 if (!GEP->isInBounds()) 3677 return false; 3678 3679 // We'd like to propagate flags from the IR to the corresponding SCEV nodes, 3680 // but to do that, we have to ensure that said flag is valid in the entire 3681 // defined scope of the SCEV. 3682 auto *GEPI = dyn_cast<Instruction>(GEP); 3683 // TODO: non-instructions have global scope. We might be able to prove 3684 // some global scope cases 3685 return GEPI && isSCEVExprNeverPoison(GEPI); 3686 }(); 3687 3688 SCEV::NoWrapFlags OffsetWrap = 3689 AssumeInBoundsFlags ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3690 3691 Type *CurTy = GEP->getType(); 3692 bool FirstIter = true; 3693 SmallVector<const SCEV *, 4> Offsets; 3694 for (const SCEV *IndexExpr : IndexExprs) { 3695 // Compute the (potentially symbolic) offset in bytes for this index. 3696 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3697 // For a struct, add the member offset. 3698 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3699 unsigned FieldNo = Index->getZExtValue(); 3700 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3701 Offsets.push_back(FieldOffset); 3702 3703 // Update CurTy to the type of the field at Index. 3704 CurTy = STy->getTypeAtIndex(Index); 3705 } else { 3706 // Update CurTy to its element type. 3707 if (FirstIter) { 3708 assert(isa<PointerType>(CurTy) && 3709 "The first index of a GEP indexes a pointer"); 3710 CurTy = GEP->getSourceElementType(); 3711 FirstIter = false; 3712 } else { 3713 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3714 } 3715 // For an array, add the element offset, explicitly scaled. 3716 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3717 // Getelementptr indices are signed. 3718 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3719 3720 // Multiply the index by the element size to compute the element offset. 3721 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3722 Offsets.push_back(LocalOffset); 3723 } 3724 } 3725 3726 // Handle degenerate case of GEP without offsets. 3727 if (Offsets.empty()) 3728 return BaseExpr; 3729 3730 // Add the offsets together, assuming nsw if inbounds. 3731 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3732 // Add the base address and the offset. We cannot use the nsw flag, as the 3733 // base address is unsigned. However, if we know that the offset is 3734 // non-negative, we can use nuw. 
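  // Illustration (hypothetical IR, hand-worked): for
  //   %q = getelementptr inbounds i32, i32* %p, i64 %i
  // the offset SCEV is (4 * %i)<nsw>, and if %i is additionally known
  // non-negative, the overall result is ((4 * %i)<nsw> + %p)<nuw>.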
3735 SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset) 3736 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3737 auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap); 3738 assert(BaseExpr->getType() == GEPExpr->getType() && 3739 "GEP should not change type mid-flight."); 3740 return GEPExpr; 3741 } 3742 3743 SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3744 ArrayRef<const SCEV *> Ops) { 3745 FoldingSetNodeID ID; 3746 ID.AddInteger(SCEVType); 3747 for (const SCEV *Op : Ops) 3748 ID.AddPointer(Op); 3749 void *IP = nullptr; 3750 return UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 3751 } 3752 3753 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3754 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3755 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3756 } 3757 3758 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3759 SmallVectorImpl<const SCEV *> &Ops) { 3760 assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!"); 3761 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3762 if (Ops.size() == 1) return Ops[0]; 3763 #ifndef NDEBUG 3764 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3765 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 3766 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3767 "Operand types don't match!"); 3768 assert(Ops[0]->getType()->isPointerTy() == 3769 Ops[i]->getType()->isPointerTy() && 3770 "min/max should be consistently pointerish"); 3771 } 3772 #endif 3773 3774 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3775 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3776 3777 // Sort by complexity, this groups all similar expression types together. 3778 GroupByComplexity(Ops, &LI, DT); 3779 3780 // Check if we have created the same expression before. 3781 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) { 3782 return S; 3783 } 3784 3785 // If there are any constants, fold them together. 3786 unsigned Idx = 0; 3787 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3788 ++Idx; 3789 assert(Idx < Ops.size()); 3790 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3791 if (Kind == scSMaxExpr) 3792 return APIntOps::smax(LHS, RHS); 3793 else if (Kind == scSMinExpr) 3794 return APIntOps::smin(LHS, RHS); 3795 else if (Kind == scUMaxExpr) 3796 return APIntOps::umax(LHS, RHS); 3797 else if (Kind == scUMinExpr) 3798 return APIntOps::umin(LHS, RHS); 3799 llvm_unreachable("Unknown SCEV min/max opcode"); 3800 }; 3801 3802 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3803 // We found two constants, fold them together! 3804 ConstantInt *Fold = ConstantInt::get( 3805 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3806 Ops[0] = getConstant(Fold); 3807 Ops.erase(Ops.begin()+1); // Erase the folded element 3808 if (Ops.size() == 1) return Ops[0]; 3809 LHSC = cast<SCEVConstant>(Ops[0]); 3810 } 3811 3812 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3813 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3814 3815 if (IsMax ? IsMinV : IsMaxV) { 3816 // If we are left with a constant minimum(/maximum)-int, strip it off. 3817 Ops.erase(Ops.begin()); 3818 --Idx; 3819 } else if (IsMax ? IsMaxV : IsMinV) { 3820 // If we have a max(/min) with a constant maximum(/minimum)-int, 3821 // it will always be the extremum. 
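      // For instance, smax(X, <signed-max constant>) is always that constant,
      // and umin(X, 0) is always 0, so the constant alone is returned.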
3822 return LHSC; 3823 } 3824 3825 if (Ops.size() == 1) return Ops[0]; 3826 } 3827 3828 // Find the first operation of the same kind 3829 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3830 ++Idx; 3831 3832 // Check to see if one of the operands is of the same kind. If so, expand its 3833 // operands onto our operand list, and recurse to simplify. 3834 if (Idx < Ops.size()) { 3835 bool DeletedAny = false; 3836 while (Ops[Idx]->getSCEVType() == Kind) { 3837 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3838 Ops.erase(Ops.begin()+Idx); 3839 Ops.append(SMME->op_begin(), SMME->op_end()); 3840 DeletedAny = true; 3841 } 3842 3843 if (DeletedAny) 3844 return getMinMaxExpr(Kind, Ops); 3845 } 3846 3847 // Okay, check to see if the same value occurs in the operand list twice. If 3848 // so, delete one. Since we sorted the list, these values are required to 3849 // be adjacent. 3850 llvm::CmpInst::Predicate GEPred = 3851 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3852 llvm::CmpInst::Predicate LEPred = 3853 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3854 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3855 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3856 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3857 if (Ops[i] == Ops[i + 1] || 3858 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3859 // X op Y op Y --> X op Y 3860 // X op Y --> X, if we know X, Y are ordered appropriately 3861 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3862 --i; 3863 --e; 3864 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3865 Ops[i + 1])) { 3866 // X op Y --> Y, if we know X, Y are ordered appropriately 3867 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3868 --i; 3869 --e; 3870 } 3871 } 3872 3873 if (Ops.size() == 1) return Ops[0]; 3874 3875 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3876 3877 // Okay, it looks like we really DO need an expr. Check to see if we 3878 // already have one, otherwise create a new one. 3879 FoldingSetNodeID ID; 3880 ID.AddInteger(Kind); 3881 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3882 ID.AddPointer(Ops[i]); 3883 void *IP = nullptr; 3884 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 3885 if (ExistingSCEV) 3886 return ExistingSCEV; 3887 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3888 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3889 SCEV *S = new (SCEVAllocator) 3890 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); 3891 3892 UniqueSCEVs.InsertNode(S, IP); 3893 registerUser(S, Ops); 3894 return S; 3895 } 3896 3897 namespace { 3898 3899 class SCEVSequentialMinMaxDeduplicatingVisitor final 3900 : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, 3901 Optional<const SCEV *>> { 3902 using RetVal = Optional<const SCEV *>; 3903 using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>; 3904 3905 ScalarEvolution &SE; 3906 const SCEVTypes RootKind; // Must be a sequential min/max expression. 3907 const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind. 3908 SmallPtrSet<const SCEV *, 16> SeenOps; 3909 3910 bool canRecurseInto(SCEVTypes Kind) const { 3911 // We can only recurse into the SCEV expression of the same effective type 3912 // as the type of our root SCEV expression. 
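    // For instance, if RootKind is scSequentialUMinExpr, then
    // NonSequentialRootKind is scUMinExpr, so the visitor descends into both
    // umin_seq and umin sub-expressions but treats all other nodes as opaque.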
3913 return RootKind == Kind || NonSequentialRootKind == Kind; 3914 }; 3915 3916 RetVal visitAnyMinMaxExpr(const SCEV *S) { 3917 assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) && 3918 "Only for min/max expressions."); 3919 SCEVTypes Kind = S->getSCEVType(); 3920 3921 if (!canRecurseInto(Kind)) 3922 return S; 3923 3924 auto *NAry = cast<SCEVNAryExpr>(S); 3925 SmallVector<const SCEV *> NewOps; 3926 bool Changed = 3927 visit(Kind, makeArrayRef(NAry->op_begin(), NAry->op_end()), NewOps); 3928 3929 if (!Changed) 3930 return S; 3931 if (NewOps.empty()) 3932 return None; 3933 3934 return isa<SCEVSequentialMinMaxExpr>(S) 3935 ? SE.getSequentialMinMaxExpr(Kind, NewOps) 3936 : SE.getMinMaxExpr(Kind, NewOps); 3937 } 3938 3939 RetVal visit(const SCEV *S) { 3940 // Has the whole operand been seen already? 3941 if (!SeenOps.insert(S).second) 3942 return None; 3943 return Base::visit(S); 3944 } 3945 3946 public: 3947 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE, 3948 SCEVTypes RootKind) 3949 : SE(SE), RootKind(RootKind), 3950 NonSequentialRootKind( 3951 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( 3952 RootKind)) {} 3953 3954 bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps, 3955 SmallVectorImpl<const SCEV *> &NewOps) { 3956 bool Changed = false; 3957 SmallVector<const SCEV *> Ops; 3958 Ops.reserve(OrigOps.size()); 3959 3960 for (const SCEV *Op : OrigOps) { 3961 RetVal NewOp = visit(Op); 3962 if (NewOp != Op) 3963 Changed = true; 3964 if (NewOp) 3965 Ops.emplace_back(*NewOp); 3966 } 3967 3968 if (Changed) 3969 NewOps = std::move(Ops); 3970 return Changed; 3971 } 3972 3973 RetVal visitConstant(const SCEVConstant *Constant) { return Constant; } 3974 3975 RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; } 3976 3977 RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; } 3978 3979 RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; } 3980 3981 RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; } 3982 3983 RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; } 3984 3985 RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; } 3986 3987 RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; } 3988 3989 RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } 3990 3991 RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) { 3992 return visitAnyMinMaxExpr(Expr); 3993 } 3994 3995 RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) { 3996 return visitAnyMinMaxExpr(Expr); 3997 } 3998 3999 RetVal visitSMinExpr(const SCEVSMinExpr *Expr) { 4000 return visitAnyMinMaxExpr(Expr); 4001 } 4002 4003 RetVal visitUMinExpr(const SCEVUMinExpr *Expr) { 4004 return visitAnyMinMaxExpr(Expr); 4005 } 4006 4007 RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) { 4008 return visitAnyMinMaxExpr(Expr); 4009 } 4010 4011 RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; } 4012 4013 RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; } 4014 }; 4015 4016 } // namespace 4017 4018 /// Return true if V is poison given that AssumedPoison is already poison. 4019 static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) { 4020 // The only way poison may be introduced in a SCEV expression is from a 4021 // poison SCEVUnknown (ConstantExprs are also represented as SCEVUnknown, 4022 // not SCEVConstant). 
Notably, nowrap flags in SCEV nodes can *not* 4023 // introduce poison -- they encode guaranteed, non-speculated knowledge. 4024 // 4025 // Additionally, all SCEV nodes propagate poison from inputs to outputs, 4026 // with the notable exception of umin_seq, where only poison from the first 4027 // operand is (unconditionally) propagated. 4028 struct SCEVPoisonCollector { 4029 bool LookThroughSeq; 4030 SmallPtrSet<const SCEV *, 4> MaybePoison; 4031 SCEVPoisonCollector(bool LookThroughSeq) : LookThroughSeq(LookThroughSeq) {} 4032 4033 bool follow(const SCEV *S) { 4034 // TODO: We can always follow the first operand, but the SCEVTraversal 4035 // API doesn't support this. 4036 if (!LookThroughSeq && isa<SCEVSequentialMinMaxExpr>(S)) 4037 return false; 4038 4039 if (auto *SU = dyn_cast<SCEVUnknown>(S)) { 4040 if (!isGuaranteedNotToBePoison(SU->getValue())) 4041 MaybePoison.insert(S); 4042 } 4043 return true; 4044 } 4045 bool isDone() const { return false; } 4046 }; 4047 4048 // First collect all SCEVs that might result in AssumedPoison to be poison. 4049 // We need to look through umin_seq here, because we want to find all SCEVs 4050 // that *might* result in poison, not only those that are *required* to. 4051 SCEVPoisonCollector PC1(/* LookThroughSeq */ true); 4052 visitAll(AssumedPoison, PC1); 4053 4054 // AssumedPoison is never poison. As the assumption is false, the implication 4055 // is true. Don't bother walking the other SCEV in this case. 4056 if (PC1.MaybePoison.empty()) 4057 return true; 4058 4059 // Collect all SCEVs in S that, if poison, *will* result in S being poison 4060 // as well. We cannot look through umin_seq here, as its argument only *may* 4061 // make the result poison. 4062 SCEVPoisonCollector PC2(/* LookThroughSeq */ false); 4063 visitAll(S, PC2); 4064 4065 // Make sure that no matter which SCEV in PC1.MaybePoison is actually poison, 4066 // it will also make S poison by being part of PC2.MaybePoison. 4067 return all_of(PC1.MaybePoison, 4068 [&](const SCEV *S) { return PC2.MaybePoison.contains(S); }); 4069 } 4070 4071 const SCEV * 4072 ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind, 4073 SmallVectorImpl<const SCEV *> &Ops) { 4074 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) && 4075 "Not a SCEVSequentialMinMaxExpr!"); 4076 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 4077 if (Ops.size() == 1) 4078 return Ops[0]; 4079 #ifndef NDEBUG 4080 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 4081 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 4082 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 4083 "Operand types don't match!"); 4084 assert(Ops[0]->getType()->isPointerTy() == 4085 Ops[i]->getType()->isPointerTy() && 4086 "min/max should be consistently pointerish"); 4087 } 4088 #endif 4089 4090 // Note that SCEVSequentialMinMaxExpr is *NOT* commutative, 4091 // so we can *NOT* do any kind of sorting of the expressions! 4092 4093 // Check if we have created the same expression before. 4094 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) 4095 return S; 4096 4097 // FIXME: there are *some* simplifications that we can do here. 4098 4099 // Keep only the first instance of an operand. 4100 { 4101 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind); 4102 bool Changed = Deduplicator.visit(Kind, Ops, Ops); 4103 if (Changed) 4104 return getSequentialMinMaxExpr(Kind, Ops); 4105 } 4106 4107 // Check to see if one of the operands is of the same kind. 
If so, expand its 4108 // operands onto our operand list, and recurse to simplify. 4109 { 4110 unsigned Idx = 0; 4111 bool DeletedAny = false; 4112 while (Idx < Ops.size()) { 4113 if (Ops[Idx]->getSCEVType() != Kind) { 4114 ++Idx; 4115 continue; 4116 } 4117 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]); 4118 Ops.erase(Ops.begin() + Idx); 4119 Ops.insert(Ops.begin() + Idx, SMME->op_begin(), SMME->op_end()); 4120 DeletedAny = true; 4121 } 4122 4123 if (DeletedAny) 4124 return getSequentialMinMaxExpr(Kind, Ops); 4125 } 4126 4127 const SCEV *SaturationPoint; 4128 ICmpInst::Predicate Pred; 4129 switch (Kind) { 4130 case scSequentialUMinExpr: 4131 SaturationPoint = getZero(Ops[0]->getType()); 4132 Pred = ICmpInst::ICMP_ULE; 4133 break; 4134 default: 4135 llvm_unreachable("Not a sequential min/max type."); 4136 } 4137 4138 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 4139 // We can replace %x umin_seq %y with %x umin %y if either: 4140 // * %y being poison implies %x is also poison. 4141 // * %x cannot be the saturating value (e.g. zero for umin). 4142 if (::impliesPoison(Ops[i], Ops[i - 1]) || 4143 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1], 4144 SaturationPoint)) { 4145 SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]}; 4146 Ops[i - 1] = getMinMaxExpr( 4147 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind), 4148 SeqOps); 4149 Ops.erase(Ops.begin() + i); 4150 return getSequentialMinMaxExpr(Kind, Ops); 4151 } 4152 // Fold %x umin_seq %y to %x if %x ule %y. 4153 // TODO: We might be able to prove the predicate for a later operand. 4154 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) { 4155 Ops.erase(Ops.begin() + i); 4156 return getSequentialMinMaxExpr(Kind, Ops); 4157 } 4158 } 4159 4160 // Okay, it looks like we really DO need an expr. Check to see if we 4161 // already have one, otherwise create a new one. 
4162   FoldingSetNodeID ID;
4163   ID.AddInteger(Kind);
4164   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
4165     ID.AddPointer(Ops[i]);
4166   void *IP = nullptr;
4167   const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
4168   if (ExistingSCEV)
4169     return ExistingSCEV;
4170
4171   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
4172   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
4173   SCEV *S = new (SCEVAllocator)
4174       SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
4175
4176   UniqueSCEVs.InsertNode(S, IP);
4177   registerUser(S, Ops);
4178   return S;
4179 }
4180
4181 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
4182   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4183   return getSMaxExpr(Ops);
4184 }
4185
4186 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4187   return getMinMaxExpr(scSMaxExpr, Ops);
4188 }
4189
4190 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
4191   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4192   return getUMaxExpr(Ops);
4193 }
4194
4195 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4196   return getMinMaxExpr(scUMaxExpr, Ops);
4197 }
4198
4199 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
4200                                          const SCEV *RHS) {
4201   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4202   return getSMinExpr(Ops);
4203 }
4204
4205 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
4206   return getMinMaxExpr(scSMinExpr, Ops);
4207 }
4208
4209 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
4210                                          bool Sequential) {
4211   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4212   return getUMinExpr(Ops, Sequential);
4213 }
4214
4215 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
4216                                          bool Sequential) {
4217   return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
4218                     : getMinMaxExpr(scUMinExpr, Ops);
4219 }
4220
4221 const SCEV *
4222 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
4223                                              ScalableVectorType *ScalableTy) {
4224   Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
4225   Constant *One = ConstantInt::get(IntTy, 1);
4226   Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
4227   // Note that the expression we created is the final expression; we don't
4228   // want to simplify it any further. Also, if we call a normal getSCEV(),
4229   // we'll end up in an endless recursion. So just create an SCEVUnknown.
4230   return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
4231 }
4232
4233 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
4234   if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
4235     return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
4236   // We can bypass creating a target-independent constant expression and then
4237   // folding it back into a ConstantInt. This is just a compile-time
4238   // optimization.
4239   return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
4240 }
4241
4242 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
4243   if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
4244     return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
4245   // We can bypass creating a target-independent constant expression and then
4246   // folding it back into a ConstantInt. This is just a compile-time
4247   // optimization.
4248   return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
4249 }
4250
4251 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
4252                                              StructType *STy,
4253                                              unsigned FieldNo) {
4254   // We can bypass creating a target-independent constant expression and then
4255   // folding it back into a ConstantInt. This is just a compile-time
4256   // optimization.
4257   return getConstant(
4258       IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
4259 }
4260
4261 const SCEV *ScalarEvolution::getUnknown(Value *V) {
4262   // Don't attempt to do anything other than create a SCEVUnknown object
4263   // here. createSCEV only calls getUnknown after checking for all other
4264   // interesting possibilities, and any other code that calls getUnknown
4265   // is doing so in order to hide a value from SCEV canonicalization.
4266
4267   FoldingSetNodeID ID;
4268   ID.AddInteger(scUnknown);
4269   ID.AddPointer(V);
4270   void *IP = nullptr;
4271   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
4272     assert(cast<SCEVUnknown>(S)->getValue() == V &&
4273            "Stale SCEVUnknown in uniquing map!");
4274     return S;
4275   }
4276   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
4277                                             FirstUnknown);
4278   FirstUnknown = cast<SCEVUnknown>(S);
4279   UniqueSCEVs.InsertNode(S, IP);
4280   return S;
4281 }
4282
4283 //===----------------------------------------------------------------------===//
4284 //             Basic SCEV Analysis and PHI Idiom Recognition Code
4285 //
4286
4287 /// Test if values of the given type are analyzable within the SCEV
4288 /// framework. This primarily includes integer types, and it can optionally
4289 /// include pointer types if the ScalarEvolution class has access to
4290 /// target-specific information.
4291 bool ScalarEvolution::isSCEVable(Type *Ty) const {
4292   // Integers and pointers are always SCEVable.
4293   return Ty->isIntOrPtrTy();
4294 }
4295
4296 /// Return the size in bits of the specified type, for which isSCEVable must
4297 /// return true.
4298 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
4299   assert(isSCEVable(Ty) && "Type is not SCEVable!");
4300   if (Ty->isPointerTy())
4301     return getDataLayout().getIndexTypeSizeInBits(Ty);
4302   return getDataLayout().getTypeSizeInBits(Ty);
4303 }
4304
4305 /// Return a type with the same bitwidth as the given type and which represents
4306 /// how SCEV will treat the given type, for which isSCEVable must return
4307 /// true. For pointer types, this is the pointer index sized integer type.
4308 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
4309   assert(isSCEVable(Ty) && "Type is not SCEVable!");
4310
4311   if (Ty->isIntegerTy())
4312     return Ty;
4313
4314   // The only other supported type is pointer.
4315   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
4316   return getDataLayout().getIndexType(Ty);
4317 }
4318
4319 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
4320   return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
4321 }
4322
4323 bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
4324                                                         const SCEV *B) {
4325   // For a valid use point to exist, the defining scope of one operand
4326   // must dominate the other.
4327   bool PreciseA, PreciseB;
4328   auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
4329   auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
4330   if (!PreciseA || !PreciseB)
4331     // Can't tell.
4332     return false;
4333   return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4334          DT.dominates(ScopeB, ScopeA);
4335 }
4336
4337
4338 const SCEV *ScalarEvolution::getCouldNotCompute() {
4339   return CouldNotCompute.get();
4340 }
4341
4342 bool ScalarEvolution::checkValidity(const SCEV *S) const {
4343   bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
4344     auto *SU = dyn_cast<SCEVUnknown>(S);
4345     return SU && SU->getValue() == nullptr;
4346   });
4347
4348   return !ContainsNulls;
4349 }
4350
4351 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
4352   HasRecMapType::iterator I = HasRecMap.find(S);
4353   if (I != HasRecMap.end())
4354     return I->second;
4355
4356   bool FoundAddRec =
4357       SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
4358   HasRecMap.insert({S, FoundAddRec});
4359   return FoundAddRec;
4360 }
4361
4362 /// Return the set of Values that map to \p S in the value-to-SCEV mapping;
4363 /// each Value in the returned set has \p S as its SCEV expression.
4364 ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) {
4365   ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
4366   if (SI == ExprValueMap.end())
4367     return None;
4368 #ifndef NDEBUG
4369   if (VerifySCEVMap) {
4370     // Check there is no dangling Value in the set returned.
4371     for (Value *V : SI->second)
4372       assert(ValueExprMap.count(V));
4373   }
4374 #endif
4375   return SI->second.getArrayRef();
4376 }
4377
4378 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
4379 /// cannot be used separately. eraseValueFromMap should be used to remove
4380 /// V from ValueExprMap and ExprValueMap at the same time.
4381 void ScalarEvolution::eraseValueFromMap(Value *V) {
4382   ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4383   if (I != ValueExprMap.end()) {
4384     auto EVIt = ExprValueMap.find(I->second);
4385     bool Removed = EVIt->second.remove(V);
4386     (void) Removed;
4387     assert(Removed && "Value not in ExprValueMap?");
4388     ValueExprMap.erase(I);
4389   }
4390 }
4391
4392 void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
4393   // A recursive query may have already computed the SCEV. It should be
4394   // equivalent, but may not necessarily be exactly the same, e.g. due to lazily
4395   // inferred nowrap flags.
4396   auto It = ValueExprMap.find_as(V);
4397   if (It == ValueExprMap.end()) {
4398     ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4399     ExprValueMap[S].insert(V);
4400   }
4401 }
4402
4403 /// Return an existing SCEV if it exists, otherwise analyze the expression and
4404 /// create a new one.
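/// A minimal usage sketch (assuming some ScalarEvolution &SE and a SCEVable
/// Value *V):
///   const SCEV *S = SE.getSCEV(V);
///   if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
///     ...; // V evolves as {Start,+,Step}<L> across iterations of AR's loop.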
4405 const SCEV *ScalarEvolution::getSCEV(Value *V) { 4406 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4407 4408 if (const SCEV *S = getExistingSCEV(V)) 4409 return S; 4410 return createSCEVIter(V); 4411 } 4412 4413 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 4414 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4415 4416 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 4417 if (I != ValueExprMap.end()) { 4418 const SCEV *S = I->second; 4419 assert(checkValidity(S) && 4420 "existing SCEV has not been properly invalidated"); 4421 return S; 4422 } 4423 return nullptr; 4424 } 4425 4426 /// Return a SCEV corresponding to -V = -1*V 4427 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 4428 SCEV::NoWrapFlags Flags) { 4429 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4430 return getConstant( 4431 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 4432 4433 Type *Ty = V->getType(); 4434 Ty = getEffectiveSCEVType(Ty); 4435 return getMulExpr(V, getMinusOne(Ty), Flags); 4436 } 4437 4438 /// If Expr computes ~A, return A else return nullptr 4439 static const SCEV *MatchNotExpr(const SCEV *Expr) { 4440 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 4441 if (!Add || Add->getNumOperands() != 2 || 4442 !Add->getOperand(0)->isAllOnesValue()) 4443 return nullptr; 4444 4445 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 4446 if (!AddRHS || AddRHS->getNumOperands() != 2 || 4447 !AddRHS->getOperand(0)->isAllOnesValue()) 4448 return nullptr; 4449 4450 return AddRHS->getOperand(1); 4451 } 4452 4453 /// Return a SCEV corresponding to ~V = -1-V 4454 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 4455 assert(!V->getType()->isPointerTy() && "Can't negate pointer"); 4456 4457 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4458 return getConstant( 4459 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 4460 4461 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 4462 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 4463 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 4464 SmallVector<const SCEV *, 2> MatchedOperands; 4465 for (const SCEV *Operand : MME->operands()) { 4466 const SCEV *Matched = MatchNotExpr(Operand); 4467 if (!Matched) 4468 return (const SCEV *)nullptr; 4469 MatchedOperands.push_back(Matched); 4470 } 4471 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 4472 MatchedOperands); 4473 }; 4474 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4475 return Replaced; 4476 } 4477 4478 Type *Ty = V->getType(); 4479 Ty = getEffectiveSCEVType(Ty); 4480 return getMinusSCEV(getMinusOne(Ty), V); 4481 } 4482 4483 const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) { 4484 assert(P->getType()->isPointerTy()); 4485 4486 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) { 4487 // The base of an AddRec is the first operand. 4488 SmallVector<const SCEV *> Ops{AddRec->operands()}; 4489 Ops[0] = removePointerBase(Ops[0]); 4490 // Don't try to transfer nowrap flags for now. We could in some cases 4491 // (for example, if pointer operand of the AddRec is a SCEVUnknown). 4492 return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); 4493 } 4494 if (auto *Add = dyn_cast<SCEVAddExpr>(P)) { 4495 // The base of an Add is the pointer operand. 
4496 SmallVector<const SCEV *> Ops{Add->operands()}; 4497 const SCEV **PtrOp = nullptr; 4498 for (const SCEV *&AddOp : Ops) { 4499 if (AddOp->getType()->isPointerTy()) { 4500 assert(!PtrOp && "Cannot have multiple pointer ops"); 4501 PtrOp = &AddOp; 4502 } 4503 } 4504 *PtrOp = removePointerBase(*PtrOp); 4505 // Don't try to transfer nowrap flags for now. We could in some cases 4506 // (for example, if the pointer operand of the Add is a SCEVUnknown). 4507 return getAddExpr(Ops); 4508 } 4509 // Any other expression must be a pointer base. 4510 return getZero(P->getType()); 4511 } 4512 4513 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 4514 SCEV::NoWrapFlags Flags, 4515 unsigned Depth) { 4516 // Fast path: X - X --> 0. 4517 if (LHS == RHS) 4518 return getZero(LHS->getType()); 4519 4520 // If we subtract two pointers with different pointer bases, bail. 4521 // Eventually, we're going to add an assertion to getMulExpr that we 4522 // can't multiply by a pointer. 4523 if (RHS->getType()->isPointerTy()) { 4524 if (!LHS->getType()->isPointerTy() || 4525 getPointerBase(LHS) != getPointerBase(RHS)) 4526 return getCouldNotCompute(); 4527 LHS = removePointerBase(LHS); 4528 RHS = removePointerBase(RHS); 4529 } 4530 4531 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 4532 // makes it so that we cannot make much use of NUW. 4533 auto AddFlags = SCEV::FlagAnyWrap; 4534 const bool RHSIsNotMinSigned = 4535 !getSignedRangeMin(RHS).isMinSignedValue(); 4536 if (hasFlags(Flags, SCEV::FlagNSW)) { 4537 // Let M be the minimum representable signed value. Then (-1)*RHS 4538 // signed-wraps if and only if RHS is M. That can happen even for 4539 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 4540 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 4541 // (-1)*RHS, we need to prove that RHS != M. 4542 // 4543 // If LHS is non-negative and we know that LHS - RHS does not 4544 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 4545 // either by proving that RHS > M or that LHS >= 0. 4546 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 4547 AddFlags = SCEV::FlagNSW; 4548 } 4549 } 4550 4551 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 4552 // RHS is NSW and LHS >= 0. 4553 // 4554 // The difficulty here is that the NSW flag may have been proven 4555 // relative to a loop that is to be found in a recurrence in LHS and 4556 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 4557 // larger scope than intended. 4558 auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap;
4559 
4560   return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4561 }
4562 
4563 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4564                                                      unsigned Depth) {
4565   Type *SrcTy = V->getType();
4566   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4567          "Cannot truncate or zero extend with non-integer arguments!");
4568   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4569     return V; // No conversion
4570   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4571     return getTruncateExpr(V, Ty, Depth);
4572   return getZeroExtendExpr(V, Ty, Depth);
4573 }
4574 
4575 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4576                                                      unsigned Depth) {
4577   Type *SrcTy = V->getType();
4578   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4579          "Cannot truncate or sign extend with non-integer arguments!");
4580   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4581     return V; // No conversion
4582   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4583     return getTruncateExpr(V, Ty, Depth);
4584   return getSignExtendExpr(V, Ty, Depth);
4585 }
4586 
4587 const SCEV *
4588 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4589   Type *SrcTy = V->getType();
4590   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4591          "Cannot noop or zero extend with non-integer arguments!");
4592   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4593          "getNoopOrZeroExtend cannot truncate!");
4594   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4595     return V; // No conversion
4596   return getZeroExtendExpr(V, Ty);
4597 }
4598 
4599 const SCEV *
4600 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4601   Type *SrcTy = V->getType();
4602   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4603          "Cannot noop or sign extend with non-integer arguments!");
4604   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4605          "getNoopOrSignExtend cannot truncate!");
4606   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4607     return V; // No conversion
4608   return getSignExtendExpr(V, Ty);
4609 }
4610 
4611 const SCEV *
4612 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4613   Type *SrcTy = V->getType();
4614   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4615          "Cannot noop or any extend with non-integer arguments!");
4616   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4617          "getNoopOrAnyExtend cannot truncate!");
4618   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4619     return V; // No conversion
4620   return getAnyExtendExpr(V, Ty);
4621 }
4622 
4623 const SCEV *
4624 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4625   Type *SrcTy = V->getType();
4626   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4627          "Cannot truncate or noop with non-integer arguments!");
4628   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4629          "getTruncateOrNoop cannot extend!");
4630   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4631     return V; // No conversion
4632   return getTruncateExpr(V, Ty);
4633 }
4634 
4635 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4636                                                         const SCEV *RHS) {
4637   const SCEV *PromotedLHS = LHS;
4638   const SCEV *PromotedRHS = RHS;
4639 
4640   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4641     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4642   else
4643     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4644 
4645   return
getUMaxExpr(PromotedLHS, PromotedRHS); 4646 } 4647 4648 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4649 const SCEV *RHS, 4650 bool Sequential) { 4651 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4652 return getUMinFromMismatchedTypes(Ops, Sequential); 4653 } 4654 4655 const SCEV * 4656 ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops, 4657 bool Sequential) { 4658 assert(!Ops.empty() && "At least one operand must be!"); 4659 // Trivial case. 4660 if (Ops.size() == 1) 4661 return Ops[0]; 4662 4663 // Find the max type first. 4664 Type *MaxType = nullptr; 4665 for (const auto *S : Ops) 4666 if (MaxType) 4667 MaxType = getWiderType(MaxType, S->getType()); 4668 else 4669 MaxType = S->getType(); 4670 assert(MaxType && "Failed to find maximum type!"); 4671 4672 // Extend all ops to max type. 4673 SmallVector<const SCEV *, 2> PromotedOps; 4674 for (const auto *S : Ops) 4675 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4676 4677 // Generate umin. 4678 return getUMinExpr(PromotedOps, Sequential); 4679 } 4680 4681 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4682 // A pointer operand may evaluate to a nonpointer expression, such as null. 4683 if (!V->getType()->isPointerTy()) 4684 return V; 4685 4686 while (true) { 4687 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 4688 V = AddRec->getStart(); 4689 } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) { 4690 const SCEV *PtrOp = nullptr; 4691 for (const SCEV *AddOp : Add->operands()) { 4692 if (AddOp->getType()->isPointerTy()) { 4693 assert(!PtrOp && "Cannot have multiple pointer ops"); 4694 PtrOp = AddOp; 4695 } 4696 } 4697 assert(PtrOp && "Must have pointer op"); 4698 V = PtrOp; 4699 } else // Not something we can look further into. 4700 return V; 4701 } 4702 } 4703 4704 /// Push users of the given Instruction onto the given Worklist. 4705 static void PushDefUseChildren(Instruction *I, 4706 SmallVectorImpl<Instruction *> &Worklist, 4707 SmallPtrSetImpl<Instruction *> &Visited) { 4708 // Push the def-use children onto the Worklist stack. 4709 for (User *U : I->users()) { 4710 auto *UserInsn = cast<Instruction>(U); 4711 if (Visited.insert(UserInsn).second) 4712 Worklist.push_back(UserInsn); 4713 } 4714 } 4715 4716 namespace { 4717 4718 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start 4719 /// expression in case its Loop is L. If it is not L then 4720 /// if IgnoreOtherLoops is true then use AddRec itself 4721 /// otherwise rewrite cannot be done. 4722 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4723 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 4724 public: 4725 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 4726 bool IgnoreOtherLoops = true) { 4727 SCEVInitRewriter Rewriter(L, SE); 4728 const SCEV *Result = Rewriter.visit(S); 4729 if (Rewriter.hasSeenLoopVariantSCEVUnknown()) 4730 return SE.getCouldNotCompute(); 4731 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops 4732 ? SE.getCouldNotCompute() 4733 : Result; 4734 } 4735 4736 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4737 if (!SE.isLoopInvariant(Expr, L)) 4738 SeenLoopVariantSCEVUnknown = true; 4739 return Expr; 4740 } 4741 4742 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4743 // Only re-write AddRecExprs for this loop. 
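    // E.g. (sketch): visiting {%start,+,%step}<%L> yields %start, while an
    // AddRec belonging to some other loop is returned unchanged and only
    // recorded via SeenOtherLoops, letting rewrite() decide whether that is
    // acceptable for the caller.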
4744 if (Expr->getLoop() == L) 4745 return Expr->getStart(); 4746 SeenOtherLoops = true; 4747 return Expr; 4748 } 4749 4750 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4751 4752 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4753 4754 private: 4755 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4756 : SCEVRewriteVisitor(SE), L(L) {} 4757 4758 const Loop *L; 4759 bool SeenLoopVariantSCEVUnknown = false; 4760 bool SeenOtherLoops = false; 4761 }; 4762 4763 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post 4764 /// increment expression in case its Loop is L. If it is not L then 4765 /// use AddRec itself. 4766 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4767 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4768 public: 4769 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4770 SCEVPostIncRewriter Rewriter(L, SE); 4771 const SCEV *Result = Rewriter.visit(S); 4772 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4773 ? SE.getCouldNotCompute() 4774 : Result; 4775 } 4776 4777 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4778 if (!SE.isLoopInvariant(Expr, L)) 4779 SeenLoopVariantSCEVUnknown = true; 4780 return Expr; 4781 } 4782 4783 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4784 // Only re-write AddRecExprs for this loop. 4785 if (Expr->getLoop() == L) 4786 return Expr->getPostIncExpr(SE); 4787 SeenOtherLoops = true; 4788 return Expr; 4789 } 4790 4791 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4792 4793 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4794 4795 private: 4796 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4797 : SCEVRewriteVisitor(SE), L(L) {} 4798 4799 const Loop *L; 4800 bool SeenLoopVariantSCEVUnknown = false; 4801 bool SeenOtherLoops = false; 4802 }; 4803 4804 /// This class evaluates the compare condition by matching it against the 4805 /// condition of loop latch. If there is a match we assume a true value 4806 /// for the condition while building SCEV nodes. 4807 class SCEVBackedgeConditionFolder 4808 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4809 public: 4810 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4811 ScalarEvolution &SE) { 4812 bool IsPosBECond = false; 4813 Value *BECond = nullptr; 4814 if (BasicBlock *Latch = L->getLoopLatch()) { 4815 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4816 if (BI && BI->isConditional()) { 4817 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4818 "Both outgoing branches should not target same header!"); 4819 BECond = BI->getCondition(); 4820 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4821 } else { 4822 return S; 4823 } 4824 } 4825 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4826 return Rewriter.visit(S); 4827 } 4828 4829 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4830 const SCEV *Result = Expr; 4831 bool InvariantF = SE.isLoopInvariant(Expr, L); 4832 4833 if (!InvariantF) { 4834 Instruction *I = cast<Instruction>(Expr->getValue()); 4835 switch (I->getOpcode()) { 4836 case Instruction::Select: { 4837 SelectInst *SI = cast<SelectInst>(I); 4838 Optional<const SCEV *> Res = 4839 compareWithBackedgeCondition(SI->getCondition()); 4840 if (Res) { 4841 bool IsOne = cast<SCEVConstant>(Res.value())->getValue()->isOne(); 4842 Result = SE.getSCEV(IsOne ? 
SI->getTrueValue() : SI->getFalseValue()); 4843 } 4844 break; 4845 } 4846 default: { 4847 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4848 if (Res) 4849 Result = Res.value(); 4850 break; 4851 } 4852 } 4853 } 4854 return Result; 4855 } 4856 4857 private: 4858 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4859 bool IsPosBECond, ScalarEvolution &SE) 4860 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4861 IsPositiveBECond(IsPosBECond) {} 4862 4863 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4864 4865 const Loop *L; 4866 /// Loop back condition. 4867 Value *BackedgeCond = nullptr; 4868 /// Set to true if loop back is on positive branch condition. 4869 bool IsPositiveBECond; 4870 }; 4871 4872 Optional<const SCEV *> 4873 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4874 4875 // If value matches the backedge condition for loop latch, 4876 // then return a constant evolution node based on loopback 4877 // branch taken. 4878 if (BackedgeCond == IC) 4879 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 4880 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4881 return None; 4882 } 4883 4884 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4885 public: 4886 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4887 ScalarEvolution &SE) { 4888 SCEVShiftRewriter Rewriter(L, SE); 4889 const SCEV *Result = Rewriter.visit(S); 4890 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4891 } 4892 4893 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4894 // Only allow AddRecExprs for this loop. 4895 if (!SE.isLoopInvariant(Expr, L)) 4896 Valid = false; 4897 return Expr; 4898 } 4899 4900 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4901 if (Expr->getLoop() == L && Expr->isAffine()) 4902 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4903 Valid = false; 4904 return Expr; 4905 } 4906 4907 bool isValid() { return Valid; } 4908 4909 private: 4910 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4911 : SCEVRewriteVisitor(SE), L(L) {} 4912 4913 const Loop *L; 4914 bool Valid = true; 4915 }; 4916 4917 } // end anonymous namespace 4918 4919 SCEV::NoWrapFlags 4920 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4921 if (!AR->isAffine()) 4922 return SCEV::FlagAnyWrap; 4923 4924 using OBO = OverflowingBinaryOperator; 4925 4926 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4927 4928 if (!AR->hasNoSignedWrap()) { 4929 ConstantRange AddRecRange = getSignedRange(AR); 4930 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4931 4932 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4933 Instruction::Add, IncRange, OBO::NoSignedWrap); 4934 if (NSWRegion.contains(AddRecRange)) 4935 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4936 } 4937 4938 if (!AR->hasNoUnsignedWrap()) { 4939 ConstantRange AddRecRange = getUnsignedRange(AR); 4940 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4941 4942 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4943 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4944 if (NUWRegion.contains(AddRecRange)) 4945 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4946 } 4947 4948 return Result; 4949 } 4950 4951 SCEV::NoWrapFlags 4952 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) { 4953 SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); 4954 4955 if (AR->hasNoSignedWrap()) 4956 
return Result;
4957 
4958   if (!AR->isAffine())
4959     return Result;
4960 
4961   const SCEV *Step = AR->getStepRecurrence(*this);
4962   const Loop *L = AR->getLoop();
4963 
4964   // Check whether the backedge-taken count is SCEVCouldNotCompute.
4965   // Note that this serves two purposes: It filters out loops that are
4966   // simply not analyzable, and it covers the case where this code is
4967   // being called from within backedge-taken count analysis, such that
4968   // attempting to ask for the backedge-taken count would likely result
4969   // in infinite recursion. In the latter case, the analysis code will
4970   // cope with a conservative value, and it will take care to purge
4971   // that value once it has finished.
4972   const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4973 
4974   // Normally, in the cases we can prove no-overflow via a
4975   // backedge guarding condition, we can also compute a backedge
4976   // taken count for the loop. The exceptions are assumptions and
4977   // guards present in the loop -- SCEV is not great at exploiting
4978   // these to compute max backedge taken counts, but can still use
4979   // these to prove lack of overflow. Use this fact to avoid
4980   // doing extra work that may not pay off.
4981 
4982   if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4983       AC.assumptions().empty())
4984     return Result;
4985 
4986   // If the backedge is guarded by a comparison with the pre-inc value the
4987   // addrec is safe. Also, if the entry is guarded by a comparison with the
4988   // start value and the backedge is guarded by a comparison with the post-inc
4989   // value, the addrec is safe.
4990   ICmpInst::Predicate Pred;
4991   const SCEV *OverflowLimit =
4992       getSignedOverflowLimitForStep(Step, &Pred, this);
4993   if (OverflowLimit &&
4994       (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
4995        isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
4996     Result = setFlags(Result, SCEV::FlagNSW);
4997   }
4998   return Result;
4999 }
5000 SCEV::NoWrapFlags
5001 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
5002   SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
5003 
5004   if (AR->hasNoUnsignedWrap())
5005     return Result;
5006 
5007   if (!AR->isAffine())
5008     return Result;
5009 
5010   const SCEV *Step = AR->getStepRecurrence(*this);
5011   unsigned BitWidth = getTypeSizeInBits(AR->getType());
5012   const Loop *L = AR->getLoop();
5013 
5014   // Check whether the backedge-taken count is SCEVCouldNotCompute.
5015   // Note that this serves two purposes: It filters out loops that are
5016   // simply not analyzable, and it covers the case where this code is
5017   // being called from within backedge-taken count analysis, such that
5018   // attempting to ask for the backedge-taken count would likely result
5019   // in infinite recursion. In the latter case, the analysis code will
5020   // cope with a conservative value, and it will take care to purge
5021   // that value once it has finished.
5022   const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
5023 
5024   // Normally, in the cases we can prove no-overflow via a
5025   // backedge guarding condition, we can also compute a backedge
5026   // taken count for the loop. The exceptions are assumptions and
5027   // guards present in the loop -- SCEV is not great at exploiting
5028   // these to compute max backedge taken counts, but can still use
5029   // these to prove lack of overflow. Use this fact to avoid
5030   // doing extra work that may not pay off.
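  // Sketch of the guard-based check below (hypothetical loop): for an i8
  // AddRec {0,+,1}<%L>, N = 0 - umax(step) wraps to 255; if the latch
  // guarantees AR <u 255 whenever the backedge is taken, the increment by 1
  // can never wrap past UCHAR_MAX, which is exactly what FlagNUW asserts.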
5031 
5032   if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5033       AC.assumptions().empty())
5034     return Result;
5035 
5036   // If the backedge is guarded by a comparison with the pre-inc value the
5037   // addrec is safe. Also, if the entry is guarded by a comparison with the
5038   // start value and the backedge is guarded by a comparison with the post-inc
5039   // value, the addrec is safe.
5040   if (isKnownPositive(Step)) {
5041     const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
5042                                 getUnsignedRangeMax(Step));
5043     if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
5044         isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
5045       Result = setFlags(Result, SCEV::FlagNUW);
5046     }
5047   }
5048 
5049   return Result;
5050 }
5051 
5052 namespace {
5053 
5054 /// Represents an abstract binary operation. This may exist as a
5055 /// normal instruction or constant expression, or may have been
5056 /// derived from an expression tree.
5057 struct BinaryOp {
5058   unsigned Opcode;
5059   Value *LHS;
5060   Value *RHS;
5061   bool IsNSW = false;
5062   bool IsNUW = false;
5063 
5064   /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
5065   /// constant expression.
5066   Operator *Op = nullptr;
5067 
5068   explicit BinaryOp(Operator *Op)
5069       : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
5070         Op(Op) {
5071     if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
5072       IsNSW = OBO->hasNoSignedWrap();
5073       IsNUW = OBO->hasNoUnsignedWrap();
5074     }
5075   }
5076 
5077   explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
5078                     bool IsNUW = false)
5079       : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5080 };
5081 
5082 } // end anonymous namespace
5083 
5084 /// Try to map \p V into a BinaryOp, and return \c None on failure.
5085 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
5086   auto *Op = dyn_cast<Operator>(V);
5087   if (!Op)
5088     return None;
5089 
5090   // Implementation detail: all the cleverness here should happen without
5091   // creating new SCEV expressions -- our caller knows tricks to avoid creating
5092   // SCEV expressions when possible, and we should not break that.
5093 
5094   switch (Op->getOpcode()) {
5095   case Instruction::Add:
5096   case Instruction::Sub:
5097   case Instruction::Mul:
5098   case Instruction::UDiv:
5099   case Instruction::URem:
5100   case Instruction::And:
5101   case Instruction::Or:
5102   case Instruction::AShr:
5103   case Instruction::Shl:
5104     return BinaryOp(Op);
5105 
5106   case Instruction::Xor:
5107     if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
5108       // If the RHS of the xor is a signmask, then this is just an add.
5109       // Instcombine turns add of signmask into xor as a strength reduction step.
5110       if (RHSC->getValue().isSignMask())
5111         return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
5112     // Binary `xor` is a bit-wise `add`.
5113     if (V->getType()->isIntegerTy(1))
5114       return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
5115     return BinaryOp(Op);
5116 
5117   case Instruction::LShr:
5118     // Turn logical shift right of a constant into an unsigned divide.
5119     if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
5120       uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
5121 
5122       // If the shift count is not less than the bitwidth, the result of
5123       // the shift is undefined.
Don't try to analyze it, because the
5124       // resolution chosen here may differ from the resolution chosen in
5125       // other parts of the compiler.
5126       if (SA->getValue().ult(BitWidth)) {
5127         Constant *X =
5128             ConstantInt::get(SA->getContext(),
5129                              APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
5130         return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
5131       }
5132     }
5133     return BinaryOp(Op);
5134 
5135   case Instruction::ExtractValue: {
5136     auto *EVI = cast<ExtractValueInst>(Op);
5137     if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5138       break;
5139 
5140     auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
5141     if (!WO)
5142       break;
5143 
5144     Instruction::BinaryOps BinOp = WO->getBinaryOp();
5145     bool Signed = WO->isSigned();
5146     // TODO: Should add nuw/nsw flags for mul as well.
5147     if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
5148       return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5149 
5150     // Now that we know that all uses of the arithmetic-result component of
5151     // WO are guarded by the overflow check, we can go ahead and pretend
5152     // that the arithmetic is non-overflowing.
5153     return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5154                     /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
5155   }
5156 
5157   default:
5158     break;
5159   }
5160 
5161   // Recognize the intrinsic loop.decrement.reg; since it has exactly the same
5162   // semantics as a Sub, return a binary sub expression.
5163   if (auto *II = dyn_cast<IntrinsicInst>(V))
5164     if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5165       return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
5166 
5167   return None;
5168 }
5169 
5170 /// Helper function to createAddRecFromPHIWithCasts. We have a phi
5171 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
5172 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
5173 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
5174 /// follows one of the following patterns:
5175 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
5176 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
5177 /// If the SCEV expression of \p Op conforms with one of the expected patterns
5178 /// we return the type of the truncation operation, and indicate whether the
5179 /// truncated type should be treated as signed/unsigned by setting
5180 /// \p Signed to true/false, respectively.
5181 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
5182                                bool &Signed, ScalarEvolution &SE) {
5183   // The case where Op == SymbolicPHI (that is, with no type conversions on
5184   // the way) is handled by the regular add recurrence creating logic and
5185   // would have already been triggered in createAddRecFromPHI. Reaching it here
5186   // means that createAddRecFromPHI had failed for this PHI before (e.g.,
5187   // because one of the other operands of the SCEVAddExpr updating this PHI is
5188   // not invariant).
5189   //
5190   // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
5191   // this case predicates that allow us to prove that Op == SymbolicPHI will
5192   // be added.
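  // A concrete shape of the pattern (hypothetical IR):
  //   %phi   = phi i64 [ %start, %entry ], [ %add, %loop ]
  //   %trunc = trunc i64 %phi to i32
  //   %sext  = sext i32 %trunc to i64
  //   %add   = add i64 %sext, %step
  // Here Op = (sext i32 (trunc i64 %phi to i32) to i64) matches the first
  // pattern, so we return the truncation type i32 and set Signed to true.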
5193   if (Op == SymbolicPHI)
5194     return nullptr;
5195 
5196   unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
5197   unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
5198   if (SourceBits != NewBits)
5199     return nullptr;
5200 
5201   const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
5202   const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
5203   if (!SExt && !ZExt)
5204     return nullptr;
5205   const SCEVTruncateExpr *Trunc =
5206       SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
5207            : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
5208   if (!Trunc)
5209     return nullptr;
5210   const SCEV *X = Trunc->getOperand();
5211   if (X != SymbolicPHI)
5212     return nullptr;
5213   Signed = SExt != nullptr;
5214   return Trunc->getType();
5215 }
5216 
5217 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
5218   if (!PN->getType()->isIntegerTy())
5219     return nullptr;
5220   const Loop *L = LI.getLoopFor(PN->getParent());
5221   if (!L || L->getHeader() != PN->getParent())
5222     return nullptr;
5223   return L;
5224 }
5225 
5226 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
5227 // computation that updates the phi follows the following pattern:
5228 //   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
5229 // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
5230 // If so, try to see if it can be rewritten as an AddRecExpr under some
5231 // Predicates. If successful, return them as a pair. Also cache the results
5232 // of the analysis.
5233 //
5234 // Example usage scenario:
5235 //    Say the Rewriter is called for the following SCEV:
5236 //         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
5237 //    where:
5238 //         %X = phi i64 (%Start, %BEValue)
5239 //    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
5240 //    and call this function with %SymbolicPHI = %X.
5241 //
5242 //    The analysis will find that the value coming around the backedge has
5243 //    the following SCEV:
5244 //         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
5245 //    Upon concluding that this matches the desired pattern, the function
5246 //    will return the pair {NewAddRec, SmallPredsVec} where:
5247 //         NewAddRec = {%Start,+,%Step}
5248 //         SmallPredsVec = {P1, P2, P3} as follows:
5249 //           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
5250 //           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
5251 //           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
5252 //    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
5253 //    under the predicates {P1,P2,P3}.
5254 //    This predicated rewrite will be cached in PredicatedSCEVRewrites:
5255 //         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
5256 //
5257 // TODOs:
5258 //
5259 // 1) Extend the Induction descriptor to also support inductions that involve
5260 //    casts: When needed (namely, when we are called in the context of the
5261 //    vectorizer induction analysis), a Set of cast instructions will be
5262 //    populated by this method, and provided back to isInductionPHI. This is
5263 //    needed to allow the vectorizer to properly record them to be ignored by
5264 //    the cost model and to avoid vectorizing them (otherwise these casts,
5265 //    which are redundant under the runtime overflow checks, will be
5266 //    vectorized, which can be costly).
5267 // 5268 // 2) Support additional induction/PHISCEV patterns: We also want to support 5269 // inductions where the sext-trunc / zext-trunc operations (partly) occur 5270 // after the induction update operation (the induction increment): 5271 // 5272 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 5273 // which correspond to a phi->add->trunc->sext/zext->phi update chain. 5274 // 5275 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 5276 // which correspond to a phi->trunc->add->sext/zext->phi update chain. 5277 // 5278 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 5279 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 5280 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 5281 SmallVector<const SCEVPredicate *, 3> Predicates; 5282 5283 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 5284 // return an AddRec expression under some predicate. 5285 5286 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 5287 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 5288 assert(L && "Expecting an integer loop header phi"); 5289 5290 // The loop may have multiple entrances or multiple exits; we can analyze 5291 // this phi as an addrec if it has a unique entry value and a unique 5292 // backedge value. 5293 Value *BEValueV = nullptr, *StartValueV = nullptr; 5294 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 5295 Value *V = PN->getIncomingValue(i); 5296 if (L->contains(PN->getIncomingBlock(i))) { 5297 if (!BEValueV) { 5298 BEValueV = V; 5299 } else if (BEValueV != V) { 5300 BEValueV = nullptr; 5301 break; 5302 } 5303 } else if (!StartValueV) { 5304 StartValueV = V; 5305 } else if (StartValueV != V) { 5306 StartValueV = nullptr; 5307 break; 5308 } 5309 } 5310 if (!BEValueV || !StartValueV) 5311 return None; 5312 5313 const SCEV *BEValue = getSCEV(BEValueV); 5314 5315 // If the value coming around the backedge is an add with the symbolic 5316 // value we just inserted, possibly with casts that we can ignore under 5317 // an appropriate runtime guard, then we found a simple induction variable! 5318 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 5319 if (!Add) 5320 return None; 5321 5322 // If there is a single occurrence of the symbolic value, possibly 5323 // casted, replace it with a recurrence. 5324 unsigned FoundIndex = Add->getNumOperands(); 5325 Type *TruncTy = nullptr; 5326 bool Signed; 5327 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5328 if ((TruncTy = 5329 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 5330 if (FoundIndex == e) { 5331 FoundIndex = i; 5332 break; 5333 } 5334 5335 if (FoundIndex == Add->getNumOperands()) 5336 return None; 5337 5338 // Create an add with everything but the specified operand. 5339 SmallVector<const SCEV *, 8> Ops; 5340 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5341 if (i != FoundIndex) 5342 Ops.push_back(Add->getOperand(i)); 5343 const SCEV *Accum = getAddExpr(Ops); 5344 5345 // The runtime checks will not be valid if the step amount is 5346 // varying inside the loop. 
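  // E.g. (sketch): if Accum involves a load whose memory is stored to inside
  // the loop, Accum is not loop-invariant, and no single runtime check over
  // one "%step" value could cover every iteration, so we must bail out here.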
5347 if (!isLoopInvariant(Accum, L)) 5348 return None; 5349 5350 // *** Part2: Create the predicates 5351 5352 // Analysis was successful: we have a phi-with-cast pattern for which we 5353 // can return an AddRec expression under the following predicates: 5354 // 5355 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 5356 // fits within the truncated type (does not overflow) for i = 0 to n-1. 5357 // P2: An Equal predicate that guarantees that 5358 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 5359 // P3: An Equal predicate that guarantees that 5360 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 5361 // 5362 // As we next prove, the above predicates guarantee that: 5363 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 5364 // 5365 // 5366 // More formally, we want to prove that: 5367 // Expr(i+1) = Start + (i+1) * Accum 5368 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5369 // 5370 // Given that: 5371 // 1) Expr(0) = Start 5372 // 2) Expr(1) = Start + Accum 5373 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 5374 // 3) Induction hypothesis (step i): 5375 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 5376 // 5377 // Proof: 5378 // Expr(i+1) = 5379 // = Start + (i+1)*Accum 5380 // = (Start + i*Accum) + Accum 5381 // = Expr(i) + Accum 5382 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 5383 // :: from step i 5384 // 5385 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 5386 // 5387 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 5388 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 5389 // + Accum :: from P3 5390 // 5391 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 5392 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 5393 // 5394 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 5395 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5396 // 5397 // By induction, the same applies to all iterations 1<=i<n: 5398 // 5399 5400 // Create a truncated addrec for which we will add a no overflow check (P1). 5401 const SCEV *StartVal = getSCEV(StartValueV); 5402 const SCEV *PHISCEV = 5403 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 5404 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 5405 5406 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 5407 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 5408 // will be constant. 5409 // 5410 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 5411 // add P1. 5412 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 5413 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 5414 Signed ? SCEVWrapPredicate::IncrementNSSW 5415 : SCEVWrapPredicate::IncrementNUSW; 5416 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 5417 Predicates.push_back(AddRecPred); 5418 } 5419 5420 // Create the Equal Predicates P2,P3: 5421 5422 // It is possible that the predicates P2 and/or P3 are computable at 5423 // compile time due to StartVal and/or Accum being constants. 5424 // If either one is, then we can check that now and escape if either P2 5425 // or P3 is false. 
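  // E.g. (hypothetical constants): with TruncTy = i8 and StartVal = 256,
  // (sext i8 (trunc i64 256 to i8) to i64) folds to 0, and 0 != 256 is known
  // at compile time, so P2 is false and the rewrite is abandoned before any
  // predicates are emitted.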
5426 
5427   // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
5428   // for each of StartVal and Accum
5429   auto getExtendedExpr = [&](const SCEV *Expr,
5430                              bool CreateSignExtend) -> const SCEV * {
5431     assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
5432     const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
5433     const SCEV *ExtendedExpr =
5434         CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
5435                          : getZeroExtendExpr(TruncatedExpr, Expr->getType());
5436     return ExtendedExpr;
5437   };
5438 
5439   // Given:
5440   //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
5441   //                = getExtendedExpr(Expr)
5442   // Determine whether the predicate P: Expr == ExtendedExpr
5443   // is known to be false at compile time
5444   auto PredIsKnownFalse = [&](const SCEV *Expr,
5445                               const SCEV *ExtendedExpr) -> bool {
5446     return Expr != ExtendedExpr &&
5447            isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
5448   };
5449 
5450   const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
5451   if (PredIsKnownFalse(StartVal, StartExtended)) {
5452     LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
5453     return None;
5454   }
5455 
5456   // The Step is always Signed (because the overflow checks are either
5457   // NSSW or NUSW)
5458   const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
5459   if (PredIsKnownFalse(Accum, AccumExtended)) {
5460     LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
5461     return None;
5462   }
5463 
5464   auto AppendPredicate = [&](const SCEV *Expr,
5465                              const SCEV *ExtendedExpr) -> void {
5466     if (Expr != ExtendedExpr &&
5467         !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
5468       const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
5469       LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
5470       Predicates.push_back(Pred);
5471     }
5472   };
5473 
5474   AppendPredicate(StartVal, StartExtended);
5475   AppendPredicate(Accum, AccumExtended);
5476 
5477   // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
5478   // which the casts had been folded away. The caller can rewrite SymbolicPHI
5479   // into NewAR if it will also add the runtime overflow checks specified in
5480   // Predicates.
5481   auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
5482 
5483   std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5484       std::make_pair(NewAR, Predicates);
5485   // Remember the result of the analysis for this SCEV at this location.
5486   PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
5487   return PredRewrite;
5488 }
5489 
5490 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5491 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
5492   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5493   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5494   if (!L)
5495     return None;
5496 
5497   // Check to see if we already analyzed this PHI.
5498 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 5499 if (I != PredicatedSCEVRewrites.end()) { 5500 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 5501 I->second; 5502 // Analysis was done before and failed to create an AddRec: 5503 if (Rewrite.first == SymbolicPHI) 5504 return None; 5505 // Analysis was done before and succeeded to create an AddRec under 5506 // a predicate: 5507 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 5508 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 5509 return Rewrite; 5510 } 5511 5512 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 5513 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 5514 5515 // Record in the cache that the analysis failed 5516 if (!Rewrite) { 5517 SmallVector<const SCEVPredicate *, 3> Predicates; 5518 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 5519 return None; 5520 } 5521 5522 return Rewrite; 5523 } 5524 5525 // FIXME: This utility is currently required because the Rewriter currently 5526 // does not rewrite this expression: 5527 // {0, +, (sext ix (trunc iy to ix) to iy)} 5528 // into {0, +, %step}, 5529 // even when the following Equal predicate exists: 5530 // "%step == (sext ix (trunc iy to ix) to iy)". 5531 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 5532 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 5533 if (AR1 == AR2) 5534 return true; 5535 5536 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 5537 if (Expr1 != Expr2 && !Preds->implies(SE.getEqualPredicate(Expr1, Expr2)) && 5538 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1))) 5539 return false; 5540 return true; 5541 }; 5542 5543 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 5544 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 5545 return false; 5546 return true; 5547 } 5548 5549 /// A helper function for createAddRecFromPHI to handle simple cases. 5550 /// 5551 /// This function tries to find an AddRec expression for the simplest (yet most 5552 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 5553 /// If it fails, createAddRecFromPHI will use a more general, but slow, 5554 /// technique for finding the AddRec expression. 5555 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 5556 Value *BEValueV, 5557 Value *StartValueV) { 5558 const Loop *L = LI.getLoopFor(PN->getParent()); 5559 assert(L && L->getHeader() == PN->getParent()); 5560 assert(BEValueV && StartValueV); 5561 5562 auto BO = MatchBinaryOp(BEValueV, DT); 5563 if (!BO) 5564 return nullptr; 5565 5566 if (BO->Opcode != Instruction::Add) 5567 return nullptr; 5568 5569 const SCEV *Accum = nullptr; 5570 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 5571 Accum = getSCEV(BO->RHS); 5572 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 5573 Accum = getSCEV(BO->LHS); 5574 5575 if (!Accum) 5576 return nullptr; 5577 5578 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5579 if (BO->IsNUW) 5580 Flags = setFlags(Flags, SCEV::FlagNUW); 5581 if (BO->IsNSW) 5582 Flags = setFlags(Flags, SCEV::FlagNSW); 5583 5584 const SCEV *StartVal = getSCEV(StartValueV); 5585 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5586 insertValueToMap(PN, PHISCEV); 5587 5588 // We can add Flags to the post-inc expression only if we 5589 // know that it is *undefined behavior* for BEValueV to 5590 // overflow. 
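  // For instance (sketch): if BEValueV is "%iv.next = add nsw i64 %iv, %step"
  // and a poison %iv.next is guaranteed to reach a side-effecting use on the
  // next iteration, a well-defined execution can never overflow, so the
  // post-inc expression may carry the same no-wrap flags.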
5591   if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
5592     assert(isLoopInvariant(Accum, L) &&
5593            "Accum is defined outside L, but is not invariant?");
5594     if (isAddRecNeverPoison(BEInst, L))
5595       (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5596   }
5597 
5598   return PHISCEV;
5599 }
5600 
5601 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5602   const Loop *L = LI.getLoopFor(PN->getParent());
5603   if (!L || L->getHeader() != PN->getParent())
5604     return nullptr;
5605 
5606   // The loop may have multiple entrances or multiple exits; we can analyze
5607   // this phi as an addrec if it has a unique entry value and a unique
5608   // backedge value.
5609   Value *BEValueV = nullptr, *StartValueV = nullptr;
5610   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5611     Value *V = PN->getIncomingValue(i);
5612     if (L->contains(PN->getIncomingBlock(i))) {
5613       if (!BEValueV) {
5614         BEValueV = V;
5615       } else if (BEValueV != V) {
5616         BEValueV = nullptr;
5617         break;
5618       }
5619     } else if (!StartValueV) {
5620       StartValueV = V;
5621     } else if (StartValueV != V) {
5622       StartValueV = nullptr;
5623       break;
5624     }
5625   }
5626   if (!BEValueV || !StartValueV)
5627     return nullptr;
5628 
5629   assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5630          "PHI node already processed?");
5631 
5632   // First, try to find an AddRec expression without creating a fictitious
5633   // symbolic value for PN.
5634   if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5635     return S;
5636 
5637   // Handle PHI node value symbolically.
5638   const SCEV *SymbolicName = getUnknown(PN);
5639   insertValueToMap(PN, SymbolicName);
5640 
5641   // Using this symbolic name for the PHI, analyze the value coming around
5642   // the back-edge.
5643   const SCEV *BEValue = getSCEV(BEValueV);
5644 
5645   // NOTE: If BEValue is loop invariant, we know that the PHI node just
5646   // has a special value for the first iteration of the loop.
5647 
5648   // If the value coming around the backedge is an add with the symbolic
5649   // value we just inserted, then we found a simple induction variable!
5650   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5651     // If there is a single occurrence of the symbolic value, replace it
5652     // with a recurrence.
5653     unsigned FoundIndex = Add->getNumOperands();
5654     for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5655       if (Add->getOperand(i) == SymbolicName)
5656         if (FoundIndex == e) {
5657           FoundIndex = i;
5658           break;
5659         }
5660 
5661     if (FoundIndex != Add->getNumOperands()) {
5662       // Create an add with everything but the specified operand.
5663       SmallVector<const SCEV *, 8> Ops;
5664       for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5665         if (i != FoundIndex)
5666           Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5667                                                              L, *this));
5668       const SCEV *Accum = getAddExpr(Ops);
5669 
5670       // This is not a valid addrec if the step amount is varying each
5671       // loop iteration, but is not itself an addrec in this loop.
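      // E.g. (sketch): with two phis in the same loop, where %x is updated
      // by "%x + %j" and %j by "%j + 1", Accum for %x is the addrec
      // {%j0,+,1}<%L>; %x then becomes the quadratic addrec
      // {%x0,+,%j0,+,1}<%L>, which the check below accepts.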
5672       if (isLoopInvariant(Accum, L) ||
5673           (isa<SCEVAddRecExpr>(Accum) &&
5674            cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5675         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5676 
5677         if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5678           if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5679             if (BO->IsNUW)
5680               Flags = setFlags(Flags, SCEV::FlagNUW);
5681             if (BO->IsNSW)
5682               Flags = setFlags(Flags, SCEV::FlagNSW);
5683           }
5684         } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5685           // If the increment is an inbounds GEP, then we know the address
5686           // space cannot be wrapped around. We cannot make any guarantee
5687           // about signed or unsigned overflow because pointers are
5688           // unsigned but we may have a negative index from the base
5689           // pointer. We can guarantee that no unsigned wrap occurs if the
5690           // indices form a positive value.
5691           if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5692             Flags = setFlags(Flags, SCEV::FlagNW);
5693 
5694             const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5695             if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5696               Flags = setFlags(Flags, SCEV::FlagNUW);
5697           }
5698 
5699           // We cannot transfer nuw and nsw flags from subtraction
5700           // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5701           // for instance.
5702         }
5703 
5704         const SCEV *StartVal = getSCEV(StartValueV);
5705         const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5706 
5707         // Okay, for the entire analysis of this edge we assumed the PHI
5708         // to be symbolic. We now need to go back and purge all of the
5709         // entries for the scalars that use the symbolic expression.
5710         forgetMemoizedResults(SymbolicName);
5711         insertValueToMap(PN, PHISCEV);
5712 
5713         // We can add Flags to the post-inc expression only if we
5714         // know that it is *undefined behavior* for BEValueV to
5715         // overflow.
5716         if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5717           if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5718             (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5719 
5720         return PHISCEV;
5721       }
5722     }
5723   } else {
5724     // Otherwise, this could be a loop like this:
5725     //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
5726     // In this case, j = {1,+,1} and BEValue is j.
5727     // Because the other in-value of i (0) fits the evolution of BEValue,
5728     // i really is an addrec evolution.
5729     //
5730     // We can generalize this by saying that i is the shifted value of
5731     // BEValue by one iteration:
5732     //     PHI(f(0), f({1,+,1})) --> f({0,+,1})
5733     const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5734     const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5735     if (Shifted != getCouldNotCompute() &&
5736         Start != getCouldNotCompute()) {
5737       const SCEV *StartVal = getSCEV(StartValueV);
5738       if (Start == StartVal) {
5739         // Okay, for the entire analysis of this edge we assumed the PHI
5740         // to be symbolic. We now need to go back and purge all of the
5741         // entries for the scalars that use the symbolic expression.
5742         forgetMemoizedResults(SymbolicName);
5743         insertValueToMap(PN, Shifted);
5744         return Shifted;
5745       }
5746     }
5747   }
5748 
5749   // Remove the temporary PHI node SCEV that has been inserted while intending
5750   // to create an AddRecExpr for this PHI node. We cannot keep this temporary
5751   // entry, as it would prevent later (possibly simpler) SCEV expressions from
5752   // being added to the ValueExprMap.
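  // (Sketch of the hazard: if the stale SCEVUnknown for this phi were left
  // in the map, later getSCEV queries would keep returning it rather than
  // re-running an analysis that might by then produce a simpler result.)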
5753   eraseValueFromMap(PN);
5754 
5755   return nullptr;
5756 }
5757 
5758 // Checks if the SCEV S is available at BB. S is considered available at BB
5759 // if S can be materialized at BB without introducing a fault.
5760 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5761                                BasicBlock *BB) {
5762   struct CheckAvailable {
5763     bool TraversalDone = false;
5764     bool Available = true;
5765 
5766     const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5767     BasicBlock *BB = nullptr;
5768     DominatorTree &DT;
5769 
5770     CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5771         : L(L), BB(BB), DT(DT) {}
5772 
5773     bool setUnavailable() {
5774       TraversalDone = true;
5775       Available = false;
5776       return false;
5777     }
5778 
5779     bool follow(const SCEV *S) {
5780       switch (S->getSCEVType()) {
5781       case scConstant:
5782       case scPtrToInt:
5783       case scTruncate:
5784       case scZeroExtend:
5785       case scSignExtend:
5786       case scAddExpr:
5787       case scMulExpr:
5788       case scUMaxExpr:
5789       case scSMaxExpr:
5790       case scUMinExpr:
5791       case scSMinExpr:
5792       case scSequentialUMinExpr:
5793         // These expressions are available if their operand(s) is/are.
5794         return true;
5795 
5796       case scAddRecExpr: {
5797         // We allow add recurrences that are on the loop BB is in, or some
5798         // outer loop. This guarantees availability because the value of the
5799         // add recurrence at BB is simply the "current" value of the induction
5800         // variable. We can relax this in the future; for instance an add
5801         // recurrence on a sibling dominating loop is also available at BB.
5802         const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5803         if (L && (ARLoop == L || ARLoop->contains(L)))
5804           return true;
5805 
5806         return setUnavailable();
5807       }
5808 
5809       case scUnknown: {
5810         // For SCEVUnknown, we check for simple dominance.
5811         const auto *SU = cast<SCEVUnknown>(S);
5812         Value *V = SU->getValue();
5813 
5814         if (isa<Argument>(V))
5815           return false;
5816 
5817         if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5818           return false;
5819 
5820         return setUnavailable();
5821       }
5822 
5823       case scUDivExpr:
5824       case scCouldNotCompute:
5825         // We do not try to be smart about these at all.
5826         return setUnavailable();
5827       }
5828       llvm_unreachable("Unknown SCEV kind!");
5829     }
5830 
5831     bool isDone() { return TraversalDone; }
5832   };
5833 
5834   CheckAvailable CA(L, BB, DT);
5835   SCEVTraversal<CheckAvailable> ST(CA);
5836 
5837   ST.visitAll(S);
5838   return CA.Available;
5839 }
5840 
5841 // Try to match a control flow sequence that branches out at BI and merges back
5842 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5843 // match.
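// Sketch of a rejected shape (hypothetical CFG): for "br i1 %c, label %bb,
// label %bb" both edges run between the same pair of blocks, so
// LeftEdge.isSingleEdge() is false and no incoming value can be attributed
// to a specific arm of the branch; the match fails.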
5844 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5845 Value *&C, Value *&LHS, Value *&RHS) { 5846 C = BI->getCondition(); 5847 5848 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5849 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5850 5851 if (!LeftEdge.isSingleEdge()) 5852 return false; 5853 5854 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5855 5856 Use &LeftUse = Merge->getOperandUse(0); 5857 Use &RightUse = Merge->getOperandUse(1); 5858 5859 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5860 LHS = LeftUse; 5861 RHS = RightUse; 5862 return true; 5863 } 5864 5865 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5866 LHS = RightUse; 5867 RHS = LeftUse; 5868 return true; 5869 } 5870 5871 return false; 5872 } 5873 5874 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5875 auto IsReachable = 5876 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5877 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5878 const Loop *L = LI.getLoopFor(PN->getParent()); 5879 5880 // We don't want to break LCSSA, even in a SCEV expression tree. 5881 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5882 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5883 return nullptr; 5884 5885 // Try to match 5886 // 5887 // br %cond, label %left, label %right 5888 // left: 5889 // br label %merge 5890 // right: 5891 // br label %merge 5892 // merge: 5893 // V = phi [ %x, %left ], [ %y, %right ] 5894 // 5895 // as "select %cond, %x, %y" 5896 5897 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5898 assert(IDom && "At least the entry block should dominate PN"); 5899 5900 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5901 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5902 5903 if (BI && BI->isConditional() && 5904 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5905 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5906 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5907 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5908 } 5909 5910 return nullptr; 5911 } 5912 5913 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5914 if (const SCEV *S = createAddRecFromPHI(PN)) 5915 return S; 5916 5917 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5918 return S; 5919 5920 // If the PHI has a single incoming value, follow that value, unless the 5921 // PHI's incoming blocks are in a different loop, in which case doing so 5922 // risks breaking LCSSA form. Instcombine would normally zap these, but 5923 // it doesn't have DominatorTree information, so it may miss cases. 5924 if (Value *V = simplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5925 if (LI.replacementPreservesLCSSAForm(PN, V)) 5926 return getSCEV(V); 5927 5928 // If it's not a loop phi, we can't handle it yet. 5929 return getUnknown(PN); 5930 } 5931 5932 bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, 5933 SCEVTypes RootKind) { 5934 struct FindClosure { 5935 const SCEV *OperandToFind; 5936 const SCEVTypes RootKind; // Must be a sequential min/max expression. 5937 const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind. 
5938 5939 bool Found = false; 5940 5941 bool canRecurseInto(SCEVTypes Kind) const { 5942 // We can only recurse into the SCEV expression of the same effective type 5943 // as the type of our root SCEV expression, and into zero-extensions. 5944 return RootKind == Kind || NonSequentialRootKind == Kind || 5945 scZeroExtend == Kind; 5946 }; 5947 5948 FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind) 5949 : OperandToFind(OperandToFind), RootKind(RootKind), 5950 NonSequentialRootKind( 5951 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( 5952 RootKind)) {} 5953 5954 bool follow(const SCEV *S) { 5955 Found = S == OperandToFind; 5956 5957 return !isDone() && canRecurseInto(S->getSCEVType()); 5958 } 5959 5960 bool isDone() const { return Found; } 5961 }; 5962 5963 FindClosure FC(OperandToFind, RootKind); 5964 visitAll(Root, FC); 5965 return FC.Found; 5966 } 5967 5968 const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond( 5969 Instruction *I, ICmpInst *Cond, Value *TrueVal, Value *FalseVal) { 5970 // Try to match some simple smax or umax patterns. 5971 auto *ICI = Cond; 5972 5973 Value *LHS = ICI->getOperand(0); 5974 Value *RHS = ICI->getOperand(1); 5975 5976 switch (ICI->getPredicate()) { 5977 case ICmpInst::ICMP_SLT: 5978 case ICmpInst::ICMP_SLE: 5979 case ICmpInst::ICMP_ULT: 5980 case ICmpInst::ICMP_ULE: 5981 std::swap(LHS, RHS); 5982 LLVM_FALLTHROUGH; 5983 case ICmpInst::ICMP_SGT: 5984 case ICmpInst::ICMP_SGE: 5985 case ICmpInst::ICMP_UGT: 5986 case ICmpInst::ICMP_UGE: 5987 // a > b ? a+x : b+x -> max(a, b)+x 5988 // a > b ? b+x : a+x -> min(a, b)+x 5989 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5990 bool Signed = ICI->isSigned(); 5991 const SCEV *LA = getSCEV(TrueVal); 5992 const SCEV *RA = getSCEV(FalseVal); 5993 const SCEV *LS = getSCEV(LHS); 5994 const SCEV *RS = getSCEV(RHS); 5995 if (LA->getType()->isPointerTy()) { 5996 // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA. 5997 // Need to make sure we can't produce weird expressions involving 5998 // negated pointers. 5999 if (LA == LS && RA == RS) 6000 return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS); 6001 if (LA == RS && RA == LS) 6002 return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS); 6003 } 6004 auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * { 6005 if (Op->getType()->isPointerTy()) { 6006 Op = getLosslessPtrToIntExpr(Op); 6007 if (isa<SCEVCouldNotCompute>(Op)) 6008 return Op; 6009 } 6010 if (Signed) 6011 Op = getNoopOrSignExtend(Op, I->getType()); 6012 else 6013 Op = getNoopOrZeroExtend(Op, I->getType()); 6014 return Op; 6015 }; 6016 LS = CoerceOperand(LS); 6017 RS = CoerceOperand(RS); 6018 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS)) 6019 break; 6020 const SCEV *LDiff = getMinusSCEV(LA, LS); 6021 const SCEV *RDiff = getMinusSCEV(RA, RS); 6022 if (LDiff == RDiff) 6023 return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), 6024 LDiff); 6025 LDiff = getMinusSCEV(LA, RS); 6026 RDiff = getMinusSCEV(RA, LS); 6027 if (LDiff == RDiff) 6028 return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), 6029 LDiff); 6030 } 6031 break; 6032 case ICmpInst::ICMP_NE: 6033 // x != 0 ? x+y : C+y -> x == 0 ? C+y : x+y 6034 std::swap(TrueVal, FalseVal); 6035 LLVM_FALLTHROUGH; 6036 case ICmpInst::ICMP_EQ: 6037 // x == 0 ? 
C+y : x+y -> umax(x, C)+y iff C u<= 1 6038 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 6039 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 6040 const SCEV *X = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 6041 const SCEV *TrueValExpr = getSCEV(TrueVal); // C+y 6042 const SCEV *FalseValExpr = getSCEV(FalseVal); // x+y 6043 const SCEV *Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x 6044 const SCEV *C = getMinusSCEV(TrueValExpr, Y); // C = (C+y)-y 6045 if (isa<SCEVConstant>(C) && cast<SCEVConstant>(C)->getAPInt().ule(1)) 6046 return getAddExpr(getUMaxExpr(X, C), Y); 6047 } 6048 // x == 0 ? 0 : umin (..., x, ...) -> umin_seq(x, umin (...)) 6049 // x == 0 ? 0 : umin_seq(..., x, ...) -> umin_seq(x, umin_seq(...)) 6050 // x == 0 ? 0 : umin (..., umin_seq(..., x, ...), ...) 6051 // -> umin_seq(x, umin (..., umin_seq(...), ...)) 6052 if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero() && 6053 isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->isZero()) { 6054 const SCEV *X = getSCEV(LHS); 6055 while (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(X)) 6056 X = ZExt->getOperand(); 6057 if (getTypeSizeInBits(X->getType()) <= getTypeSizeInBits(I->getType())) { 6058 const SCEV *FalseValExpr = getSCEV(FalseVal); 6059 if (SCEVMinMaxExprContains(FalseValExpr, X, scSequentialUMinExpr)) 6060 return getUMinExpr(getNoopOrZeroExtend(X, I->getType()), FalseValExpr, 6061 /*Sequential=*/true); 6062 } 6063 } 6064 break; 6065 default: 6066 break; 6067 } 6068 6069 return getUnknown(I); 6070 } 6071 6072 static Optional<const SCEV *> 6073 createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, 6074 const SCEV *TrueExpr, const SCEV *FalseExpr) { 6075 assert(CondExpr->getType()->isIntegerTy(1) && 6076 TrueExpr->getType() == FalseExpr->getType() && 6077 TrueExpr->getType()->isIntegerTy(1) && 6078 "Unexpected operands of a select."); 6079 6080 // i1 cond ? i1 x : i1 C --> C + (i1 cond ? (i1 x - i1 C) : i1 0) 6081 // --> C + (umin_seq cond, x - C) 6082 // 6083 // i1 cond ? i1 C : i1 x --> C + (i1 cond ? i1 0 : (i1 x - i1 C)) 6084 // --> C + (i1 ~cond ? (i1 x - i1 C) : i1 0) 6085 // --> C + (umin_seq ~cond, x - C) 6086 6087 // FIXME: while we can't legally model the case where both of the hands 6088 // are fully variable, we only require that the *difference* is constant. 
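// For instance (illustrative): `select i1 %c, i1 true, i1 %x` matches the
// second form above with C = 1, and is modeled as
// 1 + (umin_seq ~%c, %x - 1). Checking both values of %c (and of %x)
// reproduces the original select in i1 arithmetic.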
6089 if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr)) 6090 return None; 6091 6092 const SCEV *X, *C; 6093 if (isa<SCEVConstant>(TrueExpr)) { 6094 CondExpr = SE->getNotSCEV(CondExpr); 6095 X = FalseExpr; 6096 C = TrueExpr; 6097 } else { 6098 X = TrueExpr; 6099 C = FalseExpr; 6100 } 6101 return SE->getAddExpr(C, SE->getUMinExpr(CondExpr, SE->getMinusSCEV(X, C), 6102 /*Sequential=*/true)); 6103 } 6104 6105 static Optional<const SCEV *> createNodeForSelectViaUMinSeq(ScalarEvolution *SE, 6106 Value *Cond, 6107 Value *TrueVal, 6108 Value *FalseVal) { 6109 if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal)) 6110 return None; 6111 6112 const auto *SECond = SE->getSCEV(Cond); 6113 const auto *SETrue = SE->getSCEV(TrueVal); 6114 const auto *SEFalse = SE->getSCEV(FalseVal); 6115 return createNodeForSelectViaUMinSeq(SE, SECond, SETrue, SEFalse); 6116 } 6117 6118 const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq( 6119 Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) { 6120 assert(Cond->getType()->isIntegerTy(1) && "Select condition is not an i1?"); 6121 assert(TrueVal->getType() == FalseVal->getType() && 6122 V->getType() == TrueVal->getType() && 6123 "Types of select hands and of the result must match."); 6124 6125 // For now, only deal with i1-typed `select`s. 6126 if (!V->getType()->isIntegerTy(1)) 6127 return getUnknown(V); 6128 6129 if (Optional<const SCEV *> S = 6130 createNodeForSelectViaUMinSeq(this, Cond, TrueVal, FalseVal)) 6131 return *S; 6132 6133 return getUnknown(V); 6134 } 6135 6136 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond, 6137 Value *TrueVal, 6138 Value *FalseVal) { 6139 // Handle "constant" branch or select. This can occur for instance when a 6140 // loop pass transforms an inner loop and moves on to process the outer loop. 6141 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 6142 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 6143 6144 if (auto *I = dyn_cast<Instruction>(V)) { 6145 if (auto *ICI = dyn_cast<ICmpInst>(Cond)) { 6146 const SCEV *S = createNodeForSelectOrPHIInstWithICmpInstCond( 6147 I, ICI, TrueVal, FalseVal); 6148 if (!isa<SCEVUnknown>(S)) 6149 return S; 6150 } 6151 } 6152 6153 return createNodeForSelectOrPHIViaUMinSeq(V, Cond, TrueVal, FalseVal); 6154 } 6155 6156 /// Expand GEP instructions into add and multiply operations. This allows them 6157 /// to be analyzed by regular SCEV code. 6158 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 6159 assert(GEP->getSourceElementType()->isSized() && 6160 "GEP source element type must be sized"); 6161 6162 SmallVector<const SCEV *, 4> IndexExprs; 6163 for (Value *Index : GEP->indices()) 6164 IndexExprs.push_back(getSCEV(Index)); 6165 return getGEPExpr(GEP, IndexExprs); 6166 } 6167 6168 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 6169 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 6170 return C->getAPInt().countTrailingZeros(); 6171 6172 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) 6173 return GetMinTrailingZeros(I->getOperand()); 6174 6175 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 6176 return std::min(GetMinTrailingZeros(T->getOperand()), 6177 (uint32_t)getTypeSizeInBits(T->getType())); 6178 6179 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 6180 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 6181 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 6182 ? 
getTypeSizeInBits(E->getType())
6183 : OpRes;
6184 }
6185
6186 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
6187 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
6188 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
6189 ? getTypeSizeInBits(E->getType())
6190 : OpRes;
6191 }
6192
6193 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
6194 // The result is the min of all operands' results.
6195 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
6196 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
6197 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
6198 return MinOpRes;
6199 }
6200
6201 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
6202 // The result is the sum of all operands' results.
6203 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
6204 uint32_t BitWidth = getTypeSizeInBits(M->getType());
6205 for (unsigned i = 1, e = M->getNumOperands();
6206 SumOpRes != BitWidth && i != e; ++i)
6207 SumOpRes =
6208 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
6209 return SumOpRes;
6210 }
6211
6212 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
6213 // The result is the min of all operands' results.
6214 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
6215 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
6216 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
6217 return MinOpRes;
6218 }
6219
6220 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
6221 // The result is the min of all operands' results.
6222 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
6223 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
6224 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
6225 return MinOpRes;
6226 }
6227
6228 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
6229 // The result is the min of all operands' results.
6230 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
6231 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
6232 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
6233 return MinOpRes;
6234 }
6235
6236 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
6237 // For a SCEVUnknown, ask ValueTracking.
6238 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
6239 return Known.countMinTrailingZeros();
6240 }
6241
6242 // SCEVUDivExpr
6243 return 0;
6244 }
6245
6246 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
6247 auto I = MinTrailingZerosCache.find(S);
6248 if (I != MinTrailingZerosCache.end())
6249 return I->second;
6250
6251 uint32_t Result = GetMinTrailingZerosImpl(S);
6252 auto InsertPair = MinTrailingZerosCache.insert({S, Result});
6253 assert(InsertPair.second && "Should insert a new key");
6254 return InsertPair.first->second;
6255 }
6256
6257 /// Helper method to assign a range to V from metadata present in the IR.
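/// For example (illustrative), an instruction annotated with
/// `!range !{i32 0, i32 10}` is assigned the half-open range [0, 10).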
6258 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
6259 if (Instruction *I = dyn_cast<Instruction>(V))
6260 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
6261 return getConstantRangeFromMetadata(*MD);
6262
6263 return None;
6264 }
6265
6266 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
6267 SCEV::NoWrapFlags Flags) {
6268 if (AddRec->getNoWrapFlags(Flags) != Flags) {
6269 AddRec->setNoWrapFlags(Flags);
6270 UnsignedRanges.erase(AddRec);
6271 SignedRanges.erase(AddRec);
6272 }
6273 }
6274
6275 ConstantRange ScalarEvolution::
6276 getRangeForUnknownRecurrence(const SCEVUnknown *U) {
6277 const DataLayout &DL = getDataLayout();
6278
6279 unsigned BitWidth = getTypeSizeInBits(U->getType());
6280 const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);
6281
6282 // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
6283 // use information about the trip count to improve our available range. Note
6284 // that the trip count independent cases are already handled by known bits.
6285 // WARNING: The definition of recurrence used here is subtly different from
6286 // the one used by AddRec (and thus most of this file). Step is allowed to
6287 // be arbitrarily loop varying here, where AddRec allows only loop invariant
6288 // and other addrecs in the same loop (for non-affine addrecs). The code
6289 // below intentionally handles the case where step is not loop invariant.
6290 auto *P = dyn_cast<PHINode>(U->getValue());
6291 if (!P)
6292 return FullSet;
6293
6294 // Make sure that no Phi input comes from an unreachable block. Otherwise,
6295 // even the values that are not available in these blocks may come from them,
6296 // and this leads to a false-positive recurrence test.
6297 for (auto *Pred : predecessors(P->getParent()))
6298 if (!DT.isReachableFromEntry(Pred))
6299 return FullSet;
6300
6301 BinaryOperator *BO;
6302 Value *Start, *Step;
6303 if (!matchSimpleRecurrence(P, BO, Start, Step))
6304 return FullSet;
6305
6306 // If we found a recurrence in reachable code, we must be in a loop. Note
6307 // that BO might be in some subloop of L, and that's completely okay.
6308 auto *L = LI.getLoopFor(P->getParent());
6309 assert(L && L->getHeader() == P->getParent());
6310 if (!L->contains(BO->getParent()))
6311 // NOTE: This bailout should be an assert instead. However, asserting
6312 // the condition here exposes a case where LoopFusion is querying SCEV
6313 // with malformed loop information in the midst of the transform.
6314 // There doesn't appear to be an obvious fix, so for the moment bail out
6315 // until the caller issue can be fixed. PR49566 tracks the bug.
6316 return FullSet;
6317
6318 // TODO: Extend to other opcodes such as mul and div.
6319 switch (BO->getOpcode()) {
6320 default:
6321 return FullSet;
6322 case Instruction::AShr:
6323 case Instruction::LShr:
6324 case Instruction::Shl:
6325 break;
6326 };
6327
6328 if (BO->getOperand(0) != P)
6329 // TODO: Handle the power function forms some day.
6330 return FullSet;
6331
6332 unsigned TC = getSmallConstantMaxTripCount(L);
6333 if (!TC || TC >= BitWidth)
6334 return FullSet;
6335
6336 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
6337 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
6338 assert(KnownStart.getBitWidth() == BitWidth &&
6339 KnownStep.getBitWidth() == BitWidth);
6340
6341 // Compute total shift amount, being careful of overflow and bitwidths.
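// (Illustrative: with a maximum known step of 3 and a constant max trip
// count of 5, the value is shifted by at most 3 * (5 - 1) = 12 bits in
// total; if that product overflows BitWidth, we bail out just below.)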
6342 auto MaxShiftAmt = KnownStep.getMaxValue(); 6343 APInt TCAP(BitWidth, TC-1); 6344 bool Overflow = false; 6345 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); 6346 if (Overflow) 6347 return FullSet; 6348 6349 switch (BO->getOpcode()) { 6350 default: 6351 llvm_unreachable("filtered out above"); 6352 case Instruction::AShr: { 6353 // For each ashr, three cases: 6354 // shift = 0 => unchanged value 6355 // saturation => 0 or -1 6356 // other => a value closer to zero (of the same sign) 6357 // Thus, the end value is closer to zero than the start. 6358 auto KnownEnd = KnownBits::ashr(KnownStart, 6359 KnownBits::makeConstant(TotalShift)); 6360 if (KnownStart.isNonNegative()) 6361 // Analogous to lshr (simply not yet canonicalized) 6362 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 6363 KnownStart.getMaxValue() + 1); 6364 if (KnownStart.isNegative()) 6365 // End >=u Start && End <=s Start 6366 return ConstantRange::getNonEmpty(KnownStart.getMinValue(), 6367 KnownEnd.getMaxValue() + 1); 6368 break; 6369 } 6370 case Instruction::LShr: { 6371 // For each lshr, three cases: 6372 // shift = 0 => unchanged value 6373 // saturation => 0 6374 // other => a smaller positive number 6375 // Thus, the low end of the unsigned range is the last value produced. 6376 auto KnownEnd = KnownBits::lshr(KnownStart, 6377 KnownBits::makeConstant(TotalShift)); 6378 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 6379 KnownStart.getMaxValue() + 1); 6380 } 6381 case Instruction::Shl: { 6382 // Iff no bits are shifted out, value increases on every shift. 6383 auto KnownEnd = KnownBits::shl(KnownStart, 6384 KnownBits::makeConstant(TotalShift)); 6385 if (TotalShift.ult(KnownStart.countMinLeadingZeros())) 6386 return ConstantRange(KnownStart.getMinValue(), 6387 KnownEnd.getMaxValue() + 1); 6388 break; 6389 } 6390 }; 6391 return FullSet; 6392 } 6393 6394 /// Determine the range for a particular SCEV. If SignHint is 6395 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 6396 /// with a "cleaner" unsigned (resp. signed) representation. 6397 const ConstantRange & 6398 ScalarEvolution::getRangeRef(const SCEV *S, 6399 ScalarEvolution::RangeSignHint SignHint) { 6400 DenseMap<const SCEV *, ConstantRange> &Cache = 6401 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 6402 : SignedRanges; 6403 ConstantRange::PreferredRangeType RangeType = 6404 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 6405 ? ConstantRange::Unsigned : ConstantRange::Signed; 6406 6407 // See if we've computed this range already. 6408 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 6409 if (I != Cache.end()) 6410 return I->second; 6411 6412 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 6413 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 6414 6415 unsigned BitWidth = getTypeSizeInBits(S->getType()); 6416 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 6417 using OBO = OverflowingBinaryOperator; 6418 6419 // If the value has known zeros, the maximum value will have those known zeros 6420 // as well. 
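// (Illustrative: for an i8 value with TZ == 2, the unsigned case below
// yields [0, 0xFC + 1), since 0xFC is the largest i8 value whose low two
// bits are zero.)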
6421 uint32_t TZ = GetMinTrailingZeros(S); 6422 if (TZ != 0) { 6423 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 6424 ConservativeResult = 6425 ConstantRange(APInt::getMinValue(BitWidth), 6426 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 6427 else 6428 ConservativeResult = ConstantRange( 6429 APInt::getSignedMinValue(BitWidth), 6430 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 6431 } 6432 6433 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 6434 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 6435 unsigned WrapType = OBO::AnyWrap; 6436 if (Add->hasNoSignedWrap()) 6437 WrapType |= OBO::NoSignedWrap; 6438 if (Add->hasNoUnsignedWrap()) 6439 WrapType |= OBO::NoUnsignedWrap; 6440 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 6441 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 6442 WrapType, RangeType); 6443 return setRange(Add, SignHint, 6444 ConservativeResult.intersectWith(X, RangeType)); 6445 } 6446 6447 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 6448 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 6449 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 6450 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 6451 return setRange(Mul, SignHint, 6452 ConservativeResult.intersectWith(X, RangeType)); 6453 } 6454 6455 if (isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) { 6456 Intrinsic::ID ID; 6457 switch (S->getSCEVType()) { 6458 case scUMaxExpr: 6459 ID = Intrinsic::umax; 6460 break; 6461 case scSMaxExpr: 6462 ID = Intrinsic::smax; 6463 break; 6464 case scUMinExpr: 6465 case scSequentialUMinExpr: 6466 ID = Intrinsic::umin; 6467 break; 6468 case scSMinExpr: 6469 ID = Intrinsic::smin; 6470 break; 6471 default: 6472 llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr."); 6473 } 6474 6475 const auto *NAry = cast<SCEVNAryExpr>(S); 6476 ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint); 6477 for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i) 6478 X = X.intrinsic(ID, {X, getRangeRef(NAry->getOperand(i), SignHint)}); 6479 return setRange(S, SignHint, 6480 ConservativeResult.intersectWith(X, RangeType)); 6481 } 6482 6483 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 6484 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 6485 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 6486 return setRange(UDiv, SignHint, 6487 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 6488 } 6489 6490 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 6491 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 6492 return setRange(ZExt, SignHint, 6493 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 6494 RangeType)); 6495 } 6496 6497 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 6498 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 6499 return setRange(SExt, SignHint, 6500 ConservativeResult.intersectWith(X.signExtend(BitWidth), 6501 RangeType)); 6502 } 6503 6504 if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) { 6505 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint); 6506 return setRange(PtrToInt, SignHint, X); 6507 } 6508 6509 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 6510 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 6511 return setRange(Trunc, SignHint, 6512 ConservativeResult.intersectWith(X.truncate(BitWidth), 6513 RangeType)); 6514 } 6515 6516 if (const 
SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 6517 // If there's no unsigned wrap, the value will never be less than its 6518 // initial value. 6519 if (AddRec->hasNoUnsignedWrap()) { 6520 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart()); 6521 if (!UnsignedMinValue.isZero()) 6522 ConservativeResult = ConservativeResult.intersectWith( 6523 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType); 6524 } 6525 6526 // If there's no signed wrap, and all the operands except initial value have 6527 // the same sign or zero, the value won't ever be: 6528 // 1: smaller than initial value if operands are non negative, 6529 // 2: bigger than initial value if operands are non positive. 6530 // For both cases, value can not cross signed min/max boundary. 6531 if (AddRec->hasNoSignedWrap()) { 6532 bool AllNonNeg = true; 6533 bool AllNonPos = true; 6534 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) { 6535 if (!isKnownNonNegative(AddRec->getOperand(i))) 6536 AllNonNeg = false; 6537 if (!isKnownNonPositive(AddRec->getOperand(i))) 6538 AllNonPos = false; 6539 } 6540 if (AllNonNeg) 6541 ConservativeResult = ConservativeResult.intersectWith( 6542 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()), 6543 APInt::getSignedMinValue(BitWidth)), 6544 RangeType); 6545 else if (AllNonPos) 6546 ConservativeResult = ConservativeResult.intersectWith( 6547 ConstantRange::getNonEmpty( 6548 APInt::getSignedMinValue(BitWidth), 6549 getSignedRangeMax(AddRec->getStart()) + 1), 6550 RangeType); 6551 } 6552 6553 // TODO: non-affine addrec 6554 if (AddRec->isAffine()) { 6555 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); 6556 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 6557 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 6558 auto RangeFromAffine = getRangeForAffineAR( 6559 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 6560 BitWidth); 6561 ConservativeResult = 6562 ConservativeResult.intersectWith(RangeFromAffine, RangeType); 6563 6564 auto RangeFromFactoring = getRangeViaFactoring( 6565 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 6566 BitWidth); 6567 ConservativeResult = 6568 ConservativeResult.intersectWith(RangeFromFactoring, RangeType); 6569 } 6570 6571 // Now try symbolic BE count and more powerful methods. 6572 if (UseExpensiveRangeSharpening) { 6573 const SCEV *SymbolicMaxBECount = 6574 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop()); 6575 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) && 6576 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 6577 AddRec->hasNoSelfWrap()) { 6578 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR( 6579 AddRec, SymbolicMaxBECount, BitWidth, SignHint); 6580 ConservativeResult = 6581 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType); 6582 } 6583 } 6584 } 6585 6586 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 6587 } 6588 6589 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 6590 6591 // Check if the IR explicitly contains !range metadata. 6592 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 6593 if (MDRange) 6594 ConservativeResult = 6595 ConservativeResult.intersectWith(MDRange.value(), RangeType); 6596 6597 // Use facts about recurrences in the underlying IR. Note that add 6598 // recurrences are AddRecExprs and thus don't hit this path. This 6599 // primarily handles shift recurrences. 
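// For example (illustrative IR):
// %p = phi i8 [ 64, %preheader ], [ %sh, %latch ]
// %sh = lshr i8 %p, 1
// Given a constant max trip count, getRangeForUnknownRecurrence can narrow
// the range of %p to a subrange of [0, 65) even though %p is not an AddRec.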
6600 auto CR = getRangeForUnknownRecurrence(U); 6601 ConservativeResult = ConservativeResult.intersectWith(CR); 6602 6603 // See if ValueTracking can give us a useful range. 6604 const DataLayout &DL = getDataLayout(); 6605 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 6606 if (Known.getBitWidth() != BitWidth) 6607 Known = Known.zextOrTrunc(BitWidth); 6608 6609 // ValueTracking may be able to compute a tighter result for the number of 6610 // sign bits than for the value of those sign bits. 6611 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 6612 if (U->getType()->isPointerTy()) { 6613 // If the pointer size is larger than the index size type, this can cause 6614 // NS to be larger than BitWidth. So compensate for this. 6615 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType()); 6616 int ptrIdxDiff = ptrSize - BitWidth; 6617 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff) 6618 NS -= ptrIdxDiff; 6619 } 6620 6621 if (NS > 1) { 6622 // If we know any of the sign bits, we know all of the sign bits. 6623 if (!Known.Zero.getHiBits(NS).isZero()) 6624 Known.Zero.setHighBits(NS); 6625 if (!Known.One.getHiBits(NS).isZero()) 6626 Known.One.setHighBits(NS); 6627 } 6628 6629 if (Known.getMinValue() != Known.getMaxValue() + 1) 6630 ConservativeResult = ConservativeResult.intersectWith( 6631 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1), 6632 RangeType); 6633 if (NS > 1) 6634 ConservativeResult = ConservativeResult.intersectWith( 6635 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 6636 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1), 6637 RangeType); 6638 6639 // A range of Phi is a subset of union of all ranges of its input. 6640 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { 6641 // Make sure that we do not run over cycled Phis. 6642 if (PendingPhiRanges.insert(Phi).second) { 6643 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); 6644 for (const auto &Op : Phi->operands()) { 6645 auto OpRange = getRangeRef(getSCEV(Op), SignHint); 6646 RangeFromOps = RangeFromOps.unionWith(OpRange); 6647 // No point to continue if we already have a full set. 6648 if (RangeFromOps.isFullSet()) 6649 break; 6650 } 6651 ConservativeResult = 6652 ConservativeResult.intersectWith(RangeFromOps, RangeType); 6653 bool Erased = PendingPhiRanges.erase(Phi); 6654 assert(Erased && "Failed to erase Phi properly?"); 6655 (void) Erased; 6656 } 6657 } 6658 6659 // vscale can't be equal to zero 6660 if (const auto *II = dyn_cast<IntrinsicInst>(U->getValue())) 6661 if (II->getIntrinsicID() == Intrinsic::vscale) { 6662 ConstantRange Disallowed = APInt::getZero(BitWidth); 6663 ConservativeResult = ConservativeResult.difference(Disallowed); 6664 } 6665 6666 return setRange(U, SignHint, std::move(ConservativeResult)); 6667 } 6668 6669 return setRange(S, SignHint, std::move(ConservativeResult)); 6670 } 6671 6672 // Given a StartRange, Step and MaxBECount for an expression compute a range of 6673 // values that the expression can take. Initially, the expression has a value 6674 // from StartRange and then is changed by Step up to MaxBECount times. Signed 6675 // argument defines if we treat Step as signed or unsigned. 
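// For example (illustrative): a call with Step = 4, StartRange = [0, 2),
// MaxBECount = 3 and Signed == false yields [0, 14), since the last iterate
// can be at most 1 + 4 * 3 = 13.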
6676 static ConstantRange getRangeForAffineARHelper(APInt Step,
6677 const ConstantRange &StartRange,
6678 const APInt &MaxBECount,
6679 unsigned BitWidth, bool Signed) {
6680 // If either Step or MaxBECount is 0, then the expression won't change, and we
6681 // just need to return the initial range.
6682 if (Step == 0 || MaxBECount == 0)
6683 return StartRange;
6684
6685 // If we don't know anything about the initial value (i.e. StartRange is
6686 // FullRange), then we don't know anything about the final range either.
6687 // Return FullRange.
6688 if (StartRange.isFullSet())
6689 return ConstantRange::getFull(BitWidth);
6690
6691 // If Step is signed and negative, then we use its absolute value, but we also
6692 // note that we're moving in the opposite direction.
6693 bool Descending = Signed && Step.isNegative();
6694
6695 if (Signed)
6696 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
6697 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
6698 // These equations hold true due to the well-defined wrap-around behavior of
6699 // APInt.
6700 Step = Step.abs();
6701
6702 // Check if Offset is more than the full span of BitWidth. If it is, the
6703 // expression is guaranteed to overflow.
6704 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
6705 return ConstantRange::getFull(BitWidth);
6706
6707 // Offset is by how much the expression can change. Checks above guarantee no
6708 // overflow here.
6709 APInt Offset = Step * MaxBECount;
6710
6711 // Minimum value of the final range will match the minimal value of StartRange
6712 // if the expression is increasing and will be decreased by Offset otherwise.
6713 // Maximum value of the final range will match the maximal value of StartRange
6714 // if the expression is decreasing and will be increased by Offset otherwise.
6715 APInt StartLower = StartRange.getLower();
6716 APInt StartUpper = StartRange.getUpper() - 1;
6717 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
6718 : (StartUpper + std::move(Offset));
6719
6720 // It's possible that the new minimum/maximum value will fall into the initial
6721 // range (due to wrap around). This means that the expression can take any
6722 // value in this bitwidth, and we have to return the full range.
6723 if (StartRange.contains(MovedBoundary))
6724 return ConstantRange::getFull(BitWidth);
6725
6726 APInt NewLower =
6727 Descending ? std::move(MovedBoundary) : std::move(StartLower);
6728 APInt NewUpper =
6729 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
6730 NewUpper += 1;
6731
6732 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
6733 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
6734 }
6735
6736 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
6737 const SCEV *Step,
6738 const SCEV *MaxBECount,
6739 unsigned BitWidth) {
6740 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
6741 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6742 "Precondition!");
6743
6744 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
6745 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
6746
6747 // First, consider step signed.
6748 ConstantRange StartSRange = getSignedRange(Start);
6749 ConstantRange StepSRange = getSignedRange(Step);
6750
6751 // If Step can be both positive and negative, we need to find ranges for the
6752 // maximum absolute step values in both directions and union them.
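// (Illustrative: if the signed step range is [-2, 3], the two helper calls
// below use steps -2 and 3, one per direction of travel, and their union
// covers every value the recurrence can reach.)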
6753 ConstantRange SR =
6754 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
6755 MaxBECountValue, BitWidth, /* Signed = */ true);
6756 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
6757 StartSRange, MaxBECountValue,
6758 BitWidth, /* Signed = */ true));
6759
6760 // Next, consider step unsigned.
6761 ConstantRange UR = getRangeForAffineARHelper(
6762 getUnsignedRangeMax(Step), getUnsignedRange(Start),
6763 MaxBECountValue, BitWidth, /* Signed = */ false);
6764
6765 // Finally, intersect signed and unsigned ranges.
6766 return SR.intersectWith(UR, ConstantRange::Smallest);
6767 }
6768
6769 ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
6770 const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
6771 ScalarEvolution::RangeSignHint SignHint) {
6772 assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
6773 assert(AddRec->hasNoSelfWrap() &&
6774 "This only works for non-self-wrapping AddRecs!");
6775 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
6776 const SCEV *Step = AddRec->getStepRecurrence(*this);
6777 // Only deal with constant step to save compile time.
6778 if (!isa<SCEVConstant>(Step))
6779 return ConstantRange::getFull(BitWidth);
6780 // Let's make sure that we can prove that we do not self-wrap during
6781 // MaxBECount iterations. We need this because MaxBECount is a maximum
6782 // iteration count estimate, and we might infer nw from some exit for which we
6783 // do not know max exit count (or any other side reasoning).
6784 // TODO: Turn into assert at some point.
6785 if (getTypeSizeInBits(MaxBECount->getType()) >
6786 getTypeSizeInBits(AddRec->getType()))
6787 return ConstantRange::getFull(BitWidth);
6788 MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
6789 const SCEV *RangeWidth = getMinusOne(AddRec->getType());
6790 const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
6791 const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
6792 if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
6793 MaxItersWithoutWrap))
6794 return ConstantRange::getFull(BitWidth);
6795
6796 ICmpInst::Predicate LEPred =
6797 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
6798 ICmpInst::Predicate GEPred =
6799 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
6800 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
6801
6802 // We know that there is no self-wrap. Let's take Start and End values and
6803 // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
6804 // the iteration. They either lie inside the range [Min(Start, End),
6805 // Max(Start, End)] or outside it:
6806 //
6807 // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
6808 // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
6809 //
6810 // No self wrap flag guarantees that the intermediate values cannot be BOTH
6811 // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that
6812 // knowledge, let's try to prove that we are dealing with Case 1. It is so if
6813 // Start <= End and step is positive, or Start >= End and step is negative.
6814 const SCEV *Start = AddRec->getStart();
6815 ConstantRange StartRange = getRangeRef(Start, SignHint);
6816 ConstantRange EndRange = getRangeRef(End, SignHint);
6817 ConstantRange RangeBetween = StartRange.unionWith(EndRange);
6818 // If they already cover the full iteration space, we will know nothing useful
6819 // even if we prove what we want to prove.
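// (Illustrative: with Start in [0, 10), End in [20, 30), and a step known
// to be positive, proving Start <= End below lets us return
// RangeBetween = [0, 30).)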
6820 if (RangeBetween.isFullSet()) 6821 return RangeBetween; 6822 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax). 6823 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet() 6824 : RangeBetween.isWrappedSet(); 6825 if (IsWrappedSet) 6826 return ConstantRange::getFull(BitWidth); 6827 6828 if (isKnownPositive(Step) && 6829 isKnownPredicateViaConstantRanges(LEPred, Start, End)) 6830 return RangeBetween; 6831 else if (isKnownNegative(Step) && 6832 isKnownPredicateViaConstantRanges(GEPred, Start, End)) 6833 return RangeBetween; 6834 return ConstantRange::getFull(BitWidth); 6835 } 6836 6837 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 6838 const SCEV *Step, 6839 const SCEV *MaxBECount, 6840 unsigned BitWidth) { 6841 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 6842 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 6843 6844 struct SelectPattern { 6845 Value *Condition = nullptr; 6846 APInt TrueValue; 6847 APInt FalseValue; 6848 6849 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 6850 const SCEV *S) { 6851 Optional<unsigned> CastOp; 6852 APInt Offset(BitWidth, 0); 6853 6854 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 6855 "Should be!"); 6856 6857 // Peel off a constant offset: 6858 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 6859 // In the future we could consider being smarter here and handle 6860 // {Start+Step,+,Step} too. 6861 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 6862 return; 6863 6864 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 6865 S = SA->getOperand(1); 6866 } 6867 6868 // Peel off a cast operation 6869 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 6870 CastOp = SCast->getSCEVType(); 6871 S = SCast->getOperand(); 6872 } 6873 6874 using namespace llvm::PatternMatch; 6875 6876 auto *SU = dyn_cast<SCEVUnknown>(S); 6877 const APInt *TrueVal, *FalseVal; 6878 if (!SU || 6879 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 6880 m_APInt(FalseVal)))) { 6881 Condition = nullptr; 6882 return; 6883 } 6884 6885 TrueValue = *TrueVal; 6886 FalseValue = *FalseVal; 6887 6888 // Re-apply the cast we peeled off earlier 6889 if (CastOp) 6890 switch (*CastOp) { 6891 default: 6892 llvm_unreachable("Unknown SCEV cast type!"); 6893 6894 case scTruncate: 6895 TrueValue = TrueValue.trunc(BitWidth); 6896 FalseValue = FalseValue.trunc(BitWidth); 6897 break; 6898 case scZeroExtend: 6899 TrueValue = TrueValue.zext(BitWidth); 6900 FalseValue = FalseValue.zext(BitWidth); 6901 break; 6902 case scSignExtend: 6903 TrueValue = TrueValue.sext(BitWidth); 6904 FalseValue = FalseValue.sext(BitWidth); 6905 break; 6906 } 6907 6908 // Re-apply the constant offset we peeled off earlier 6909 TrueValue += Offset; 6910 FalseValue += Offset; 6911 } 6912 6913 bool isRecognized() { return Condition != nullptr; } 6914 }; 6915 6916 SelectPattern StartPattern(*this, BitWidth, Start); 6917 if (!StartPattern.isRecognized()) 6918 return ConstantRange::getFull(BitWidth); 6919 6920 SelectPattern StepPattern(*this, BitWidth, Step); 6921 if (!StepPattern.isRecognized()) 6922 return ConstantRange::getFull(BitWidth); 6923 6924 if (StartPattern.Condition != StepPattern.Condition) { 6925 // We don't handle this case today; but we could, by considering four 6926 // possibilities below instead of two. I'm not sure if there are cases where 6927 // that will help over what getRange already does, though. 6928 return ConstantRange::getFull(BitWidth); 6929 } 6930 6931 // NB! 
Calling ScalarEvolution::getConstant is fine, but we should not try to 6932 // construct arbitrary general SCEV expressions here. This function is called 6933 // from deep in the call stack, and calling getSCEV (on a sext instruction, 6934 // say) can end up caching a suboptimal value. 6935 6936 // FIXME: without the explicit `this` receiver below, MSVC errors out with 6937 // C2352 and C2512 (otherwise it isn't needed). 6938 6939 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6940 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6941 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6942 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6943 6944 ConstantRange TrueRange = 6945 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6946 ConstantRange FalseRange = 6947 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6948 6949 return TrueRange.unionWith(FalseRange); 6950 } 6951 6952 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6953 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6954 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6955 6956 // Return early if there are no flags to propagate to the SCEV. 6957 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6958 if (BinOp->hasNoUnsignedWrap()) 6959 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 6960 if (BinOp->hasNoSignedWrap()) 6961 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 6962 if (Flags == SCEV::FlagAnyWrap) 6963 return SCEV::FlagAnyWrap; 6964 6965 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 6966 } 6967 6968 const Instruction * 6969 ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) { 6970 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) 6971 return &*AddRec->getLoop()->getHeader()->begin(); 6972 if (auto *U = dyn_cast<SCEVUnknown>(S)) 6973 if (auto *I = dyn_cast<Instruction>(U->getValue())) 6974 return I; 6975 return nullptr; 6976 } 6977 6978 /// Fills \p Ops with unique operands of \p S, if it has operands. If not, 6979 /// \p Ops remains unmodified. 6980 static void collectUniqueOps(const SCEV *S, 6981 SmallVectorImpl<const SCEV *> &Ops) { 6982 SmallPtrSet<const SCEV *, 4> Unique; 6983 auto InsertUnique = [&](const SCEV *S) { 6984 if (Unique.insert(S).second) 6985 Ops.push_back(S); 6986 }; 6987 if (auto *S2 = dyn_cast<SCEVCastExpr>(S)) 6988 for (const auto *Op : S2->operands()) 6989 InsertUnique(Op); 6990 else if (auto *S2 = dyn_cast<SCEVNAryExpr>(S)) 6991 for (const auto *Op : S2->operands()) 6992 InsertUnique(Op); 6993 else if (auto *S2 = dyn_cast<SCEVUDivExpr>(S)) 6994 for (const auto *Op : S2->operands()) 6995 InsertUnique(Op); 6996 } 6997 6998 const Instruction * 6999 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops, 7000 bool &Precise) { 7001 Precise = true; 7002 // Do a bounded search of the def relation of the requested SCEVs. 7003 SmallSet<const SCEV *, 16> Visited; 7004 SmallVector<const SCEV *> Worklist; 7005 auto pushOp = [&](const SCEV *S) { 7006 if (!Visited.insert(S).second) 7007 return; 7008 // Threshold of 30 here is arbitrary. 
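// (Illustrative: a sufficiently wide expression tree can enqueue more than
// 30 SCEVs; we then stop expanding operands and report the resulting bound
// as imprecise via the Precise out-parameter, instead of walking the whole
// tree.)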
7009 if (Visited.size() > 30) {
7010 Precise = false;
7011 return;
7012 }
7013 Worklist.push_back(S);
7014 };
7015
7016 for (const auto *S : Ops)
7017 pushOp(S);
7018
7019 const Instruction *Bound = nullptr;
7020 while (!Worklist.empty()) {
7021 auto *S = Worklist.pop_back_val();
7022 if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7023 if (!Bound || DT.dominates(Bound, DefI))
7024 Bound = DefI;
7025 } else {
7026 SmallVector<const SCEV *, 4> Ops;
7027 collectUniqueOps(S, Ops);
7028 for (const auto *Op : Ops)
7029 pushOp(Op);
7030 }
7031 }
7032 return Bound ? Bound : &*F.getEntryBlock().begin();
7033 }
7034
7035 const Instruction *
7036 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
7037 bool Discard;
7038 return getDefiningScopeBound(Ops, Discard);
7039 }
7040
7041 bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
7042 const Instruction *B) {
7043 if (A->getParent() == B->getParent() &&
7044 isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
7045 B->getIterator()))
7046 return true;
7047
7048 auto *BLoop = LI.getLoopFor(B->getParent());
7049 if (BLoop && BLoop->getHeader() == B->getParent() &&
7050 BLoop->getLoopPreheader() == A->getParent() &&
7051 isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
7052 A->getParent()->end()) &&
7053 isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(),
7054 B->getIterator()))
7055 return true;
7056 return false;
7057 }
7058
7059
7060 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
7061 // Only proceed if we can prove that I does not yield poison.
7062 if (!programUndefinedIfPoison(I))
7063 return false;
7064
7065 // At this point we know that if I is executed, then it does not wrap
7066 // according to at least one of NSW or NUW. If I is not executed, then we do
7067 // not know if the calculation that I represents would wrap. Multiple
7068 // instructions can map to the same SCEV. If we apply NSW or NUW from I to
7069 // the SCEV, we must guarantee no wrapping for that SCEV also when it is
7070 // derived from other instructions that map to the same SCEV. We cannot make
7071 // that guarantee for cases where I is not executed. So we need to find an
7072 // upper bound on the defining scope for the SCEV, and prove that I is
7073 // executed every time we enter that scope. When the bounding scope is a
7074 // loop (the common case), this is equivalent to proving I executes on every
7075 // iteration of that loop.
7076 SmallVector<const SCEV *> SCEVOps;
7077 for (const Use &Op : I->operands()) {
7078 // I could be an extractvalue from a call to an overflow intrinsic.
7079 // TODO: We can do better here in some cases.
7080 if (isSCEVable(Op->getType()))
7081 SCEVOps.push_back(getSCEV(Op));
7082 }
7083 auto *DefI = getDefiningScopeBound(SCEVOps);
7084 return isGuaranteedToTransferExecutionTo(DefI, I);
7085 }
7086
7087 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
7088 // If we know that \c I can never be poison, period, then that's enough.
7089 if (isSCEVExprNeverPoison(I))
7090 return true;
7091
7092 // For an add recurrence specifically, we assume that infinite loops without
7093 // side effects are undefined behavior, and then reason as follows:
7094 //
7095 // If the add recurrence is poison in any iteration, it is poison on all
7096 // future iterations (since incrementing poison yields poison). If the result
7097 // of the add recurrence is fed into the loop latch condition and the loop
7098 // does not contain any throws or exiting blocks other than the latch, we now
7099 // have the ability to "choose" whether the backedge is taken or not (by
7100 // choosing a sufficiently evil value for the poison feeding into the branch)
7101 // for every iteration including and after the one in which \p I first became
7102 // poison. There are two possibilities (let's call the iteration in which \p
7103 // I first became poison K):
7104 //
7105 // 1. In the set of iterations including and after K, the loop body executes
7106 // no side effects. In this case, executing the backedge an infinite number
7107 // of times will yield undefined behavior.
7108 //
7109 // 2. In the set of iterations including and after K, the loop body executes
7110 // at least one side effect. In this case, that specific instance of side
7111 // effect is control dependent on poison, which also yields undefined
7112 // behavior.
7113
7114 auto *ExitingBB = L->getExitingBlock();
7115 auto *LatchBB = L->getLoopLatch();
7116 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
7117 return false;
7118
7119 SmallPtrSet<const Instruction *, 16> Pushed;
7120 SmallVector<const Instruction *, 8> PoisonStack;
7121
7122 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
7123 // things that are known to be poison under that assumption go on the
7124 // PoisonStack.
7125 Pushed.insert(I);
7126 PoisonStack.push_back(I);
7127
7128 bool LatchControlDependentOnPoison = false;
7129 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
7130 const Instruction *Poison = PoisonStack.pop_back_val();
7131
7132 for (const auto *PoisonUser : Poison->users()) {
7133 if (propagatesPoison(cast<Operator>(PoisonUser))) {
7134 if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
7135 PoisonStack.push_back(cast<Instruction>(PoisonUser));
7136 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
7137 assert(BI->isConditional() && "Only possibility!");
7138 if (BI->getParent() == LatchBB) {
7139 LatchControlDependentOnPoison = true;
7140 break;
7141 }
7142 }
7143 }
7144 }
7145
7146 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
7147 }
7148
7149 ScalarEvolution::LoopProperties
7150 ScalarEvolution::getLoopProperties(const Loop *L) {
7151 using LoopProperties = ScalarEvolution::LoopProperties;
7152
7153 auto Itr = LoopPropertiesCache.find(L);
7154 if (Itr == LoopPropertiesCache.end()) {
7155 auto HasSideEffects = [](Instruction *I) {
7156 if (auto *SI = dyn_cast<StoreInst>(I))
7157 return !SI->isSimple();
7158
7159 return I->mayThrow() || I->mayWriteToMemory();
7160 };
7161
7162 LoopProperties LP = {/* HasNoAbnormalExits */ true,
7163 /*HasNoSideEffects*/ true};
7164
7165 for (auto *BB : L->getBlocks())
7166 for (auto &I : *BB) {
7167 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7168 LP.HasNoAbnormalExits = false;
7169 if (HasSideEffects(&I))
7170 LP.HasNoSideEffects = false;
7171 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7172 break; // We're already as pessimistic as we can get.
7173 }
7174
7175 auto InsertPair = LoopPropertiesCache.insert({L, LP});
7176 assert(InsertPair.second && "We just checked!");
7177 Itr = InsertPair.first;
7178 }
7179
7180 return Itr->second;
7181 }
7182
7183 bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
7184 // A mustprogress loop without side effects must be finite.
7185 // TODO: The check used here is very conservative. It's only *specific*
7186 // side effects which are well defined in infinite loops.
7187 return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L));
7188 }
7189
7190 const SCEV *ScalarEvolution::createSCEVIter(Value *V) {
7191 // Worklist item with a Value and a bool indicating whether all operands have
7192 // been visited already.
7193 using PointerTy = PointerIntPair<Value *, 1, bool>;
7194 SmallVector<PointerTy> Stack;
7195
7196 Stack.emplace_back(V, true);
7197 Stack.emplace_back(V, false);
7198 while (!Stack.empty()) {
7199 auto E = Stack.pop_back_val();
7200 Value *CurV = E.getPointer();
7201
7202 if (getExistingSCEV(CurV))
7203 continue;
7204
7205 SmallVector<Value *> Ops;
7206 const SCEV *CreatedSCEV = nullptr;
7207 // If all operands have been visited already, create the SCEV.
7208 if (E.getInt()) {
7209 CreatedSCEV = createSCEV(CurV);
7210 } else {
7211 // Otherwise get the operands we need to create SCEVs for before creating
7212 // the SCEV for CurV. If the SCEV for CurV can be constructed trivially,
7213 // just use it.
7214 CreatedSCEV = getOperandsToCreate(CurV, Ops);
7215 }
7216
7217 if (CreatedSCEV) {
7218 insertValueToMap(CurV, CreatedSCEV);
7219 } else {
7220 // Queue CurV for SCEV creation, followed by its operands which need to
7221 // be constructed first.
7222 Stack.emplace_back(CurV, true);
7223 for (Value *Op : Ops)
7224 Stack.emplace_back(Op, false);
7225 }
7226 }
7227
7228 return getExistingSCEV(V);
7229 }
7230
7231 const SCEV *
7232 ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
7233 if (!isSCEVable(V->getType()))
7234 return getUnknown(V);
7235
7236 if (Instruction *I = dyn_cast<Instruction>(V)) {
7237 // Don't attempt to analyze instructions in blocks that aren't
7238 // reachable. Such instructions don't matter, and they aren't required
7239 // to obey basic rules for definitions dominating uses which this
7240 // analysis depends on.
7241 if (!DT.isReachableFromEntry(I->getParent()))
7242 return getUnknown(PoisonValue::get(V->getType()));
7243 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
7244 return getConstant(CI);
7245 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
7246 if (!GA->isInterposable()) {
7247 Ops.push_back(GA->getAliasee());
7248 return nullptr;
7249 }
7250 return getUnknown(V);
7251 } else if (!isa<ConstantExpr>(V))
7252 return getUnknown(V);
7253
7254 Operator *U = cast<Operator>(V);
7255 if (auto BO = MatchBinaryOp(U, DT)) {
7256 bool IsConstArg = isa<ConstantInt>(BO->RHS);
7257 switch (BO->Opcode) {
7258 case Instruction::Add: {
7259 // For additions and multiplications, traverse add/mul chains for which we
7260 // can potentially create a single SCEV, to reduce the number of
7261 // get{Add,Mul}Expr calls.
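// For example (illustrative IR): for %b = add (add %x, 1), %y the loop
// below collects %y, 1 and %x in a single pass, so the later createSCEV
// call can build one getAddExpr over all of the collected operands.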
7262 do { 7263 if (BO->Op) { 7264 if (BO->Op != V && getExistingSCEV(BO->Op)) { 7265 Ops.push_back(BO->Op); 7266 break; 7267 } 7268 } 7269 Ops.push_back(BO->RHS); 7270 auto NewBO = MatchBinaryOp(BO->LHS, DT); 7271 if (!NewBO || (NewBO->Opcode != Instruction::Add && 7272 NewBO->Opcode != Instruction::Sub)) { 7273 Ops.push_back(BO->LHS); 7274 break; 7275 } 7276 BO = NewBO; 7277 } while (true); 7278 return nullptr; 7279 } 7280 7281 case Instruction::Mul: { 7282 do { 7283 if (BO->Op) { 7284 if (BO->Op != V && getExistingSCEV(BO->Op)) { 7285 Ops.push_back(BO->Op); 7286 break; 7287 } 7288 } 7289 Ops.push_back(BO->RHS); 7290 auto NewBO = MatchBinaryOp(BO->LHS, DT); 7291 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 7292 Ops.push_back(BO->LHS); 7293 break; 7294 } 7295 BO = NewBO; 7296 } while (true); 7297 return nullptr; 7298 } 7299 case Instruction::Sub: 7300 case Instruction::UDiv: 7301 case Instruction::URem: 7302 break; 7303 case Instruction::AShr: 7304 case Instruction::Shl: 7305 case Instruction::Xor: 7306 if (!IsConstArg) 7307 return nullptr; 7308 break; 7309 case Instruction::And: 7310 case Instruction::Or: 7311 if (!IsConstArg && BO->LHS->getType()->isIntegerTy(1)) 7312 return nullptr; 7313 break; 7314 case Instruction::LShr: 7315 return getUnknown(V); 7316 default: 7317 llvm_unreachable("Unhandled binop"); 7318 break; 7319 } 7320 7321 Ops.push_back(BO->LHS); 7322 Ops.push_back(BO->RHS); 7323 return nullptr; 7324 } 7325 7326 switch (U->getOpcode()) { 7327 case Instruction::Trunc: 7328 case Instruction::ZExt: 7329 case Instruction::SExt: 7330 case Instruction::PtrToInt: 7331 Ops.push_back(U->getOperand(0)); 7332 return nullptr; 7333 7334 case Instruction::BitCast: 7335 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) { 7336 Ops.push_back(U->getOperand(0)); 7337 return nullptr; 7338 } 7339 return getUnknown(V); 7340 7341 case Instruction::SDiv: 7342 case Instruction::SRem: 7343 Ops.push_back(U->getOperand(0)); 7344 Ops.push_back(U->getOperand(1)); 7345 return nullptr; 7346 7347 case Instruction::GetElementPtr: 7348 assert(cast<GEPOperator>(U)->getSourceElementType()->isSized() && 7349 "GEP source element type must be sized"); 7350 for (Value *Index : U->operands()) 7351 Ops.push_back(Index); 7352 return nullptr; 7353 7354 case Instruction::IntToPtr: 7355 return getUnknown(V); 7356 7357 case Instruction::PHI: 7358 // Keep constructing SCEVs' for phis recursively for now. 7359 return nullptr; 7360 7361 case Instruction::Select: { 7362 // Check if U is a select that can be simplified to a SCEVUnknown. 
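// (Illustrative: `select (icmp sgt i64 %a, %b), i32 %x, i32 %y` compares a
// type wider than the result, so the lambda below reports that the select
// can only be modeled as a SCEVUnknown and its operands are not queued.)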
7363 auto CanSimplifyToUnknown = [this, U]() { 7364 if (U->getType()->isIntegerTy(1) || isa<ConstantInt>(U->getOperand(0))) 7365 return false; 7366 7367 auto *ICI = dyn_cast<ICmpInst>(U->getOperand(0)); 7368 if (!ICI) 7369 return false; 7370 Value *LHS = ICI->getOperand(0); 7371 Value *RHS = ICI->getOperand(1); 7372 if (ICI->getPredicate() == CmpInst::ICMP_EQ || 7373 ICI->getPredicate() == CmpInst::ICMP_NE) { 7374 if (!(isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero())) 7375 return true; 7376 } else if (getTypeSizeInBits(LHS->getType()) > 7377 getTypeSizeInBits(U->getType())) 7378 return true; 7379 return false; 7380 }; 7381 if (CanSimplifyToUnknown()) 7382 return getUnknown(U); 7383 7384 for (Value *Inc : U->operands()) 7385 Ops.push_back(Inc); 7386 return nullptr; 7387 break; 7388 } 7389 case Instruction::Call: 7390 case Instruction::Invoke: 7391 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) { 7392 Ops.push_back(RV); 7393 return nullptr; 7394 } 7395 7396 if (auto *II = dyn_cast<IntrinsicInst>(U)) { 7397 switch (II->getIntrinsicID()) { 7398 case Intrinsic::abs: 7399 Ops.push_back(II->getArgOperand(0)); 7400 return nullptr; 7401 case Intrinsic::umax: 7402 case Intrinsic::umin: 7403 case Intrinsic::smax: 7404 case Intrinsic::smin: 7405 case Intrinsic::usub_sat: 7406 case Intrinsic::uadd_sat: 7407 Ops.push_back(II->getArgOperand(0)); 7408 Ops.push_back(II->getArgOperand(1)); 7409 return nullptr; 7410 case Intrinsic::start_loop_iterations: 7411 case Intrinsic::annotation: 7412 case Intrinsic::ptr_annotation: 7413 Ops.push_back(II->getArgOperand(0)); 7414 return nullptr; 7415 default: 7416 break; 7417 } 7418 } 7419 break; 7420 } 7421 7422 return nullptr; 7423 } 7424 7425 const SCEV *ScalarEvolution::createSCEV(Value *V) { 7426 if (!isSCEVable(V->getType())) 7427 return getUnknown(V); 7428 7429 if (Instruction *I = dyn_cast<Instruction>(V)) { 7430 // Don't attempt to analyze instructions in blocks that aren't 7431 // reachable. Such instructions don't matter, and they aren't required 7432 // to obey basic rules for definitions dominating uses which this 7433 // analysis depends on. 7434 if (!DT.isReachableFromEntry(I->getParent())) 7435 return getUnknown(PoisonValue::get(V->getType())); 7436 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 7437 return getConstant(CI); 7438 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 7439 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 7440 else if (!isa<ConstantExpr>(V)) 7441 return getUnknown(V); 7442 7443 const SCEV *LHS; 7444 const SCEV *RHS; 7445 7446 Operator *U = cast<Operator>(V); 7447 if (auto BO = MatchBinaryOp(U, DT)) { 7448 switch (BO->Opcode) { 7449 case Instruction::Add: { 7450 // The simple thing to do would be to just call getSCEV on both operands 7451 // and call getAddExpr with the result. However if we're looking at a 7452 // bunch of things all added together, this can be quite inefficient, 7453 // because it leads to N-1 getAddExpr calls for N ultimate operands. 7454 // Instead, gather up all the operands and make a single getAddExpr call. 7455 // LLVM IR canonical form means we need only traverse the left operands. 7456 SmallVector<const SCEV *, 4> AddOps; 7457 do { 7458 if (BO->Op) { 7459 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 7460 AddOps.push_back(OpSCEV); 7461 break; 7462 } 7463 7464 // If a NUW or NSW flag can be applied to the SCEV for this 7465 // addition, then compute the SCEV for this addition by itself 7466 // with a separate call to getAddExpr. 
We need to do that 7467 // instead of pushing the operands of the addition onto AddOps, 7468 // since the flags are only known to apply to this particular 7469 // addition - they may not apply to other additions that can be 7470 // formed with operands from AddOps. 7471 const SCEV *RHS = getSCEV(BO->RHS); 7472 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 7473 if (Flags != SCEV::FlagAnyWrap) { 7474 const SCEV *LHS = getSCEV(BO->LHS); 7475 if (BO->Opcode == Instruction::Sub) 7476 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 7477 else 7478 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 7479 break; 7480 } 7481 } 7482 7483 if (BO->Opcode == Instruction::Sub) 7484 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 7485 else 7486 AddOps.push_back(getSCEV(BO->RHS)); 7487 7488 auto NewBO = MatchBinaryOp(BO->LHS, DT); 7489 if (!NewBO || (NewBO->Opcode != Instruction::Add && 7490 NewBO->Opcode != Instruction::Sub)) { 7491 AddOps.push_back(getSCEV(BO->LHS)); 7492 break; 7493 } 7494 BO = NewBO; 7495 } while (true); 7496 7497 return getAddExpr(AddOps); 7498 } 7499 7500 case Instruction::Mul: { 7501 SmallVector<const SCEV *, 4> MulOps; 7502 do { 7503 if (BO->Op) { 7504 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 7505 MulOps.push_back(OpSCEV); 7506 break; 7507 } 7508 7509 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 7510 if (Flags != SCEV::FlagAnyWrap) { 7511 LHS = getSCEV(BO->LHS); 7512 RHS = getSCEV(BO->RHS); 7513 MulOps.push_back(getMulExpr(LHS, RHS, Flags)); 7514 break; 7515 } 7516 } 7517 7518 MulOps.push_back(getSCEV(BO->RHS)); 7519 auto NewBO = MatchBinaryOp(BO->LHS, DT); 7520 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 7521 MulOps.push_back(getSCEV(BO->LHS)); 7522 break; 7523 } 7524 BO = NewBO; 7525 } while (true); 7526 7527 return getMulExpr(MulOps); 7528 } 7529 case Instruction::UDiv: 7530 LHS = getSCEV(BO->LHS); 7531 RHS = getSCEV(BO->RHS); 7532 return getUDivExpr(LHS, RHS); 7533 case Instruction::URem: 7534 LHS = getSCEV(BO->LHS); 7535 RHS = getSCEV(BO->RHS); 7536 return getURemExpr(LHS, RHS); 7537 case Instruction::Sub: { 7538 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 7539 if (BO->Op) 7540 Flags = getNoWrapFlagsFromUB(BO->Op); 7541 LHS = getSCEV(BO->LHS); 7542 RHS = getSCEV(BO->RHS); 7543 return getMinusSCEV(LHS, RHS, Flags); 7544 } 7545 case Instruction::And: 7546 // For an expression like x&255 that merely masks off the high bits, 7547 // use zext(trunc(x)) as the SCEV expression. 7548 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7549 if (CI->isZero()) 7550 return getSCEV(BO->RHS); 7551 if (CI->isMinusOne()) 7552 return getSCEV(BO->LHS); 7553 const APInt &A = CI->getValue(); 7554 7555 // Instcombine's ShrinkDemandedConstant may strip bits out of 7556 // constants, obscuring what would otherwise be a low-bits mask. 7557 // Use computeKnownBits to compute what ShrinkDemandedConstant 7558 // knew about to reconstruct a low-bits mask value. 
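// (Illustrative: for `and i8 %x, 0x7C` we get LZ == 1 and TZ == 2, and the
// code below models the result as (zext (trunc (%x /u 4) to i5) to i8) * 4.)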
7559 unsigned LZ = A.countLeadingZeros(); 7560 unsigned TZ = A.countTrailingZeros(); 7561 unsigned BitWidth = A.getBitWidth(); 7562 KnownBits Known(BitWidth); 7563 computeKnownBits(BO->LHS, Known, getDataLayout(), 7564 0, &AC, nullptr, &DT); 7565 7566 APInt EffectiveMask = 7567 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 7568 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 7569 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 7570 const SCEV *LHS = getSCEV(BO->LHS); 7571 const SCEV *ShiftedLHS = nullptr; 7572 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 7573 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 7574 // For an expression like (x * 8) & 8, simplify the multiply. 7575 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 7576 unsigned GCD = std::min(MulZeros, TZ); 7577 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 7578 SmallVector<const SCEV*, 4> MulOps; 7579 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 7580 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 7581 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 7582 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 7583 } 7584 } 7585 if (!ShiftedLHS) 7586 ShiftedLHS = getUDivExpr(LHS, MulCount); 7587 return getMulExpr( 7588 getZeroExtendExpr( 7589 getTruncateExpr(ShiftedLHS, 7590 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 7591 BO->LHS->getType()), 7592 MulCount); 7593 } 7594 } 7595 // Binary `and` is a bit-wise `umin`. 7596 if (BO->LHS->getType()->isIntegerTy(1)) { 7597 LHS = getSCEV(BO->LHS); 7598 RHS = getSCEV(BO->RHS); 7599 return getUMinExpr(LHS, RHS); 7600 } 7601 break; 7602 7603 case Instruction::Or: 7604 // If the RHS of the Or is a constant, we may have something like: 7605 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 7606 // optimizations will transparently handle this case. 7607 // 7608 // In order for this transformation to be safe, the LHS must be of the 7609 // form X*(2^n) and the Or constant must be less than 2^n. 7610 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7611 const SCEV *LHS = getSCEV(BO->LHS); 7612 const APInt &CIVal = CI->getValue(); 7613 if (GetMinTrailingZeros(LHS) >= 7614 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 7615 // Build a plain add SCEV. 7616 return getAddExpr(LHS, getSCEV(CI), 7617 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 7618 } 7619 } 7620 // Binary `or` is a bit-wise `umax`. 7621 if (BO->LHS->getType()->isIntegerTy(1)) { 7622 LHS = getSCEV(BO->LHS); 7623 RHS = getSCEV(BO->RHS); 7624 return getUMaxExpr(LHS, RHS); 7625 } 7626 break; 7627 7628 case Instruction::Xor: 7629 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7630 // If the RHS of xor is -1, then this is a not operation. 7631 if (CI->isMinusOne()) 7632 return getNotSCEV(getSCEV(BO->LHS)); 7633 7634 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 7635 // This is a variant of the check for xor with -1, and it handles 7636 // the case where instcombine has trimmed non-demanded bits out 7637 // of an xor with -1. 
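      // E.g. with C == 255: xor(and(x, 255), 255) == and(~x, 255), which can
      // be modeled as zext(not(trunc(x, i8))) once the zext is seen below.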
7638 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 7639 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 7640 if (LBO->getOpcode() == Instruction::And && 7641 LCI->getValue() == CI->getValue()) 7642 if (const SCEVZeroExtendExpr *Z = 7643 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 7644 Type *UTy = BO->LHS->getType(); 7645 const SCEV *Z0 = Z->getOperand(); 7646 Type *Z0Ty = Z0->getType(); 7647 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 7648 7649 // If C is a low-bits mask, the zero extend is serving to 7650 // mask off the high bits. Complement the operand and 7651 // re-apply the zext. 7652 if (CI->getValue().isMask(Z0TySize)) 7653 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 7654 7655 // If C is a single bit, it may be in the sign-bit position 7656 // before the zero-extend. In this case, represent the xor 7657 // using an add, which is equivalent, and re-apply the zext. 7658 APInt Trunc = CI->getValue().trunc(Z0TySize); 7659 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 7660 Trunc.isSignMask()) 7661 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 7662 UTy); 7663 } 7664 } 7665 break; 7666 7667 case Instruction::Shl: 7668 // Turn shift left of a constant amount into a multiply. 7669 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 7670 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 7671 7672 // If the shift count is not less than the bitwidth, the result of 7673 // the shift is undefined. Don't try to analyze it, because the 7674 // resolution chosen here may differ from the resolution chosen in 7675 // other parts of the compiler. 7676 if (SA->getValue().uge(BitWidth)) 7677 break; 7678 7679 // We can safely preserve the nuw flag in all cases. It's also safe to 7680 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation 7681 // requires special handling. It can be preserved as long as we're not 7682 // left shifting by bitwidth - 1. 7683 auto Flags = SCEV::FlagAnyWrap; 7684 if (BO->Op) { 7685 auto MulFlags = getNoWrapFlagsFromUB(BO->Op); 7686 if ((MulFlags & SCEV::FlagNSW) && 7687 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1))) 7688 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW); 7689 if (MulFlags & SCEV::FlagNUW) 7690 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW); 7691 } 7692 7693 ConstantInt *X = ConstantInt::get( 7694 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 7695 return getMulExpr(getSCEV(BO->LHS), getConstant(X), Flags); 7696 } 7697 break; 7698 7699 case Instruction::AShr: { 7700 // AShr X, C, where C is a constant. 7701 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 7702 if (!CI) 7703 break; 7704 7705 Type *OuterTy = BO->LHS->getType(); 7706 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 7707 // If the shift count is not less than the bitwidth, the result of 7708 // the shift is undefined. Don't try to analyze it, because the 7709 // resolution chosen here may differ from the resolution chosen in 7710 // other parts of the compiler. 7711 if (CI->getValue().uge(BitWidth)) 7712 break; 7713 7714 if (CI->isZero()) 7715 return getSCEV(BO->LHS); // shift by zero --> noop 7716 7717 uint64_t AShrAmt = CI->getZExtValue(); 7718 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 7719 7720 Operator *L = dyn_cast<Operator>(BO->LHS); 7721 if (L && L->getOpcode() == Instruction::Shl) { 7722 // X = Shl A, n 7723 // Y = AShr X, m 7724 // Both n and m are constant. 
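        // E.g. on i32, (ashr (shl x, 24), 24) is the sign-extend-in-register
        // idiom and becomes sext(trunc(x, i8), i32) via the n == m case below.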
7725
7726       const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
7727       if (L->getOperand(1) == BO->RHS)
7728         // For a two-shift sext-inreg, i.e. n = m,
7729         // use sext(trunc(x)) as the SCEV expression.
7730         return getSignExtendExpr(
7731             getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
7732
7733       ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
7734       if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
7735         uint64_t ShlAmt = ShlAmtCI->getZExtValue();
7736         if (ShlAmt > AShrAmt) {
7737           // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
7738           // expression. We already checked that ShlAmt < BitWidth, so
7739           // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
7740           // ShlAmt - AShrAmt < BitWidth - AShrAmt, the width of TruncTy.
7741           APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
7742                                           ShlAmt - AShrAmt);
7743           return getSignExtendExpr(
7744               getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
7745               getConstant(Mul)), OuterTy);
7746         }
7747       }
7748     }
7749     break;
7750   }
7751   }
7752 }
7753
7754   switch (U->getOpcode()) {
7755   case Instruction::Trunc:
7756     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
7757
7758   case Instruction::ZExt:
7759     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7760
7761   case Instruction::SExt:
7762     if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
7763       // The NSW flag of a subtract does not always survive the conversion to
7764       // A + (-1)*B. By pushing sign extension onto its operands we are much
7765       // more likely to preserve NSW and allow later AddRec optimisations.
7766       //
7767       // NOTE: This is effectively duplicating this logic from getSignExtend:
7768       //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
7769       // but by that point the NSW information has potentially been lost.
7770       if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
7771         Type *Ty = U->getType();
7772         auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
7773         auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
7774         return getMinusSCEV(V1, V2, SCEV::FlagNSW);
7775       }
7776     }
7777     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7778
7779   case Instruction::BitCast:
7780     // BitCasts are no-op casts so we just eliminate the cast.
7781     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
7782       return getSCEV(U->getOperand(0));
7783     break;
7784
7785   case Instruction::PtrToInt: {
7786     // A pointer-to-integer cast is straightforward, so do model it.
7787     const SCEV *Op = getSCEV(U->getOperand(0));
7788     Type *DstIntTy = U->getType();
7789     // But only if the effective SCEV (integer) type is wide enough to
7790     // represent all possible pointer values.
7791     const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
7792     if (isa<SCEVCouldNotCompute>(IntOp))
7793       return getUnknown(V);
7794     return IntOp;
7795   }
7796   case Instruction::IntToPtr:
7797     // Just don't deal with inttoptr casts.
7798     return getUnknown(V);
7799
7800   case Instruction::SDiv:
7801     // If both operands are non-negative, this is just a udiv.
7802     if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7803         isKnownNonNegative(getSCEV(U->getOperand(1))))
7804       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7805     break;
7806
7807   case Instruction::SRem:
7808     // If both operands are non-negative, this is just a urem.
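    // E.g. (7 srem 3) == (7 urem 3): signed and unsigned remainder agree
    // whenever neither operand is negative.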
7809     if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7810         isKnownNonNegative(getSCEV(U->getOperand(1))))
7811       return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7812     break;
7813
7814   case Instruction::GetElementPtr:
7815     return createNodeForGEP(cast<GEPOperator>(U));
7816
7817   case Instruction::PHI:
7818     return createNodeForPHI(cast<PHINode>(U));
7819
7820   case Instruction::Select:
7821     return createNodeForSelectOrPHI(U, U->getOperand(0), U->getOperand(1),
7822                                     U->getOperand(2));
7823
7824   case Instruction::Call:
7825   case Instruction::Invoke:
7826     if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
7827       return getSCEV(RV);
7828
7829     if (auto *II = dyn_cast<IntrinsicInst>(U)) {
7830       switch (II->getIntrinsicID()) {
7831       case Intrinsic::abs:
7832         return getAbsExpr(
7833             getSCEV(II->getArgOperand(0)),
7834             /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
7835       case Intrinsic::umax:
7836         LHS = getSCEV(II->getArgOperand(0));
7837         RHS = getSCEV(II->getArgOperand(1));
7838         return getUMaxExpr(LHS, RHS);
7839       case Intrinsic::umin:
7840         LHS = getSCEV(II->getArgOperand(0));
7841         RHS = getSCEV(II->getArgOperand(1));
7842         return getUMinExpr(LHS, RHS);
7843       case Intrinsic::smax:
7844         LHS = getSCEV(II->getArgOperand(0));
7845         RHS = getSCEV(II->getArgOperand(1));
7846         return getSMaxExpr(LHS, RHS);
7847       case Intrinsic::smin:
7848         LHS = getSCEV(II->getArgOperand(0));
7849         RHS = getSCEV(II->getArgOperand(1));
7850         return getSMinExpr(LHS, RHS);
7851       case Intrinsic::usub_sat: {
7852         const SCEV *X = getSCEV(II->getArgOperand(0));
7853         const SCEV *Y = getSCEV(II->getArgOperand(1));
7854         const SCEV *ClampedY = getUMinExpr(X, Y);
7855         return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
7856       }
7857       case Intrinsic::uadd_sat: {
7858         const SCEV *X = getSCEV(II->getArgOperand(0));
7859         const SCEV *Y = getSCEV(II->getArgOperand(1));
7860         const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
7861         return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
7862       }
7863       case Intrinsic::start_loop_iterations:
7864       case Intrinsic::annotation:
7865       case Intrinsic::ptr_annotation:
7866         // A start_loop_iterations, llvm.annotation, or llvm.ptr.annotation
7867         // is just equivalent to its first operand for SCEV purposes.
7868         return getSCEV(II->getArgOperand(0));
7869       default:
7870         break;
7871       }
7872     }
7873     break;
7874   }
7875
7876   return getUnknown(V);
7877 }
7878
7879 //===----------------------------------------------------------------------===//
7880 //            Iteration Count Computation Code
7881 //
7882
7883 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount,
7884                                                        bool Extend) {
7885   if (isa<SCEVCouldNotCompute>(ExitCount))
7886     return getCouldNotCompute();
7887
7888   auto *ExitCountType = ExitCount->getType();
7889   assert(ExitCountType->isIntegerTy());
7890
7891   if (!Extend)
7892     return getAddExpr(ExitCount, getOne(ExitCountType));
7893
7894   auto *WiderType = Type::getIntNTy(ExitCountType->getContext(),
7895                                     1 + ExitCountType->getScalarSizeInBits());
7896   return getAddExpr(getNoopOrZeroExtend(ExitCount, WiderType),
7897                     getOne(WiderType));
7898 }
7899
7900 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
7901   if (!ExitCount)
7902     return 0;
7903
7904   ConstantInt *ExitConst = ExitCount->getValue();
7905
7906   // Guard against huge trip counts.
7907   if (ExitConst->getValue().getActiveBits() > 32)
7908     return 0;
7909
7910   // In case of integer overflow, this returns 0, which is correct.
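  // E.g. an exit count of 0xFFFFFFFF (2^32 - 1 backedges) yields
  // ((unsigned)0xFFFFFFFF) + 1 == 0, i.e. "trip count unknown".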
7911   return ((unsigned)ExitConst->getZExtValue()) + 1;
7912 }
7913
7914 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
7915   auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
7916   return getConstantTripCount(ExitCount);
7917 }
7918
7919 unsigned
7920 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
7921                                            const BasicBlock *ExitingBlock) {
7922   assert(ExitingBlock && "Must pass a non-null exiting block!");
7923   assert(L->isLoopExiting(ExitingBlock) &&
7924          "Exiting block must actually branch out of the loop!");
7925   const SCEVConstant *ExitCount =
7926       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
7927   return getConstantTripCount(ExitCount);
7928 }
7929
7930 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
7931   const auto *MaxExitCount =
7932       dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
7933   return getConstantTripCount(MaxExitCount);
7934 }
7935
7936 const SCEV *ScalarEvolution::getConstantMaxTripCountFromArray(const Loop *L) {
7937   // We cannot infer a trip count from an array access in an irregular loop.
7938   // FIXME: It is hard to infer a loop bound from an array used in a nested loop.
7939   if (!L->isLoopSimplifyForm() || !L->isInnermost())
7940     return getCouldNotCompute();
7941
7942   // FIXME: To keep the analysis to the typical case, we only analyze loops
7943   // that have a single exiting block, which must be the latch. This makes it
7944   // easier to guarantee that any memory access we find in the loop body is
7945   // actually executed on every iteration.
7946   const BasicBlock *LoopLatch = L->getLoopLatch();
7947   assert(LoopLatch && "See the definition of loop simplify form.");
7948   if (L->getExitingBlock() != LoopLatch)
7949     return getCouldNotCompute();
7950
7951   const DataLayout &DL = getDataLayout();
7952   SmallVector<const SCEV *> InferCountColl;
7953   for (auto *BB : L->getBlocks()) {
7954     // At this point we know the loop is in simplified form with a single
7955     // exiting block, the latch. Only infer from memory operations that must
7956     // be executed on every iteration, i.e. whose block dominates the latch:
7957     // then the maximum execution count of MemAccessBB bounds the latch's.
7958     // If MemAccessBB does not dominate the latch, skip it.
7959     //            Entry
7960     //              │
7961     //        ┌─────▼─────┐
7962     //        │Loop Header◄─────┐
7963     //        └──┬──────┬─┘     │
7964     //           │      │       │
7965     //  ┌────────▼──┐ ┌─▼─────┐ │
7966     //  │MemAccessBB│ │OtherBB│ │
7967     //  └────────┬──┘ └─┬─────┘ │
7968     //           │      │       │
7969     //        ┌─▼──────▼─┐      │
7970     //        │Loop Latch├──────┘
7971     //        └────┬─────┘
7972     //             ▼
7973     //            Exit
7974     if (!DT.dominates(BB, LoopLatch))
7975       continue;
7976
7977     for (Instruction &Inst : *BB) {
7978       // Find memory operation instructions.
7979       auto *GEP = getLoadStorePointerOperand(&Inst);
7980       if (!GEP)
7981         continue;
7982
7983       auto *ElemSize = dyn_cast<SCEVConstant>(getElementSize(&Inst));
7984       // Do not infer from a scalar type, e.g. "ElemSize = sizeof()".
7985       if (!ElemSize)
7986         continue;
7987
7988       // Use an existing polynomial recurrence on the trip count.
7989       auto *AddRec = dyn_cast<SCEVAddRecExpr>(getSCEV(GEP));
7990       if (!AddRec)
7991         continue;
7992       auto *ArrBase = dyn_cast<SCEVUnknown>(getPointerBase(AddRec));
7993       auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*this));
7994       if (!ArrBase || !Step)
7995         continue;
7996       assert(isLoopInvariant(ArrBase, L) && "See addrec definition");
7997
7998       // Only handle { %array + step }.
7999       // FIXME: { (SCEVAddRecExpr) + step } cannot be analyzed here.
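      // E.g. accept {%array,+,4}<%loop>, but skip an addrec whose start is
      // offset from the base, such as {(%array + 8),+,4}<%loop>.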
8000       if (AddRec->getStart() != ArrBase)
8001         continue;
8002
8003       // Skip patterns whose step does not match the element size: gaps in
8004       // the accesses (step larger than the element size), a repeated memory
8005       // operation (step of zero), or a GEP index that wraps around.
8006       if (Step->getAPInt().getActiveBits() > 32 ||
8007           Step->getAPInt().getZExtValue() !=
8008               ElemSize->getAPInt().getZExtValue() ||
8009           Step->isZero() || Step->getAPInt().isNegative())
8010         continue;
8011
8012       // Only infer from stack arrays with a known, fixed size.
8013       // Make sure the alloca instruction is not executed inside the loop.
8014       AllocaInst *AllocateInst = dyn_cast<AllocaInst>(ArrBase->getValue());
8015       if (!AllocateInst || L->contains(AllocateInst->getParent()))
8016         continue;
8017
8018       // Make sure we only handle a normal array allocation.
8019       auto *Ty = dyn_cast<ArrayType>(AllocateInst->getAllocatedType());
8020       auto *ArrSize = dyn_cast<ConstantInt>(AllocateInst->getArraySize());
8021       if (!Ty || !ArrSize || !ArrSize->isOne())
8022         continue;
8023
8024       // FIXME: Since GEP indices are silently zext'd to the indexing type,
8025       // a narrow GEP index may wrap around rather than increase strictly;
8026       // we should ensure that the index increases strictly with each loop
8027       // iteration.
8028       // Now we can infer a maximum execution count of MemSize / Step.
8029       const SCEV *MemSize =
8030           getConstant(Step->getType(), DL.getTypeAllocSize(Ty));
8031       auto *MaxExeCount =
8032           dyn_cast<SCEVConstant>(getUDivCeilSCEV(MemSize, Step));
8033       if (!MaxExeCount || MaxExeCount->getAPInt().getActiveBits() > 32)
8034         continue;
8035
8036       // If the loop reaches this maximum number of executions, we cannot
8037       // access any byte beyond the statically allocated size without
8038       // immediate UB. It is, however, still allowed to enter the loop
8039       // header one more time.
8040       auto *InferCount = dyn_cast<SCEVConstant>(
8041           getAddExpr(MaxExeCount, getOne(MaxExeCount->getType())));
8042       // Discard inferred trip counts that do not fit in 32 bits.
8043       if (!InferCount || InferCount->getAPInt().getActiveBits() > 32)
8044         continue;
8045
8046       InferCountColl.push_back(InferCount);
8047     }
8048   }
8049
8050   if (InferCountColl.size() == 0)
8051     return getCouldNotCompute();
8052
8053   return getUMinFromMismatchedTypes(InferCountColl);
8054 }
8055
8056 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
8057   SmallVector<BasicBlock *, 8> ExitingBlocks;
8058   L->getExitingBlocks(ExitingBlocks);
8059
8060   Optional<unsigned> Res = None;
8061   for (auto *ExitingBB : ExitingBlocks) {
8062     unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
8063     if (!Res)
8064       Res = Multiple;
8065     Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
8066   }
8067   return Res.value_or(1);
8068 }
8069
8070 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
8071                                                        const SCEV *ExitCount) {
8072   if (ExitCount == getCouldNotCompute())
8073     return 1;
8074
8075   // Get the trip count.
8076   const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
8077
8078   const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
8079   if (!TC)
8080     // Attempt to factor more general cases. Returns the greatest power of
8081     // two divisor. If overflow happens, the trip count expression is still
8082     // divisible by the greatest power of 2 divisor returned.
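    // E.g. a trip count of (4 * %n) has at least two trailing zero bits, so
    // a multiple of 4 can be reported even though the count is not constant.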
8083     return 1U << std::min((uint32_t)31,
8084                           GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
8085
8086   ConstantInt *Result = TC->getValue();
8087
8088   // Guard against huge trip counts (this requires checking
8089   // for zero to handle the case where the trip count == -1 and the
8090   // addition wraps).
8091   if (!Result || Result->getValue().getActiveBits() > 32 ||
8092       Result->getValue().getActiveBits() == 0)
8093     return 1;
8094
8095   return (unsigned)Result->getZExtValue();
8096 }
8097
8098 /// Returns the largest constant divisor of the trip count of this loop as a
8099 /// normal unsigned value, if possible. This means that the actual trip count
8100 /// is always a multiple of the returned value (don't forget the trip count
8101 /// could very well be zero as well!).
8102 ///
8103 /// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
8104 /// of a constant (which is also the case if the trip count is simply
8105 /// constant; use getSmallConstantTripCount for that case). It will also
8106 /// return 1 if the trip count is very large (>= 2^32).
8107 ///
8108 /// As explained in the comments for getSmallConstantTripCount, this assumes
8109 /// that control exits the loop via ExitingBlock.
8110 unsigned
8111 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
8112                                               const BasicBlock *ExitingBlock) {
8113   assert(ExitingBlock && "Must pass a non-null exiting block!");
8114   assert(L->isLoopExiting(ExitingBlock) &&
8115          "Exiting block must actually branch out of the loop!");
8116   const SCEV *ExitCount = getExitCount(L, ExitingBlock);
8117   return getSmallConstantTripMultiple(L, ExitCount);
8118 }
8119
8120 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
8121                                           const BasicBlock *ExitingBlock,
8122                                           ExitCountKind Kind) {
8123   switch (Kind) {
8124   case Exact:
8125   case SymbolicMaximum:
8126     return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
8127   case ConstantMaximum:
8128     return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
8129   };
8130   llvm_unreachable("Invalid ExitCountKind!");
8131 }
8132
8133 const SCEV *
8134 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
8135     SmallVector<const SCEVPredicate *, 4> &Preds) {
8136   return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
8137 }
8138
8139 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
8140                                                    ExitCountKind Kind) {
8141   switch (Kind) {
8142   case Exact:
8143     return getBackedgeTakenInfo(L).getExact(L, this);
8144   case ConstantMaximum:
8145     return getBackedgeTakenInfo(L).getConstantMax(this);
8146   case SymbolicMaximum:
8147     return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
8148   };
8149   llvm_unreachable("Invalid ExitCountKind!");
8150 }
8151
8152 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
8153   return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
8154 }
8155
8156 /// Push PHI nodes in the header of the given loop onto the given Worklist.
8157 static void PushLoopPHIs(const Loop *L,
8158                          SmallVectorImpl<Instruction *> &Worklist,
8159                          SmallPtrSetImpl<Instruction *> &Visited) {
8160   BasicBlock *Header = L->getHeader();
8161
8162   // Push all Loop-header PHIs onto the Worklist stack.
8163   for (PHINode &PN : Header->phis())
8164     if (Visited.insert(&PN).second)
8165       Worklist.push_back(&PN);
8166 }
8167
8168 const ScalarEvolution::BackedgeTakenInfo &
8169 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
8170   auto &BTI = getBackedgeTakenInfo(L);
8171   if (BTI.hasFullInfo())
8172     return BTI;
8173
8174   auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
8175
8176   if (!Pair.second)
8177     return Pair.first->second;
8178
8179   BackedgeTakenInfo Result =
8180       computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
8181
8182   return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8183 }
8184
8185 ScalarEvolution::BackedgeTakenInfo &
8186 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
8187   // Initially insert an invalid entry for this loop. If the insertion
8188   // succeeds, proceed to actually compute a backedge-taken count and
8189   // update the value. The temporary CouldNotCompute value tells SCEV
8190   // code elsewhere that it shouldn't attempt to request a new
8191   // backedge-taken count, which could result in infinite recursion.
8192   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
8193       BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
8194   if (!Pair.second)
8195     return Pair.first->second;
8196
8197   // computeBackedgeTakenCount may allocate memory for its result. Inserting it
8198   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
8199   // must be cleared in this scope.
8200   BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
8201
8202   // In a release build, the statistics are not used.
8203   (void)NumTripCountsComputed;
8204   (void)NumTripCountsNotComputed;
8205 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
8206   const SCEV *BEExact = Result.getExact(L, this);
8207   if (BEExact != getCouldNotCompute()) {
8208     assert(isLoopInvariant(BEExact, L) &&
8209            isLoopInvariant(Result.getConstantMax(this), L) &&
8210            "Computed backedge-taken count isn't loop invariant for loop!");
8211     ++NumTripCountsComputed;
8212   } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
8213              isa<PHINode>(L->getHeader()->begin())) {
8214     // Only count loops that have phi nodes as not being computable.
8215     ++NumTripCountsNotComputed;
8216   }
8217 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
8218
8219   // Now that we know more about the trip count for this loop, forget any
8220   // existing SCEV values for PHI nodes in this loop since they are only
8221   // conservative estimates made without the benefit of trip count
8222   // information. This invalidation is not necessary for correctness, and is
8223   // only done to produce more precise results.
8224   if (Result.hasAnyInfo()) {
8225     // Invalidate any expression using an addrec in this loop.
8226     SmallVector<const SCEV *, 8> ToForget;
8227     auto LoopUsersIt = LoopUsers.find(L);
8228     if (LoopUsersIt != LoopUsers.end())
8229       append_range(ToForget, LoopUsersIt->second);
8230     forgetMemoizedResults(ToForget);
8231
8232     // Invalidate constant-evolved loop header phis.
8233     for (PHINode &PN : L->getHeader()->phis())
8234       ConstantEvolutionLoopExitValue.erase(&PN);
8235   }
8236
8237   // Re-lookup the insert position, since the call to
8238   // computeBackedgeTakenCount above could result in a
8239   // recursive call to getBackedgeTakenInfo (on a different
8240   // loop), which would invalidate the iterator computed
8241   // earlier.
8242 return BackedgeTakenCounts.find(L)->second = std::move(Result); 8243 } 8244 8245 void ScalarEvolution::forgetAllLoops() { 8246 // This method is intended to forget all info about loops. It should 8247 // invalidate caches as if the following happened: 8248 // - The trip counts of all loops have changed arbitrarily 8249 // - Every llvm::Value has been updated in place to produce a different 8250 // result. 8251 BackedgeTakenCounts.clear(); 8252 PredicatedBackedgeTakenCounts.clear(); 8253 BECountUsers.clear(); 8254 LoopPropertiesCache.clear(); 8255 ConstantEvolutionLoopExitValue.clear(); 8256 ValueExprMap.clear(); 8257 ValuesAtScopes.clear(); 8258 ValuesAtScopesUsers.clear(); 8259 LoopDispositions.clear(); 8260 BlockDispositions.clear(); 8261 UnsignedRanges.clear(); 8262 SignedRanges.clear(); 8263 ExprValueMap.clear(); 8264 HasRecMap.clear(); 8265 MinTrailingZerosCache.clear(); 8266 PredicatedSCEVRewrites.clear(); 8267 } 8268 8269 void ScalarEvolution::forgetLoop(const Loop *L) { 8270 SmallVector<const Loop *, 16> LoopWorklist(1, L); 8271 SmallVector<Instruction *, 32> Worklist; 8272 SmallPtrSet<Instruction *, 16> Visited; 8273 SmallVector<const SCEV *, 16> ToForget; 8274 8275 // Iterate over all the loops and sub-loops to drop SCEV information. 8276 while (!LoopWorklist.empty()) { 8277 auto *CurrL = LoopWorklist.pop_back_val(); 8278 8279 // Drop any stored trip count value. 8280 forgetBackedgeTakenCounts(CurrL, /* Predicated */ false); 8281 forgetBackedgeTakenCounts(CurrL, /* Predicated */ true); 8282 8283 // Drop information about predicated SCEV rewrites for this loop. 8284 for (auto I = PredicatedSCEVRewrites.begin(); 8285 I != PredicatedSCEVRewrites.end();) { 8286 std::pair<const SCEV *, const Loop *> Entry = I->first; 8287 if (Entry.second == CurrL) 8288 PredicatedSCEVRewrites.erase(I++); 8289 else 8290 ++I; 8291 } 8292 8293 auto LoopUsersItr = LoopUsers.find(CurrL); 8294 if (LoopUsersItr != LoopUsers.end()) { 8295 ToForget.insert(ToForget.end(), LoopUsersItr->second.begin(), 8296 LoopUsersItr->second.end()); 8297 } 8298 8299 // Drop information about expressions based on loop-header PHIs. 8300 PushLoopPHIs(CurrL, Worklist, Visited); 8301 8302 while (!Worklist.empty()) { 8303 Instruction *I = Worklist.pop_back_val(); 8304 8305 ValueExprMapType::iterator It = 8306 ValueExprMap.find_as(static_cast<Value *>(I)); 8307 if (It != ValueExprMap.end()) { 8308 eraseValueFromMap(It->first); 8309 ToForget.push_back(It->second); 8310 if (PHINode *PN = dyn_cast<PHINode>(I)) 8311 ConstantEvolutionLoopExitValue.erase(PN); 8312 } 8313 8314 PushDefUseChildren(I, Worklist, Visited); 8315 } 8316 8317 LoopPropertiesCache.erase(CurrL); 8318 // Forget all contained loops too, to avoid dangling entries in the 8319 // ValuesAtScopes map. 8320 LoopWorklist.append(CurrL->begin(), CurrL->end()); 8321 } 8322 forgetMemoizedResults(ToForget); 8323 } 8324 8325 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 8326 forgetLoop(L->getOutermostLoop()); 8327 } 8328 8329 void ScalarEvolution::forgetValue(Value *V) { 8330 Instruction *I = dyn_cast<Instruction>(V); 8331 if (!I) return; 8332 8333 // Drop information about expressions based on loop-header PHIs. 
8334   SmallVector<Instruction *, 16> Worklist;
8335   SmallPtrSet<Instruction *, 8> Visited;
8336   SmallVector<const SCEV *, 8> ToForget;
8337   Worklist.push_back(I);
8338   Visited.insert(I);
8339
8340   while (!Worklist.empty()) {
8341     I = Worklist.pop_back_val();
8342     ValueExprMapType::iterator It =
8343         ValueExprMap.find_as(static_cast<Value *>(I));
8344     if (It != ValueExprMap.end()) {
8345       eraseValueFromMap(It->first);
8346       ToForget.push_back(It->second);
8347       if (PHINode *PN = dyn_cast<PHINode>(I))
8348         ConstantEvolutionLoopExitValue.erase(PN);
8349     }
8350
8351     PushDefUseChildren(I, Worklist, Visited);
8352   }
8353   forgetMemoizedResults(ToForget);
8354 }
8355
8356 void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
8357   LoopDispositions.clear();
8358 }
8359
8360 /// Get the exact loop backedge taken count considering all loop exits. A
8361 /// computable result can only be returned for loops with all exiting blocks
8362 /// dominating the latch. howFarToZero assumes that the limit of each loop test
8363 /// is never skipped. This is a valid assumption as long as the loop exits via
8364 /// that test. For precise results, it is the caller's responsibility to specify
8365 /// the relevant loop exiting block using getExact(ExitingBlock, SE).
8366 const SCEV *
8367 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
8368     SmallVector<const SCEVPredicate *, 4> *Preds) const {
8369   // If any exits were not computable, the loop is not computable.
8370   if (!isComplete() || ExitNotTaken.empty())
8371     return SE->getCouldNotCompute();
8372
8373   const BasicBlock *Latch = L->getLoopLatch();
8374   // All exiting blocks we have collected must dominate the only backedge.
8375   if (!Latch)
8376     return SE->getCouldNotCompute();
8377
8378   // All exiting blocks we have gathered dominate the loop's latch, so the
8379   // exact trip count is simply the minimum of all these calculated exit counts.
8380   SmallVector<const SCEV *, 2> Ops;
8381   for (const auto &ENT : ExitNotTaken) {
8382     const SCEV *BECount = ENT.ExactNotTaken;
8383     assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
8384     assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
8385            "We should only have known counts for exiting blocks that dominate "
8386            "latch!");
8387
8388     Ops.push_back(BECount);
8389
8390     if (Preds)
8391       for (const auto *P : ENT.Predicates)
8392         Preds->push_back(P);
8393
8394     assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8395            "Predicate should be always true!");
8396   }
8397
8398   // If an earlier exit exits on the first iteration (exit count zero), then
8399   // a later poison exit count should not propagate into the result. These
8400   // are exactly the semantics provided by umin_seq.
8401   return SE->getUMinFromMismatchedTypes(Ops, /* Sequential */ true);
8402 }
8403
8404 /// Get the exact not taken count for this loop exit.
8405 const SCEV *
8406 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
8407                                              ScalarEvolution *SE) const {
8408   for (const auto &ENT : ExitNotTaken)
8409     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
8410       return ENT.ExactNotTaken;
8411
8412   return SE->getCouldNotCompute();
8413 }
8414
8415 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8416     const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
8417   for (const auto &ENT : ExitNotTaken)
8418     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
8419       return ENT.MaxNotTaken;
8420
8421   return SE->getCouldNotCompute();
8422 }
8423
8424 /// getConstantMax - Get the constant max backedge taken count for the loop.
8425 const SCEV *
8426 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
8427   auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
8428     return !ENT.hasAlwaysTruePredicate();
8429   };
8430
8431   if (!getConstantMax() || any_of(ExitNotTaken, PredicateNotAlwaysTrue))
8432     return SE->getCouldNotCompute();
8433
8434   assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
8435           isa<SCEVConstant>(getConstantMax())) &&
8436          "No point in having a non-constant max backedge taken count!");
8437   return getConstantMax();
8438 }
8439
8440 const SCEV *
8441 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
8442                                                    ScalarEvolution *SE) {
8443   if (!SymbolicMax)
8444     SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
8445   return SymbolicMax;
8446 }
8447
8448 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8449     ScalarEvolution *SE) const {
8450   auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
8451     return !ENT.hasAlwaysTruePredicate();
8452   };
8453   return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8454 }
8455
8456 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
8457     : ExitLimit(E, E, false, None) {
8458 }
8459
8460 ScalarEvolution::ExitLimit::ExitLimit(
8461     const SCEV *E, const SCEV *M, bool MaxOrZero,
8462     ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
8463     : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
8464   // If we prove the max count is zero, so is the symbolic bound. This happens
8465   // in practice due to differences in a) how context sensitive we've chosen
8466   // to be and b) how we reason about bounds implied by UB.
8467   if (MaxNotTaken->isZero())
8468     ExactNotTaken = MaxNotTaken;
8469
8470   assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
8471           !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
8472          "Exact is not allowed to be less precise than Max");
8473   assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
8474           isa<SCEVConstant>(MaxNotTaken)) &&
8475          "No point in having a non-constant max backedge taken count!");
8476   for (const auto *PredSet : PredSetList)
8477     for (const auto *P : *PredSet)
8478       addPredicate(P);
8479   assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
8480          "Backedge count should be int");
8481   assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) &&
8482          "Max backedge count should be int");
8483 }
8484
8485 ScalarEvolution::ExitLimit::ExitLimit(
8486     const SCEV *E, const SCEV *M, bool MaxOrZero,
8487     const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
8488     : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
8489 }
8490
8491 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
8492                                       bool MaxOrZero)
8493     : ExitLimit(E, M, MaxOrZero, None) {
8494 }
8495
8496 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
8497 /// computable exit into a persistent ExitNotTakenInfo array.
8498 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8499     ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
8500     bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
8501     : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8502   using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8503
8504   ExitNotTaken.reserve(ExitCounts.size());
8505   std::transform(
8506       ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
8507       [&](const EdgeExitInfo &EEI) {
8508         BasicBlock *ExitBB = EEI.first;
8509         const ExitLimit &EL = EEI.second;
8510         return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
8511                                 EL.Predicates);
8512       });
8513   assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
8514           isa<SCEVConstant>(ConstantMax)) &&
8515          "No point in having a non-constant max backedge taken count!");
8516 }
8517
8518 /// Compute the number of times the backedge of the specified loop will execute.
8519 ScalarEvolution::BackedgeTakenInfo
8520 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
8521                                            bool AllowPredicates) {
8522   SmallVector<BasicBlock *, 8> ExitingBlocks;
8523   L->getExitingBlocks(ExitingBlocks);
8524
8525   using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8526
8527   SmallVector<EdgeExitInfo, 4> ExitCounts;
8528   bool CouldComputeBECount = true;
8529   BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
8530   const SCEV *MustExitMaxBECount = nullptr;
8531   const SCEV *MayExitMaxBECount = nullptr;
8532   bool MustExitMaxOrZero = false;
8533
8534   // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
8535   // and compute maxBECount.
8536   // Do a union of all the predicates here.
8537   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
8538     BasicBlock *ExitBB = ExitingBlocks[i];
8539
8540     // We canonicalize untaken exits to br (constant); ignore them so that
8541     // proving an exit untaken doesn't negatively impact our ability to
8542     // reason about the loop as a whole.
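    // E.g. 'br i1 false, label %exit, label %body' can never take the exit
    // edge; treating it as a real exit would needlessly turn the exact
    // backedge-taken count into CouldNotCompute.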
8543     if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
8544       if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
8545         bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
8546         if (ExitIfTrue == CI->isZero())
8547           continue;
8548       }
8549
8550     ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
8551
8552     assert((AllowPredicates || EL.Predicates.empty()) &&
8553            "Predicated exit limit when predicates are not allowed!");
8554
8555     // 1. For each exit that can be computed, add an entry to ExitCounts.
8556     // CouldComputeBECount is true only if all exits can be computed.
8557     if (EL.ExactNotTaken == getCouldNotCompute())
8558       // We couldn't compute an exact value for this exit, so
8559       // we won't be able to compute an exact value for the loop.
8560       CouldComputeBECount = false;
8561     else
8562       ExitCounts.emplace_back(ExitBB, EL);
8563
8564     // 2. Derive the loop's MaxBECount from each exit's max number of
8565     // non-exiting iterations. Partition the loop exits into two kinds:
8566     // LoopMustExits and LoopMayExits.
8567     //
8568     // If the exit dominates the loop latch, it is a LoopMustExit; otherwise
8569     // it is a LoopMayExit. If any computable LoopMustExit is found, then
8570     // MaxBECount is the minimum EL.MaxNotTaken of computable
8571     // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
8572     // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
8573     // computable EL.MaxNotTaken.
8574     if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
8575         DT.dominates(ExitBB, Latch)) {
8576       if (!MustExitMaxBECount) {
8577         MustExitMaxBECount = EL.MaxNotTaken;
8578         MustExitMaxOrZero = EL.MaxOrZero;
8579       } else {
8580         MustExitMaxBECount =
8581             getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
8582       }
8583     } else if (MayExitMaxBECount != getCouldNotCompute()) {
8584       if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
8585         MayExitMaxBECount = EL.MaxNotTaken;
8586       else {
8587         MayExitMaxBECount =
8588             getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
8589       }
8590     }
8591   }
8592   const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8593     (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
8594   // The loop backedge will be taken the maximum or zero times if there's
8595   // a single exit that must be taken the maximum or zero times.
8596   bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
8597
8598   // Remember which SCEVs are used in exit limits for invalidation purposes.
8599   // We only care about non-constant SCEVs here, so we can ignore
8600   // EL.MaxNotTaken and MaxBECount, which must be SCEVConstant.
8601   for (const auto &Pair : ExitCounts)
8602     if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
8603       BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
8604   return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8605                            MaxBECount, MaxOrZero);
8606 }
8607
8608 ScalarEvolution::ExitLimit
8609 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
8610                                   bool AllowPredicates) {
8611   assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
8612   // If our exiting block does not dominate the latch, then its connection
8613   // with the loop's exit limit may be far from trivial.
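  // E.g. an exiting block nested under a condition inside the loop may be
  // tested on only some iterations, so its exit count alone says little
  // about how many times the backedge is taken.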
8614 const BasicBlock *Latch = L->getLoopLatch(); 8615 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 8616 return getCouldNotCompute(); 8617 8618 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 8619 Instruction *Term = ExitingBlock->getTerminator(); 8620 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 8621 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 8622 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 8623 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 8624 "It should have one successor in loop and one exit block!"); 8625 // Proceed to the next level to examine the exit condition expression. 8626 return computeExitLimitFromCond( 8627 L, BI->getCondition(), ExitIfTrue, 8628 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 8629 } 8630 8631 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 8632 // For switch, make sure that there is a single exit from the loop. 8633 BasicBlock *Exit = nullptr; 8634 for (auto *SBB : successors(ExitingBlock)) 8635 if (!L->contains(SBB)) { 8636 if (Exit) // Multiple exit successors. 8637 return getCouldNotCompute(); 8638 Exit = SBB; 8639 } 8640 assert(Exit && "Exiting block must have at least one exit"); 8641 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 8642 /*ControlsExit=*/IsOnlyExit); 8643 } 8644 8645 return getCouldNotCompute(); 8646 } 8647 8648 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 8649 const Loop *L, Value *ExitCond, bool ExitIfTrue, 8650 bool ControlsExit, bool AllowPredicates) { 8651 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 8652 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 8653 ControlsExit, AllowPredicates); 8654 } 8655 8656 Optional<ScalarEvolution::ExitLimit> 8657 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 8658 bool ExitIfTrue, bool ControlsExit, 8659 bool AllowPredicates) { 8660 (void)this->L; 8661 (void)this->ExitIfTrue; 8662 (void)this->AllowPredicates; 8663 8664 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8665 this->AllowPredicates == AllowPredicates && 8666 "Variance in assumed invariant key components!"); 8667 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 8668 if (Itr == TripCountMap.end()) 8669 return None; 8670 return Itr->second; 8671 } 8672 8673 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 8674 bool ExitIfTrue, 8675 bool ControlsExit, 8676 bool AllowPredicates, 8677 const ExitLimit &EL) { 8678 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8679 this->AllowPredicates == AllowPredicates && 8680 "Variance in assumed invariant key components!"); 8681 8682 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 8683 assert(InsertResult.second && "Expected successful insertion!"); 8684 (void)InsertResult; 8685 (void)ExitIfTrue; 8686 } 8687 8688 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 8689 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8690 bool ControlsExit, bool AllowPredicates) { 8691 8692 if (auto MaybeEL = 8693 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 8694 return *MaybeEL; 8695 8696 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 8697 ControlsExit, AllowPredicates); 8698 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 8699 return EL; 8700 } 8701 8702 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 8703 
ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8704 bool ControlsExit, bool AllowPredicates) { 8705 // Handle BinOp conditions (And, Or). 8706 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 8707 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 8708 return *LimitFromBinOp; 8709 8710 // With an icmp, it may be feasible to compute an exact backedge-taken count. 8711 // Proceed to the next level to examine the icmp. 8712 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 8713 ExitLimit EL = 8714 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 8715 if (EL.hasFullInfo() || !AllowPredicates) 8716 return EL; 8717 8718 // Try again, but use SCEV predicates this time. 8719 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 8720 /*AllowPredicates=*/true); 8721 } 8722 8723 // Check for a constant condition. These are normally stripped out by 8724 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 8725 // preserve the CFG and is temporarily leaving constant conditions 8726 // in place. 8727 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 8728 if (ExitIfTrue == !CI->getZExtValue()) 8729 // The backedge is always taken. 8730 return getCouldNotCompute(); 8731 else 8732 // The backedge is never taken. 8733 return getZero(CI->getType()); 8734 } 8735 8736 // If we're exiting based on the overflow flag of an x.with.overflow intrinsic 8737 // with a constant step, we can form an equivalent icmp predicate and figure 8738 // out how many iterations will be taken before we exit. 8739 const WithOverflowInst *WO; 8740 const APInt *C; 8741 if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) && 8742 match(WO->getRHS(), m_APInt(C))) { 8743 ConstantRange NWR = 8744 ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C, 8745 WO->getNoWrapKind()); 8746 CmpInst::Predicate Pred; 8747 APInt NewRHSC, Offset; 8748 NWR.getEquivalentICmp(Pred, NewRHSC, Offset); 8749 if (!ExitIfTrue) 8750 Pred = ICmpInst::getInversePredicate(Pred); 8751 auto *LHS = getSCEV(WO->getLHS()); 8752 if (Offset != 0) 8753 LHS = getAddExpr(LHS, getConstant(Offset)); 8754 auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC), 8755 ControlsExit, AllowPredicates); 8756 if (EL.hasAnyInfo()) return EL; 8757 } 8758 8759 // If it's not an integer or pointer comparison then compute it the hard way. 8760 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 8761 } 8762 8763 Optional<ScalarEvolution::ExitLimit> 8764 ScalarEvolution::computeExitLimitFromCondFromBinOp( 8765 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8766 bool ControlsExit, bool AllowPredicates) { 8767 // Check if the controlling expression for this loop is an And or Or. 
8768   Value *Op0, *Op1;
8769   bool IsAnd = false;
8770   if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
8771     IsAnd = true;
8772   else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
8773     IsAnd = false;
8774   else
8775     return None;
8776
8777   // EitherMayExit is true in these two cases:
8778   //   br (and Op0 Op1), loop, exit
8779   //   br (or Op0 Op1), exit, loop
8780   bool EitherMayExit = IsAnd ^ ExitIfTrue;
8781   ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
8782                                                  ControlsExit && !EitherMayExit,
8783                                                  AllowPredicates);
8784   ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
8785                                                  ControlsExit && !EitherMayExit,
8786                                                  AllowPredicates);
8787
8788   // Be robust against unsimplified IR for the form "op i1 X, NeutralElement"
8789   const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
8790   if (isa<ConstantInt>(Op1))
8791     return Op1 == NeutralElement ? EL0 : EL1;
8792   if (isa<ConstantInt>(Op0))
8793     return Op0 == NeutralElement ? EL1 : EL0;
8794
8795   const SCEV *BECount = getCouldNotCompute();
8796   const SCEV *MaxBECount = getCouldNotCompute();
8797   if (EitherMayExit) {
8798     // Both conditions must keep the loop executing, so the loop exits as
8799     // soon as either one would. Choose the less conservative (smaller) count.
8800     if (EL0.ExactNotTaken != getCouldNotCompute() &&
8801         EL1.ExactNotTaken != getCouldNotCompute()) {
8802       BECount = getUMinFromMismatchedTypes(
8803           EL0.ExactNotTaken, EL1.ExactNotTaken,
8804           /*Sequential=*/!isa<BinaryOperator>(ExitCond));
8805     }
8806     if (EL0.MaxNotTaken == getCouldNotCompute())
8807       MaxBECount = EL1.MaxNotTaken;
8808     else if (EL1.MaxNotTaken == getCouldNotCompute())
8809       MaxBECount = EL0.MaxNotTaken;
8810     else
8811       MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
8812   } else {
8813     // Both conditions must take the exiting value at the same time for the
8814     // loop to exit. For now, be conservative.
8815     if (EL0.ExactNotTaken == EL1.ExactNotTaken)
8816       BECount = EL0.ExactNotTaken;
8817   }
8818
8819   // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
8820   // to be more aggressive when computing BECount than when computing
8821   // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
8822   // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
8823   // to not.
8824 if (isa<SCEVCouldNotCompute>(MaxBECount) && 8825 !isa<SCEVCouldNotCompute>(BECount)) 8826 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 8827 8828 return ExitLimit(BECount, MaxBECount, false, 8829 { &EL0.Predicates, &EL1.Predicates }); 8830 } 8831 8832 ScalarEvolution::ExitLimit 8833 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 8834 ICmpInst *ExitCond, 8835 bool ExitIfTrue, 8836 bool ControlsExit, 8837 bool AllowPredicates) { 8838 // If the condition was exit on true, convert the condition to exit on false 8839 ICmpInst::Predicate Pred; 8840 if (!ExitIfTrue) 8841 Pred = ExitCond->getPredicate(); 8842 else 8843 Pred = ExitCond->getInversePredicate(); 8844 const ICmpInst::Predicate OriginalPred = Pred; 8845 8846 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 8847 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 8848 8849 ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsExit, 8850 AllowPredicates); 8851 if (EL.hasAnyInfo()) return EL; 8852 8853 auto *ExhaustiveCount = 8854 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 8855 8856 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 8857 return ExhaustiveCount; 8858 8859 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 8860 ExitCond->getOperand(1), L, OriginalPred); 8861 } 8862 ScalarEvolution::ExitLimit 8863 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 8864 ICmpInst::Predicate Pred, 8865 const SCEV *LHS, const SCEV *RHS, 8866 bool ControlsExit, 8867 bool AllowPredicates) { 8868 8869 // Try to evaluate any dependencies out of the loop. 8870 LHS = getSCEVAtScope(LHS, L); 8871 RHS = getSCEVAtScope(RHS, L); 8872 8873 // At this point, we would like to compute how many iterations of the 8874 // loop the predicate will return true for these inputs. 8875 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 8876 // If there is a loop-invariant, force it into the RHS. 8877 std::swap(LHS, RHS); 8878 Pred = ICmpInst::getSwappedPredicate(Pred); 8879 } 8880 8881 bool ControllingFiniteLoop = 8882 ControlsExit && loopHasNoAbnormalExits(L) && loopIsFiniteByAssumption(L); 8883 // Simplify the operands before analyzing them. 8884 (void)SimplifyICmpOperands(Pred, LHS, RHS, /*Depth=*/0, 8885 (EnableFiniteLoopControl ? ControllingFiniteLoop 8886 : false)); 8887 8888 // If we have a comparison of a chrec against a constant, try to use value 8889 // ranges to answer this query. 8890 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 8891 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 8892 if (AddRec->getLoop() == L) { 8893 // Form the constant range. 8894 ConstantRange CompRange = 8895 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 8896 8897 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 8898 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 8899 } 8900 8901 // If this loop must exit based on this condition (or execute undefined 8902 // behaviour), and we can prove the test sequence produced must repeat 8903 // the same values on self-wrap of the IV, then we can infer that IV 8904 // doesn't self wrap because if it did, we'd have an infinite (undefined) 8905 // loop. 8906 if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) { 8907 // TODO: We can peel off any functions which are invertible *in L*. Loop 8908 // invariant terms are effectively constants for our purposes here. 
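    // E.g. zext is injective, so a zero-extended test sequence repeats
    // exactly when the narrow sequence underneath it repeats; that is why it
    // is safe to look through a zero-extend below.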
8909 auto *InnerLHS = LHS; 8910 if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) 8911 InnerLHS = ZExt->getOperand(); 8912 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS)) { 8913 auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)); 8914 if (!AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() && 8915 StrideC && StrideC->getAPInt().isPowerOf2()) { 8916 auto Flags = AR->getNoWrapFlags(); 8917 Flags = setFlags(Flags, SCEV::FlagNW); 8918 SmallVector<const SCEV*> Operands{AR->operands()}; 8919 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); 8920 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags); 8921 } 8922 } 8923 } 8924 8925 switch (Pred) { 8926 case ICmpInst::ICMP_NE: { // while (X != Y) 8927 // Convert to: while (X-Y != 0) 8928 if (LHS->getType()->isPointerTy()) { 8929 LHS = getLosslessPtrToIntExpr(LHS); 8930 if (isa<SCEVCouldNotCompute>(LHS)) 8931 return LHS; 8932 } 8933 if (RHS->getType()->isPointerTy()) { 8934 RHS = getLosslessPtrToIntExpr(RHS); 8935 if (isa<SCEVCouldNotCompute>(RHS)) 8936 return RHS; 8937 } 8938 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 8939 AllowPredicates); 8940 if (EL.hasAnyInfo()) return EL; 8941 break; 8942 } 8943 case ICmpInst::ICMP_EQ: { // while (X == Y) 8944 // Convert to: while (X-Y == 0) 8945 if (LHS->getType()->isPointerTy()) { 8946 LHS = getLosslessPtrToIntExpr(LHS); 8947 if (isa<SCEVCouldNotCompute>(LHS)) 8948 return LHS; 8949 } 8950 if (RHS->getType()->isPointerTy()) { 8951 RHS = getLosslessPtrToIntExpr(RHS); 8952 if (isa<SCEVCouldNotCompute>(RHS)) 8953 return RHS; 8954 } 8955 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 8956 if (EL.hasAnyInfo()) return EL; 8957 break; 8958 } 8959 case ICmpInst::ICMP_SLT: 8960 case ICmpInst::ICMP_ULT: { // while (X < Y) 8961 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 8962 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 8963 AllowPredicates); 8964 if (EL.hasAnyInfo()) return EL; 8965 break; 8966 } 8967 case ICmpInst::ICMP_SGT: 8968 case ICmpInst::ICMP_UGT: { // while (X > Y) 8969 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 8970 ExitLimit EL = 8971 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 8972 AllowPredicates); 8973 if (EL.hasAnyInfo()) return EL; 8974 break; 8975 } 8976 default: 8977 break; 8978 } 8979 8980 return getCouldNotCompute(); 8981 } 8982 8983 ScalarEvolution::ExitLimit 8984 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 8985 SwitchInst *Switch, 8986 BasicBlock *ExitingBlock, 8987 bool ControlsExit) { 8988 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 8989 8990 // Give up if the exit is the default dest of a switch. 
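  // The default destination is reached when the condition matches none of
  // the case values, a set that cannot be expressed as the single
  // "X - Y != 0" style test used below.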
8991   if (Switch->getDefaultDest() == ExitingBlock)
8992     return getCouldNotCompute();
8993
8994   assert(L->contains(Switch->getDefaultDest()) &&
8995          "Default case must not exit the loop!");
8996   const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
8997   const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
8998
8999   // while (X != Y) --> while (X-Y != 0)
9000   ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
9001   if (EL.hasAnyInfo())
9002     return EL;
9003
9004   return getCouldNotCompute();
9005 }
9006
9007 static ConstantInt *
9008 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
9009                                 ScalarEvolution &SE) {
9010   const SCEV *InVal = SE.getConstant(C);
9011   const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
9012   assert(isa<SCEVConstant>(Val) &&
9013          "Evaluation of SCEV at constant didn't fold correctly?");
9014   return cast<SCEVConstant>(Val)->getValue();
9015 }
9016
9017 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9018     Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
9019   ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
9020   if (!RHS)
9021     return getCouldNotCompute();
9022
9023   const BasicBlock *Latch = L->getLoopLatch();
9024   if (!Latch)
9025     return getCouldNotCompute();
9026
9027   const BasicBlock *Predecessor = L->getLoopPredecessor();
9028   if (!Predecessor)
9029     return getCouldNotCompute();
9030
9031   // Return true if V is of the form "LHS `shift_op` <positive constant>".
9032   // Return LHS in OutLHS and shift_op in OutOpCode.
9033   auto MatchPositiveShift =
9034       [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
9035
9036     using namespace PatternMatch;
9037
9038     ConstantInt *ShiftAmt;
9039     if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9040       OutOpCode = Instruction::LShr;
9041     else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9042       OutOpCode = Instruction::AShr;
9043     else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9044       OutOpCode = Instruction::Shl;
9045     else
9046       return false;
9047
9048     return ShiftAmt->getValue().isStrictlyPositive();
9049   };
9050
9051   // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
9052   //
9053   // loop:
9054   //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
9055   //   %iv.shifted = lshr i32 %iv, <positive constant>
9056   //
9057   // Return true on a successful match. Return the corresponding PHI node (%iv
9058   // above) in PNOut and the opcode of the shift operation in OpCodeOut.
9059   auto MatchShiftRecurrence =
9060       [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
9061     Optional<Instruction::BinaryOps> PostShiftOpCode;
9062
9063     {
9064       Instruction::BinaryOps OpC;
9065       Value *V;
9066
9067       // If we encounter a shift instruction, "peel off" the shift operation,
9068       // and remember that we did so. Later when we inspect %iv's backedge
9069       // value, we will make sure that the backedge value uses the same
9070       // operation.
9071       //
9072       // Note: the peeled shift operation does not have to be the same
9073       // instruction as the one feeding into the PHI's backedge value. We only
9074       // really care about it being the same *kind* of shift instruction --
9075       // that's all that is required for our later inferences to hold.
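      // E.g. given '%iv.shifted = lshr i32 %iv, 1' feeding the exit compare,
      // the lshr is peeled here, and the check below insists that the PHI's
      // backedge value is also an lshr.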
9076       if (MatchPositiveShift(LHS, V, OpC)) {
9077         PostShiftOpCode = OpC;
9078         LHS = V;
9079       }
9080     }
9081 
9082     PNOut = dyn_cast<PHINode>(LHS);
9083     if (!PNOut || PNOut->getParent() != L->getHeader())
9084       return false;
9085 
9086     Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9087     Value *OpLHS;
9088 
9089     return
9090         // The backedge value for the PHI node must be a shift by a positive
9091         // amount
9092         MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9093 
9094         // of the PHI node itself
9095         OpLHS == PNOut &&
9096 
9097         // and the kind of shift should match the kind of shift we peeled
9098         // off, if any.
9099         (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9100   };
9101 
9102   PHINode *PN;
9103   Instruction::BinaryOps OpCode;
9104   if (!MatchShiftRecurrence(LHS, PN, OpCode))
9105     return getCouldNotCompute();
9106 
9107   const DataLayout &DL = getDataLayout();
9108 
9109   // The key rationale for this optimization is that for some kinds of shift
9110   // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
9111   // within a finite number of iterations.  If the condition guarding the
9112   // backedge (in the sense that the backedge is taken if the condition is true)
9113   // is false for the value the shift recurrence stabilizes to, then we know
9114   // that the backedge is taken only a finite number of times.
9115 
9116   ConstantInt *StableValue = nullptr;
9117   switch (OpCode) {
9118   default:
9119     llvm_unreachable("Impossible case!");
9120 
9121   case Instruction::AShr: {
9122     // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
9123     // bitwidth(K) iterations.
9124     Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
9125     KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
9126                                        Predecessor->getTerminator(), &DT);
9127     auto *Ty = cast<IntegerType>(RHS->getType());
9128     if (Known.isNonNegative())
9129       StableValue = ConstantInt::get(Ty, 0);
9130     else if (Known.isNegative())
9131       StableValue = ConstantInt::get(Ty, -1, true);
9132     else
9133       return getCouldNotCompute();
9134 
9135     break;
9136   }
9137   case Instruction::LShr:
9138   case Instruction::Shl:
9139     // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
9140     // stabilize to 0 in at most bitwidth(K) iterations.
9141     StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
9142     break;
9143   }
9144 
9145   auto *Result =
9146       ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
9147   assert(Result->getType()->isIntegerTy(1) &&
9148          "Otherwise cannot be an operand to a branch instruction");
9149 
9150   if (Result->isZeroValue()) {
9151     unsigned BitWidth = getTypeSizeInBits(RHS->getType());
9152     const SCEV *UpperBound =
9153         getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
9154     return ExitLimit(getCouldNotCompute(), UpperBound, false);
9155   }
9156 
9157   return getCouldNotCompute();
9158 }
9159 
9160 /// Return true if we can constant fold an instruction of the specified type,
9161 /// assuming that all operands were constants.
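/// For example (illustrative), an add, compare, or GEP over constant operands
/// can be folded, while a call can be folded only if canConstantFoldCallTo
/// approves of the callee.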
9162 static bool CanConstantFold(const Instruction *I) {
9163   if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
9164       isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
9165       isa<LoadInst>(I) || isa<ExtractValueInst>(I))
9166     return true;
9167 
9168   if (const CallInst *CI = dyn_cast<CallInst>(I))
9169     if (const Function *F = CI->getCalledFunction())
9170       return canConstantFoldCallTo(CI, F);
9171   return false;
9172 }
9173 
9174 /// Determine whether this instruction can constant evolve within this loop
9175 /// assuming its operands can all constant evolve.
9176 static bool canConstantEvolve(Instruction *I, const Loop *L) {
9177   // An instruction outside of the loop can't be derived from a loop PHI.
9178   if (!L->contains(I)) return false;
9179 
9180   if (isa<PHINode>(I)) {
9181     // We don't currently keep track of the control flow needed to evaluate
9182     // PHIs, so we cannot handle PHIs inside of loops.
9183     return L->getHeader() == I->getParent();
9184   }
9185 
9186   // If we won't be able to constant fold this expression even if the operands
9187   // are constants, bail early.
9188   return CanConstantFold(I);
9189 }
9190 
9191 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
9192 /// recursing through each instruction operand until reaching a loop header phi.
9193 static PHINode *
9194 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
9195                                DenseMap<Instruction *, PHINode *> &PHIMap,
9196                                unsigned Depth) {
9197   if (Depth > MaxConstantEvolvingDepth)
9198     return nullptr;
9199 
9200   // Otherwise, we can evaluate this instruction if all of its operands are
9201   // constant or derived from a PHI node themselves.
9202   PHINode *PHI = nullptr;
9203   for (Value *Op : UseInst->operands()) {
9204     if (isa<Constant>(Op)) continue;
9205 
9206     Instruction *OpInst = dyn_cast<Instruction>(Op);
9207     if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
9208 
9209     PHINode *P = dyn_cast<PHINode>(OpInst);
9210     if (!P)
9211       // If this operand is already visited, reuse the prior result.
9212       // We may have P != PHI if this is the deepest point at which the
9213       // inconsistent paths meet.
9214       P = PHIMap.lookup(OpInst);
9215     if (!P) {
9216       // Recurse and memoize the results, whether a phi is found or not.
9217       // This recursive call invalidates pointers into PHIMap.
9218       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
9219       PHIMap[OpInst] = P;
9220     }
9221     if (!P)
9222       return nullptr;  // Not evolving from PHI
9223     if (PHI && PHI != P)
9224       return nullptr;  // Evolving from multiple different PHIs.
9225     PHI = P;
9226   }
9227   // This is an expression evolving from a constant PHI!
9228   return PHI;
9229 }
9230 
9231 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
9232 /// in the loop that V is derived from.  We allow arbitrary operations along the
9233 /// way, but the operands of an operation must either be constants or a value
9234 /// derived from a constant PHI.  If this expression does not fit with these
9235 /// constraints, return null.
9236 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
9237   Instruction *I = dyn_cast<Instruction>(V);
9238   if (!I || !canConstantEvolve(I, L)) return nullptr;
9239 
9240   if (PHINode *PN = dyn_cast<PHINode>(I))
9241     return PN;
9242 
9243   // Record non-constant instructions contained by the loop.
9244   DenseMap<Instruction *, PHINode *> PHIMap;
9245   return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
9246 }
9247 
9248 /// EvaluateExpression - Given an expression that passes the
9249 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI
9250 /// nodes in the loop have the constant values recorded in Vals.  If we can't
9251 /// fold this expression for some reason, return null.
9252 static Constant *EvaluateExpression(Value *V, const Loop *L,
9253                                     DenseMap<Instruction *, Constant *> &Vals,
9254                                     const DataLayout &DL,
9255                                     const TargetLibraryInfo *TLI) {
9256   // Convenient constant check, but redundant for recursive calls.
9257   if (Constant *C = dyn_cast<Constant>(V)) return C;
9258   Instruction *I = dyn_cast<Instruction>(V);
9259   if (!I) return nullptr;
9260 
9261   if (Constant *C = Vals.lookup(I)) return C;
9262 
9263   // An instruction inside the loop depends on a value outside the loop that we
9264   // weren't given a mapping for, or a value such as a call inside the loop.
9265   if (!canConstantEvolve(I, L)) return nullptr;
9266 
9267   // An unmapped PHI can be due to a branch or another loop inside this loop,
9268   // or due to this not being the initial iteration through a loop where we
9269   // couldn't compute the evolution of this particular PHI last time.
9270   if (isa<PHINode>(I)) return nullptr;
9271 
9272   std::vector<Constant*> Operands(I->getNumOperands());
9273 
9274   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
9275     Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
9276     if (!Operand) {
9277       Operands[i] = dyn_cast<Constant>(I->getOperand(i));
9278       if (!Operands[i]) return nullptr;
9279       continue;
9280     }
9281     Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
9282     Vals[Operand] = C;
9283     if (!C) return nullptr;
9284     Operands[i] = C;
9285   }
9286 
9287   return ConstantFoldInstOperands(I, Operands, DL, TLI);
9288 }
9289 
9290 
9291 // If every incoming value to PN except the one for BB is a specific Constant,
9292 // return that, else return nullptr.
9293 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
9294   Constant *IncomingVal = nullptr;
9295 
9296   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
9297     if (PN->getIncomingBlock(i) == BB)
9298       continue;
9299 
9300     auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
9301     if (!CurrentVal)
9302       return nullptr;
9303 
9304     if (IncomingVal != CurrentVal) {
9305       if (IncomingVal)
9306         return nullptr;
9307       IncomingVal = CurrentVal;
9308     }
9309   }
9310 
9311   return IncomingVal;
9312 }
9313 
9314 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
9315 /// in the header of its containing loop, that the loop executes a
9316 /// constant number of times, and that the PHI node is just a recurrence
9317 /// involving constants, fold it.
9318 Constant *
9319 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9320                                                    const APInt &BEs,
9321                                                    const Loop *L) {
9322   auto I = ConstantEvolutionLoopExitValue.find(PN);
9323   if (I != ConstantEvolutionLoopExitValue.end())
9324     return I->second;
9325 
9326   if (BEs.ugt(MaxBruteForceIterations))
9327     return ConstantEvolutionLoopExitValue[PN] = nullptr;  // Not going to evaluate it.
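  // Illustrative example (not from the original comments): for
  //
  //   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.next = add i32 %iv, 2
  //
  // and a backedge-taken count of 3, the symbolic iteration below produces
  // the sequence 0, 2, 4, 6 for %iv, so the computed exit value is 6.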
9328 
9329   Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
9330 
9331   DenseMap<Instruction *, Constant *> CurrentIterVals;
9332   BasicBlock *Header = L->getHeader();
9333   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
9334 
9335   BasicBlock *Latch = L->getLoopLatch();
9336   if (!Latch)
9337     return nullptr;
9338 
9339   for (PHINode &PHI : Header->phis()) {
9340     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
9341       CurrentIterVals[&PHI] = StartCST;
9342   }
9343   if (!CurrentIterVals.count(PN))
9344     return RetVal = nullptr;
9345 
9346   Value *BEValue = PN->getIncomingValueForBlock(Latch);
9347 
9348   // Execute the loop symbolically to determine the exit value.
9349   assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
9350          "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9351 
9352   unsigned NumIterations = BEs.getZExtValue(); // must be in range
9353   unsigned IterationNum = 0;
9354   const DataLayout &DL = getDataLayout();
9355   for (; ; ++IterationNum) {
9356     if (IterationNum == NumIterations)
9357       return RetVal = CurrentIterVals[PN];  // Got exit value!
9358 
9359     // Compute the value of the PHIs for the next iteration.
9360     // EvaluateExpression adds non-phi values to the CurrentIterVals map.
9361     DenseMap<Instruction *, Constant *> NextIterVals;
9362     Constant *NextPHI =
9363         EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
9364     if (!NextPHI)
9365       return nullptr;  // Couldn't evaluate!
9366     NextIterVals[PN] = NextPHI;
9367 
9368     bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9369 
9370     // Also evaluate the other PHI nodes.  However, we don't get to stop if we
9371     // cease to be able to evaluate one of them or if they stop evolving,
9372     // because that doesn't necessarily prevent us from computing PN.
9373     SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
9374     for (const auto &I : CurrentIterVals) {
9375       PHINode *PHI = dyn_cast<PHINode>(I.first);
9376       if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
9377       PHIsToCompute.emplace_back(PHI, I.second);
9378     }
9379     // We use two distinct loops because EvaluateExpression may invalidate any
9380     // iterators into CurrentIterVals.
9381     for (const auto &I : PHIsToCompute) {
9382       PHINode *PHI = I.first;
9383       Constant *&NextPHI = NextIterVals[PHI];
9384       if (!NextPHI) {   // Not already computed.
9385         Value *BEValue = PHI->getIncomingValueForBlock(Latch);
9386         NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
9387       }
9388       if (NextPHI != I.second)
9389         StoppedEvolving = false;
9390     }
9391 
9392     // If all entries in CurrentIterVals == NextIterVals then we can stop
9393     // iterating; the loop can't continue to change.
9394     if (StoppedEvolving)
9395       return RetVal = CurrentIterVals[PN];
9396 
9397     CurrentIterVals.swap(NextIterVals);
9398   }
9399 }
9400 
9401 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
9402                                                           Value *Cond,
9403                                                           bool ExitWhen) {
9404   PHINode *PN = getConstantEvolvingPHI(Cond, L);
9405   if (!PN) return getCouldNotCompute();
9406 
9407   // If the loop is canonicalized, the PHI will have exactly two entries.
9408   // That's the only form we support here.
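  // For example (illustrative), a canonicalized PHI looks like
  //
  //   %iv = phi i32 [ %start, %preheader ], [ %iv.next, %latch ]
  //
  // with exactly one incoming value from outside the loop and one from the
  // latch.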
9409   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
9410 
9411   DenseMap<Instruction *, Constant *> CurrentIterVals;
9412   BasicBlock *Header = L->getHeader();
9413   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
9414 
9415   BasicBlock *Latch = L->getLoopLatch();
9416   assert(Latch && "Should follow from NumIncomingValues == 2!");
9417 
9418   for (PHINode &PHI : Header->phis()) {
9419     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
9420       CurrentIterVals[&PHI] = StartCST;
9421   }
9422   if (!CurrentIterVals.count(PN))
9423     return getCouldNotCompute();
9424 
9425   // Okay, we found a PHI node that defines the trip count of this loop.  Execute
9426   // the loop symbolically to determine when the condition gets a value of
9427   // "ExitWhen".
9428   unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
9429   const DataLayout &DL = getDataLayout();
9430   for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
9431     auto *CondVal = dyn_cast_or_null<ConstantInt>(
9432         EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
9433 
9434     // Couldn't symbolically evaluate.
9435     if (!CondVal) return getCouldNotCompute();
9436 
9437     if (CondVal->getValue() == uint64_t(ExitWhen)) {
9438       ++NumBruteForceTripCountsComputed;
9439       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
9440     }
9441 
9442     // Update all the PHI nodes for the next iteration.
9443     DenseMap<Instruction *, Constant *> NextIterVals;
9444 
9445     // Create a list of which PHIs we need to compute.  We want to do this before
9446     // calling EvaluateExpression on them because that may invalidate iterators
9447     // into CurrentIterVals.
9448     SmallVector<PHINode *, 8> PHIsToCompute;
9449     for (const auto &I : CurrentIterVals) {
9450       PHINode *PHI = dyn_cast<PHINode>(I.first);
9451       if (!PHI || PHI->getParent() != Header) continue;
9452       PHIsToCompute.push_back(PHI);
9453     }
9454     for (PHINode *PHI : PHIsToCompute) {
9455       Constant *&NextPHI = NextIterVals[PHI];
9456       if (NextPHI) continue;    // Already computed!
9457 
9458       Value *BEValue = PHI->getIncomingValueForBlock(Latch);
9459       NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
9460     }
9461     CurrentIterVals.swap(NextIterVals);
9462   }
9463 
9464   // Too many iterations were needed to evaluate.
9465   return getCouldNotCompute();
9466 }
9467 
9468 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
9469   SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
9470       ValuesAtScopes[V];
9471   // Check to see if we've folded this expression at this loop before.
9472   for (auto &LS : Values)
9473     if (LS.first == L)
9474       return LS.second ? LS.second : V;
9475 
9476   Values.emplace_back(L, nullptr);
9477 
9478   // Otherwise compute it.
9479   const SCEV *C = computeSCEVAtScope(V, L);
9480   for (auto &LS : reverse(ValuesAtScopes[V]))
9481     if (LS.first == L) {
9482       LS.second = C;
9483       if (!isa<SCEVConstant>(C))
9484         ValuesAtScopesUsers[C].push_back({L, V});
9485       break;
9486     }
9487   return C;
9488 }
9489 
9490 /// This builds up a Constant using the ConstantExpr interface.  That way, we
9491 /// will return Constants for objects which aren't represented by a
9492 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
9493 /// Returns NULL if the SCEV isn't representable as a Constant.
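/// For example (illustrative), the SCEV (8 + @g), where @g is a global, can
/// be rebuilt as a constant GEP adding 8 bytes to an i8* bitcast of @g,
/// whereas an add recurrence has no ConstantExpr equivalent and yields NULL.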
9494 static Constant *BuildConstantFromSCEV(const SCEV *V) {
9495   switch (V->getSCEVType()) {
9496   case scCouldNotCompute:
9497   case scAddRecExpr:
9498     return nullptr;
9499   case scConstant:
9500     return cast<SCEVConstant>(V)->getValue();
9501   case scUnknown:
9502     return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
9503   case scSignExtend: {
9504     const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
9505     if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
9506       return ConstantExpr::getSExt(CastOp, SS->getType());
9507     return nullptr;
9508   }
9509   case scZeroExtend: {
9510     const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
9511     if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
9512       return ConstantExpr::getZExt(CastOp, SZ->getType());
9513     return nullptr;
9514   }
9515   case scPtrToInt: {
9516     const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
9517     if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
9518       return ConstantExpr::getPtrToInt(CastOp, P2I->getType());
9519 
9520     return nullptr;
9521   }
9522   case scTruncate: {
9523     const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
9524     if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
9525       return ConstantExpr::getTrunc(CastOp, ST->getType());
9526     return nullptr;
9527   }
9528   case scAddExpr: {
9529     const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
9530     Constant *C = nullptr;
9531     for (const SCEV *Op : SA->operands()) {
9532       Constant *OpC = BuildConstantFromSCEV(Op);
9533       if (!OpC)
9534         return nullptr;
9535       if (!C) {
9536         C = OpC;
9537         continue;
9538       }
9539       assert(!C->getType()->isPointerTy() &&
9540              "Can only have one pointer, and it must be last");
9541       if (auto *PT = dyn_cast<PointerType>(OpC->getType())) {
9542         // The offsets have been converted to bytes.  We can add bytes to an
9543         // i8* by GEP with the byte count in the first index.
9544         Type *DestPtrTy =
9545             Type::getInt8PtrTy(PT->getContext(), PT->getAddressSpace());
9546         OpC = ConstantExpr::getBitCast(OpC, DestPtrTy);
9547         C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
9548                                            OpC, C);
9549       } else {
9550         C = ConstantExpr::getAdd(C, OpC);
9551       }
9552     }
9553     return C;
9554   }
9555   case scMulExpr: {
9556     const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
9557     Constant *C = nullptr;
9558     for (const SCEV *Op : SM->operands()) {
9559       assert(!Op->getType()->isPointerTy() && "Can't multiply pointers");
9560       Constant *OpC = BuildConstantFromSCEV(Op);
9561       if (!OpC)
9562         return nullptr;
9563       C = C ? ConstantExpr::getMul(C, OpC) : OpC;
9564     }
9565     return C;
9566   }
9567   case scUDivExpr:
9568   case scSMaxExpr:
9569   case scUMaxExpr:
9570   case scSMinExpr:
9571   case scUMinExpr:
9572   case scSequentialUMinExpr:
9573     return nullptr; // TODO: udiv, smax, umax, smin, umin, umin_seq.
9574   }
9575   llvm_unreachable("Unknown SCEV kind!");
9576 }
9577 
9578 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
9579   if (isa<SCEVConstant>(V)) return V;
9580 
9581   // If this instruction is evolved from a constant-evolving PHI, compute the
9582   // exit value from the loop without using SCEVs.
9583   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
9584     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
9585       if (PHINode *PN = dyn_cast<PHINode>(I)) {
9586         const Loop *CurrLoop = this->LI[I->getParent()];
9587         // Looking for loop exit value.
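        // For example (illustrative), when L is the parent loop of CurrLoop,
        // the header PHI for v in
        //
        //   for (i = 0; i != n; ++i) { v = v >> 1; }
        //   use(v);
        //
        // evaluated at scope L is the value v holds once the inner loop has
        // finished executing.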
9588         if (CurrLoop && CurrLoop->getParentLoop() == L &&
9589             PN->getParent() == CurrLoop->getHeader()) {
9590           // Okay, there is no closed form solution for the PHI node.  Check
9591           // to see if the loop that contains it has a known backedge-taken
9592           // count.  If so, we may be able to force computation of the exit
9593           // value.
9594           const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
9595           // This trivial case can show up in some degenerate cases where
9596           // the incoming IR has not yet been fully simplified.
9597           if (BackedgeTakenCount->isZero()) {
9598             Value *InitValue = nullptr;
9599             bool MultipleInitValues = false;
9600             for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
9601               if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
9602                 if (!InitValue)
9603                   InitValue = PN->getIncomingValue(i);
9604                 else if (InitValue != PN->getIncomingValue(i)) {
9605                   MultipleInitValues = true;
9606                   break;
9607                 }
9608               }
9609             }
9610             if (!MultipleInitValues && InitValue)
9611               return getSCEV(InitValue);
9612           }
9613           // Do we have a loop invariant value flowing around the backedge
9614           // for a loop which must execute the backedge?
9615           if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
9616               isKnownPositive(BackedgeTakenCount) &&
9617               PN->getNumIncomingValues() == 2) {
9618 
9619             unsigned InLoopPred =
9620                 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
9621             Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
9622             if (CurrLoop->isLoopInvariant(BackedgeVal))
9623               return getSCEV(BackedgeVal);
9624           }
9625           if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
9626             // Okay, we know how many times the containing loop executes.  If
9627             // this is a constant evolving PHI node, get the final value at
9628             // the specified iteration number.
9629             Constant *RV = getConstantEvolutionLoopExitValue(
9630                 PN, BTCC->getAPInt(), CurrLoop);
9631             if (RV) return getSCEV(RV);
9632           }
9633         }
9634 
9635         // If there is a single-input Phi, evaluate it at our scope.  If we can
9636         // prove that this replacement does not break LCSSA form, use the new value.
9637         if (PN->getNumOperands() == 1) {
9638           const SCEV *Input = getSCEV(PN->getOperand(0));
9639           const SCEV *InputAtScope = getSCEVAtScope(Input, L);
9640           // TODO: We could generalize this using LI.replacementPreservesLCSSAForm;
9641           // for the simplest case, just support constants.
9642           if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
9643         }
9644       }
9645 
9646       // Okay, this is an expression that we cannot symbolically evaluate
9647       // into a SCEV.  Check to see if it's possible to symbolically evaluate
9648       // the arguments into constants, and if so, try to constant propagate the
9649       // result.  This is particularly useful for computing loop exit values.
9650       if (CanConstantFold(I)) {
9651         SmallVector<Constant *, 4> Operands;
9652         bool MadeImprovement = false;
9653         for (Value *Op : I->operands()) {
9654           if (Constant *C = dyn_cast<Constant>(Op)) {
9655             Operands.push_back(C);
9656             continue;
9657           }
9658 
9659           // If an operand is non-constant and its type is neither an integer
9660           // nor a pointer, don't even try to analyze it with SCEV
9661           // techniques.
9662 if (!isSCEVable(Op->getType())) 9663 return V; 9664 9665 const SCEV *OrigV = getSCEV(Op); 9666 const SCEV *OpV = getSCEVAtScope(OrigV, L); 9667 MadeImprovement |= OrigV != OpV; 9668 9669 Constant *C = BuildConstantFromSCEV(OpV); 9670 if (!C) return V; 9671 if (C->getType() != Op->getType()) 9672 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 9673 Op->getType(), 9674 false), 9675 C, Op->getType()); 9676 Operands.push_back(C); 9677 } 9678 9679 // Check to see if getSCEVAtScope actually made an improvement. 9680 if (MadeImprovement) { 9681 Constant *C = nullptr; 9682 const DataLayout &DL = getDataLayout(); 9683 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 9684 if (!C) return V; 9685 return getSCEV(C); 9686 } 9687 } 9688 } 9689 9690 // This is some other type of SCEVUnknown, just return it. 9691 return V; 9692 } 9693 9694 if (isa<SCEVCommutativeExpr>(V) || isa<SCEVSequentialMinMaxExpr>(V)) { 9695 const auto *Comm = cast<SCEVNAryExpr>(V); 9696 // Avoid performing the look-up in the common case where the specified 9697 // expression has no loop-variant portions. 9698 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 9699 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 9700 if (OpAtScope != Comm->getOperand(i)) { 9701 // Okay, at least one of these operands is loop variant but might be 9702 // foldable. Build a new instance of the folded commutative expression. 9703 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 9704 Comm->op_begin()+i); 9705 NewOps.push_back(OpAtScope); 9706 9707 for (++i; i != e; ++i) { 9708 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 9709 NewOps.push_back(OpAtScope); 9710 } 9711 if (isa<SCEVAddExpr>(Comm)) 9712 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 9713 if (isa<SCEVMulExpr>(Comm)) 9714 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 9715 if (isa<SCEVMinMaxExpr>(Comm)) 9716 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 9717 if (isa<SCEVSequentialMinMaxExpr>(Comm)) 9718 return getSequentialMinMaxExpr(Comm->getSCEVType(), NewOps); 9719 llvm_unreachable("Unknown commutative / sequential min/max SCEV type!"); 9720 } 9721 } 9722 // If we got here, all operands are loop invariant. 9723 return Comm; 9724 } 9725 9726 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 9727 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 9728 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 9729 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 9730 return Div; // must be loop invariant 9731 return getUDivExpr(LHS, RHS); 9732 } 9733 9734 // If this is a loop recurrence for a loop that does not contain L, then we 9735 // are dealing with the final value computed by the loop. 9736 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 9737 // First, attempt to evaluate each operand. 9738 // Avoid performing the look-up in the common case where the specified 9739 // expression has no loop-variant portions. 9740 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 9741 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 9742 if (OpAtScope == AddRec->getOperand(i)) 9743 continue; 9744 9745 // Okay, at least one of these operands is loop variant but might be 9746 // foldable. Build a new instance of the folded commutative expression. 
9747 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 9748 AddRec->op_begin()+i); 9749 NewOps.push_back(OpAtScope); 9750 for (++i; i != e; ++i) 9751 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 9752 9753 const SCEV *FoldedRec = 9754 getAddRecExpr(NewOps, AddRec->getLoop(), 9755 AddRec->getNoWrapFlags(SCEV::FlagNW)); 9756 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 9757 // The addrec may be folded to a nonrecurrence, for example, if the 9758 // induction variable is multiplied by zero after constant folding. Go 9759 // ahead and return the folded value. 9760 if (!AddRec) 9761 return FoldedRec; 9762 break; 9763 } 9764 9765 // If the scope is outside the addrec's loop, evaluate it by using the 9766 // loop exit value of the addrec. 9767 if (!AddRec->getLoop()->contains(L)) { 9768 // To evaluate this recurrence, we need to know how many times the AddRec 9769 // loop iterates. Compute this now. 9770 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 9771 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 9772 9773 // Then, evaluate the AddRec. 9774 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 9775 } 9776 9777 return AddRec; 9778 } 9779 9780 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 9781 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 9782 if (Op == Cast->getOperand()) 9783 return Cast; // must be loop invariant 9784 return getCastExpr(Cast->getSCEVType(), Op, Cast->getType()); 9785 } 9786 9787 llvm_unreachable("Unknown SCEV type!"); 9788 } 9789 9790 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 9791 return getSCEVAtScope(getSCEV(V), L); 9792 } 9793 9794 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 9795 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 9796 return stripInjectiveFunctions(ZExt->getOperand()); 9797 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 9798 return stripInjectiveFunctions(SExt->getOperand()); 9799 return S; 9800 } 9801 9802 /// Finds the minimum unsigned root of the following equation: 9803 /// 9804 /// A * X = B (mod N) 9805 /// 9806 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 9807 /// A and B isn't important. 9808 /// 9809 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 9810 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 9811 ScalarEvolution &SE) { 9812 uint32_t BW = A.getBitWidth(); 9813 assert(BW == SE.getTypeSizeInBits(B->getType())); 9814 assert(A != 0 && "A must be non-zero."); 9815 9816 // 1. D = gcd(A, N) 9817 // 9818 // The gcd of A and N may have only one prime factor: 2. The number of 9819 // trailing zeros in A is its multiplicity 9820 uint32_t Mult2 = A.countTrailingZeros(); 9821 // D = 2^Mult2 9822 9823 // 2. Check if B is divisible by D. 9824 // 9825 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 9826 // is not less than multiplicity of this prime factor for D. 9827 if (SE.GetMinTrailingZeros(B) < Mult2) 9828 return SE.getCouldNotCompute(); 9829 9830 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 9831 // modulo (N / D). 9832 // 9833 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 9834 // (N / D) in general. The inverse itself always fits into BW bits, though, 9835 // so we immediately truncate it. 
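  // Worked example (illustrative, not from the original comments): solve
  // 6*X = 4 (mod 256), i.e. A = 6, B = 4, BW = 8.  Then Mult2 = 1 and D = 2,
  // and B has at least one trailing zero, so a solution exists.  A/D = 3,
  // N/D = 128, and I = 3^-1 (mod 128) = 43, since 3*43 = 129 = 1 (mod 128).
  // The result is (I*B mod N)/D = (43*4 mod 256)/2 = 172/2 = 86, and indeed
  // 6*86 = 516 = 4 (mod 256).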
9836 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 9837 APInt Mod(BW + 1, 0); 9838 Mod.setBit(BW - Mult2); // Mod = N / D 9839 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 9840 9841 // 4. Compute the minimum unsigned root of the equation: 9842 // I * (B / D) mod (N / D) 9843 // To simplify the computation, we factor out the divide by D: 9844 // (I * B mod N) / D 9845 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 9846 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 9847 } 9848 9849 /// For a given quadratic addrec, generate coefficients of the corresponding 9850 /// quadratic equation, multiplied by a common value to ensure that they are 9851 /// integers. 9852 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 9853 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 9854 /// were multiplied by, and BitWidth is the bit width of the original addrec 9855 /// coefficients. 9856 /// This function returns None if the addrec coefficients are not compile- 9857 /// time constants. 9858 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 9859 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 9860 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 9861 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 9862 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 9863 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 9864 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 9865 << *AddRec << '\n'); 9866 9867 // We currently can only solve this if the coefficients are constants. 9868 if (!LC || !MC || !NC) { 9869 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 9870 return None; 9871 } 9872 9873 APInt L = LC->getAPInt(); 9874 APInt M = MC->getAPInt(); 9875 APInt N = NC->getAPInt(); 9876 assert(!N.isZero() && "This is not a quadratic addrec"); 9877 9878 unsigned BitWidth = LC->getAPInt().getBitWidth(); 9879 unsigned NewWidth = BitWidth + 1; 9880 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 9881 << BitWidth << '\n'); 9882 // The sign-extension (as opposed to a zero-extension) here matches the 9883 // extension used in SolveQuadraticEquationWrap (with the same motivation). 9884 N = N.sext(NewWidth); 9885 M = M.sext(NewWidth); 9886 L = L.sext(NewWidth); 9887 9888 // The increments are M, M+N, M+2N, ..., so the accumulated values are 9889 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 9890 // L+M, L+2M+N, L+3M+3N, ... 9891 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 9892 // 9893 // The equation Acc = 0 is then 9894 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 9895 // In a quadratic form it becomes: 9896 // N n^2 + (2M-N) n + 2L = 0. 9897 9898 APInt A = N; 9899 APInt B = 2 * M - A; 9900 APInt C = 2 * L; 9901 APInt T = APInt(NewWidth, 2); 9902 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 9903 << "x + " << C << ", coeff bw: " << NewWidth 9904 << ", multiplied by " << T << '\n'); 9905 return std::make_tuple(A, B, C, T, BitWidth); 9906 } 9907 9908 /// Helper function to compare optional APInts: 9909 /// (a) if X and Y both exist, return min(X, Y), 9910 /// (b) if neither X nor Y exist, return None, 9911 /// (c) if exactly one of X and Y exists, return that value. 
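/// For example (illustrative), MinOptional(-1, 2) returns -1: both values are
/// sign-extended to a common bit width and compared as signed integers.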
9912 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 9913 if (X && Y) { 9914 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 9915 APInt XW = X->sext(W); 9916 APInt YW = Y->sext(W); 9917 return XW.slt(YW) ? *X : *Y; 9918 } 9919 if (!X && !Y) 9920 return None; 9921 return X ? *X : *Y; 9922 } 9923 9924 /// Helper function to truncate an optional APInt to a given BitWidth. 9925 /// When solving addrec-related equations, it is preferable to return a value 9926 /// that has the same bit width as the original addrec's coefficients. If the 9927 /// solution fits in the original bit width, truncate it (except for i1). 9928 /// Returning a value of a different bit width may inhibit some optimizations. 9929 /// 9930 /// In general, a solution to a quadratic equation generated from an addrec 9931 /// may require BW+1 bits, where BW is the bit width of the addrec's 9932 /// coefficients. The reason is that the coefficients of the quadratic 9933 /// equation are BW+1 bits wide (to avoid truncation when converting from 9934 /// the addrec to the equation). 9935 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 9936 if (!X) 9937 return None; 9938 unsigned W = X->getBitWidth(); 9939 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 9940 return X->trunc(BitWidth); 9941 return X; 9942 } 9943 9944 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 9945 /// iterations. The values L, M, N are assumed to be signed, and they 9946 /// should all have the same bit widths. 9947 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 9948 /// where BW is the bit width of the addrec's coefficients. 9949 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 9950 /// returned as such, otherwise the bit width of the returned value may 9951 /// be greater than BW. 9952 /// 9953 /// This function returns None if 9954 /// (a) the addrec coefficients are not constant, or 9955 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 9956 /// like x^2 = 5, no integer solutions exist, in other cases an integer 9957 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 9958 static Optional<APInt> 9959 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 9960 APInt A, B, C, M; 9961 unsigned BitWidth; 9962 auto T = GetQuadraticEquation(AddRec); 9963 if (!T) 9964 return None; 9965 9966 std::tie(A, B, C, M, BitWidth) = *T; 9967 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 9968 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 9969 if (!X) 9970 return None; 9971 9972 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 9973 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 9974 if (!V->isZero()) 9975 return None; 9976 9977 return TruncIfPossible(X, BitWidth); 9978 } 9979 9980 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 9981 /// iterations. The values M, N are assumed to be signed, and they 9982 /// should all have the same bit widths. 9983 /// Find the least n such that c(n) does not belong to the given range, 9984 /// while c(n-1) does. 9985 /// 9986 /// This function returns None if 9987 /// (a) the addrec coefficients are not constant, or 9988 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 9989 /// bounds of the range. 
9990 static Optional<APInt> 9991 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 9992 const ConstantRange &Range, ScalarEvolution &SE) { 9993 assert(AddRec->getOperand(0)->isZero() && 9994 "Starting value of addrec should be 0"); 9995 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 9996 << Range << ", addrec " << *AddRec << '\n'); 9997 // This case is handled in getNumIterationsInRange. Here we can assume that 9998 // we start in the range. 9999 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 10000 "Addrec's initial value should be in range"); 10001 10002 APInt A, B, C, M; 10003 unsigned BitWidth; 10004 auto T = GetQuadraticEquation(AddRec); 10005 if (!T) 10006 return None; 10007 10008 // Be careful about the return value: there can be two reasons for not 10009 // returning an actual number. First, if no solutions to the equations 10010 // were found, and second, if the solutions don't leave the given range. 10011 // The first case means that the actual solution is "unknown", the second 10012 // means that it's known, but not valid. If the solution is unknown, we 10013 // cannot make any conclusions. 10014 // Return a pair: the optional solution and a flag indicating if the 10015 // solution was found. 10016 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 10017 // Solve for signed overflow and unsigned overflow, pick the lower 10018 // solution. 10019 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 10020 << Bound << " (before multiplying by " << M << ")\n"); 10021 Bound *= M; // The quadratic equation multiplier. 10022 10023 Optional<APInt> SO = None; 10024 if (BitWidth > 1) { 10025 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 10026 "signed overflow\n"); 10027 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 10028 } 10029 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 10030 "unsigned overflow\n"); 10031 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 10032 BitWidth+1); 10033 10034 auto LeavesRange = [&] (const APInt &X) { 10035 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 10036 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 10037 if (Range.contains(V0->getValue())) 10038 return false; 10039 // X should be at least 1, so X-1 is non-negative. 10040 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 10041 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 10042 if (Range.contains(V1->getValue())) 10043 return true; 10044 return false; 10045 }; 10046 10047 // If SolveQuadraticEquationWrap returns None, it means that there can 10048 // be a solution, but the function failed to find it. We cannot treat it 10049 // as "no solution". 10050 if (!SO || !UO) 10051 return { None, false }; 10052 10053 // Check the smaller value first to see if it leaves the range. 10054 // At this point, both SO and UO must have values. 10055 Optional<APInt> Min = MinOptional(SO, UO); 10056 if (LeavesRange(*Min)) 10057 return { Min, true }; 10058 Optional<APInt> Max = Min == SO ? UO : SO; 10059 if (LeavesRange(*Max)) 10060 return { Max, true }; 10061 10062 // Solutions were found, but were eliminated, hence the "true". 10063 return { None, true }; 10064 }; 10065 10066 std::tie(A, B, C, M, BitWidth) = *T; 10067 // Lower bound is inclusive, subtract 1 to represent the exiting value. 
10068   APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
10069   APInt Upper = Range.getUpper().sext(A.getBitWidth());
10070   auto SL = SolveForBoundary(Lower);
10071   auto SU = SolveForBoundary(Upper);
10072   // If any of the solutions was unknown, no meaningful conclusions can
10073   // be made.
10074   if (!SL.second || !SU.second)
10075     return None;
10076 
10077   // Claim: The correct solution is not some value between Min and Max.
10078   //
10079   // Justification: Assuming that Min and Max are different values, one of
10080   // them is when the first signed overflow happens, the other is when the
10081   // first unsigned overflow happens.  Crossing the range boundary is only
10082   // possible via an overflow (treating 0 as a special case of it, modeling
10083   // an overflow as crossing k*2^W for some k).
10084   //
10085   // The interesting case here is when Min was eliminated as an invalid
10086   // solution, but Max was not.  The argument is that if there was another
10087   // overflow between Min and Max, it would also have been eliminated if
10088   // it was considered.
10089   //
10090   // For a given boundary, it is possible to have two overflows of the same
10091   // type (signed/unsigned) without having the other type in between: this
10092   // can happen when the vertex of the parabola is between the iterations
10093   // corresponding to the overflows.  This is only possible when the two
10094   // overflows cross k*2^W for the same k.  In such a case, if the second one
10095   // left the range (and was the first one to do so), the first overflow
10096   // would have to enter the range, which would mean that either we had left
10097   // the range before or that we started outside of it.  Both of these cases
10098   // are contradictions.
10099   //
10100   // Claim: In the case where SolveForBoundary returns None, the correct
10101   // solution is not some value between the Max for this boundary and the
10102   // Min of the other boundary.
10103   //
10104   // Justification: Assume that we had such Max_A and Min_B corresponding
10105   // to range boundaries A and B and such that Max_A < Min_B.  If there was
10106   // a solution between Max_A and Min_B, it would have to be caused by an
10107   // overflow corresponding to either A or B.  It cannot correspond to B,
10108   // since Min_B is the first occurrence of such an overflow.  If it
10109   // corresponded to A, it would have to be either a signed or an unsigned
10110   // overflow that is larger than both eliminated overflows for A.  But
10111   // between the eliminated overflows and this overflow, the values would
10112   // cover the entire value space, thus crossing the other boundary, which
10113   // is a contradiction.
10114 
10115   return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
10116 }
10117 
10118 ScalarEvolution::ExitLimit
10119 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
10120                               bool AllowPredicates) {
10121 
10122   // This is only used for loops with a "x != y" exit test.  The exit condition
10123   // is now expressed as a single expression, V = x-y.  So the exit test is
10124   // effectively V != 0.  We know, and take advantage of, the fact that this
10125   // expression is only used in a comparison-with-zero context.
10126 
10127   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
10128   // If the value is a constant:
10129   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
10130     // If the value is already zero, the branch will execute zero times.
10131 if (C->getValue()->isZero()) return C; 10132 return getCouldNotCompute(); // Otherwise it will loop infinitely. 10133 } 10134 10135 const SCEVAddRecExpr *AddRec = 10136 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V)); 10137 10138 if (!AddRec && AllowPredicates) 10139 // Try to make this an AddRec using runtime tests, in the first X 10140 // iterations of this loop, where X is the SCEV expression found by the 10141 // algorithm below. 10142 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 10143 10144 if (!AddRec || AddRec->getLoop() != L) 10145 return getCouldNotCompute(); 10146 10147 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 10148 // the quadratic equation to solve it. 10149 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 10150 // We can only use this value if the chrec ends up with an exact zero 10151 // value at this index. When solving for "X*X != 5", for example, we 10152 // should not accept a root of 2. 10153 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { 10154 const auto *R = cast<SCEVConstant>(getConstant(*S)); 10155 return ExitLimit(R, R, false, Predicates); 10156 } 10157 return getCouldNotCompute(); 10158 } 10159 10160 // Otherwise we can only handle this if it is affine. 10161 if (!AddRec->isAffine()) 10162 return getCouldNotCompute(); 10163 10164 // If this is an affine expression, the execution count of this branch is 10165 // the minimum unsigned root of the following equation: 10166 // 10167 // Start + Step*N = 0 (mod 2^BW) 10168 // 10169 // equivalent to: 10170 // 10171 // Step*N = -Start (mod 2^BW) 10172 // 10173 // where BW is the common bit width of Start and Step. 10174 10175 // Get the initial value for the loop. 10176 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 10177 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 10178 10179 // For now we handle only constant steps. 10180 // 10181 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 10182 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 10183 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 10184 // We have not yet seen any such cases. 10185 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 10186 if (!StepC || StepC->getValue()->isZero()) 10187 return getCouldNotCompute(); 10188 10189 // For positive steps (counting up until unsigned overflow): 10190 // N = -Start/Step (as unsigned) 10191 // For negative steps (counting down to zero): 10192 // N = Start/-Step 10193 // First compute the unsigned distance from zero in the direction of Step. 10194 bool CountDown = StepC->getAPInt().isNegative(); 10195 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 10196 10197 // Handle unitary steps, which cannot wraparound. 10198 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 10199 // N = Distance (as unsigned) 10200 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 10201 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L)); 10202 MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance)); 10203 10204 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 10205 // we end up with a loop whose backedge-taken count is n - 1. Detect this 10206 // case, and see if we can improve the bound. 
10207 // 10208 // Explicitly handling this here is necessary because getUnsignedRange 10209 // isn't context-sensitive; it doesn't know that we only care about the 10210 // range inside the loop. 10211 const SCEV *Zero = getZero(Distance->getType()); 10212 const SCEV *One = getOne(Distance->getType()); 10213 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 10214 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 10215 // If Distance + 1 doesn't overflow, we can compute the maximum distance 10216 // as "unsigned_max(Distance + 1) - 1". 10217 ConstantRange CR = getUnsignedRange(DistancePlusOne); 10218 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 10219 } 10220 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 10221 } 10222 10223 // If the condition controls loop exit (the loop exits only if the expression 10224 // is true) and the addition is no-wrap we can use unsigned divide to 10225 // compute the backedge count. In this case, the step may not divide the 10226 // distance, but we don't care because if the condition is "missed" the loop 10227 // will have undefined behavior due to wrapping. 10228 if (ControlsExit && AddRec->hasNoSelfWrap() && 10229 loopHasNoAbnormalExits(AddRec->getLoop())) { 10230 const SCEV *Exact = 10231 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 10232 const SCEV *Max = getCouldNotCompute(); 10233 if (Exact != getCouldNotCompute()) { 10234 APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L)); 10235 Max = getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact))); 10236 } 10237 return ExitLimit(Exact, Max, false, Predicates); 10238 } 10239 10240 // Solve the general equation. 10241 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 10242 getNegativeSCEV(Start), *this); 10243 10244 const SCEV *M = E; 10245 if (E != getCouldNotCompute()) { 10246 APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L)); 10247 M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E))); 10248 } 10249 return ExitLimit(E, M, false, Predicates); 10250 } 10251 10252 ScalarEvolution::ExitLimit 10253 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 10254 // Loops that look like: while (X == 0) are very strange indeed. We don't 10255 // handle them yet except for the trivial case. This could be expanded in the 10256 // future as needed. 10257 10258 // If the value is a constant, check to see if it is known to be non-zero 10259 // already. If so, the backedge will execute zero times. 10260 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 10261 if (!C->getValue()->isZero()) 10262 return getZero(C->getType()); 10263 return getCouldNotCompute(); // Otherwise it will loop infinitely. 10264 } 10265 10266 // We could implement others, but I really doubt anyone writes loops like 10267 // this, and if they did, they would already be constant folded. 10268 return getCouldNotCompute(); 10269 } 10270 10271 std::pair<const BasicBlock *, const BasicBlock *> 10272 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 10273 const { 10274 // If the block has a unique predecessor, then there is no path from the 10275 // predecessor to the block that does not go through the direct edge 10276 // from the predecessor to the block. 10277 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 10278 return {Pred, BB}; 10279 10280 // A loop's header is defined to be a block that dominates the loop. 
10281 // If the header has a unique predecessor outside the loop, it must be 10282 // a block that has exactly one successor that can reach the loop. 10283 if (const Loop *L = LI.getLoopFor(BB)) 10284 return {L->getLoopPredecessor(), L->getHeader()}; 10285 10286 return {nullptr, nullptr}; 10287 } 10288 10289 /// SCEV structural equivalence is usually sufficient for testing whether two 10290 /// expressions are equal, however for the purposes of looking for a condition 10291 /// guarding a loop, it can be useful to be a little more general, since a 10292 /// front-end may have replicated the controlling expression. 10293 static bool HasSameValue(const SCEV *A, const SCEV *B) { 10294 // Quick check to see if they are the same SCEV. 10295 if (A == B) return true; 10296 10297 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 10298 // Not all instructions that are "identical" compute the same value. For 10299 // instance, two distinct alloca instructions allocating the same type are 10300 // identical and do not read memory; but compute distinct values. 10301 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 10302 }; 10303 10304 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 10305 // two different instructions with the same value. Check for this case. 10306 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 10307 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 10308 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 10309 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 10310 if (ComputesEqualValues(AI, BI)) 10311 return true; 10312 10313 // Otherwise assume they may have a different value. 10314 return false; 10315 } 10316 10317 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 10318 const SCEV *&LHS, const SCEV *&RHS, 10319 unsigned Depth, 10320 bool ControllingFiniteLoop) { 10321 bool Changed = false; 10322 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 10323 // '0 != 0'. 10324 auto TrivialCase = [&](bool TriviallyTrue) { 10325 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 10326 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 10327 return true; 10328 }; 10329 // If we hit the max recursion limit bail out. 10330 if (Depth >= 3) 10331 return false; 10332 10333 // Canonicalize a constant to the right side. 10334 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 10335 // Check for both operands constant. 10336 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 10337 if (ConstantExpr::getICmp(Pred, 10338 LHSC->getValue(), 10339 RHSC->getValue())->isNullValue()) 10340 return TrivialCase(false); 10341 else 10342 return TrivialCase(true); 10343 } 10344 // Otherwise swap the operands to put the constant on the right. 10345 std::swap(LHS, RHS); 10346 Pred = ICmpInst::getSwappedPredicate(Pred); 10347 Changed = true; 10348 } 10349 10350 // If we're comparing an addrec with a value which is loop-invariant in the 10351 // addrec's loop, put the addrec on the left. Also make a dominance check, 10352 // as both operands could be addrecs loop-invariant in each other's loop. 
10353   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
10354     const Loop *L = AR->getLoop();
10355     if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
10356       std::swap(LHS, RHS);
10357       Pred = ICmpInst::getSwappedPredicate(Pred);
10358       Changed = true;
10359     }
10360   }
10361 
10362   // If there's a constant operand, canonicalize comparisons with boundary
10363   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
10364   if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
10365     const APInt &RA = RC->getAPInt();
10366 
10367     bool SimplifiedByConstantRange = false;
10368 
10369     if (!ICmpInst::isEquality(Pred)) {
10370       ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
10371       if (ExactCR.isFullSet())
10372         return TrivialCase(true);
10373       else if (ExactCR.isEmptySet())
10374         return TrivialCase(false);
10375 
10376       APInt NewRHS;
10377       CmpInst::Predicate NewPred;
10378       if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
10379           ICmpInst::isEquality(NewPred)) {
10380         // We were able to convert an inequality to an equality.
10381         Pred = NewPred;
10382         RHS = getConstant(NewRHS);
10383         Changed = SimplifiedByConstantRange = true;
10384       }
10385     }
10386 
10387     if (!SimplifiedByConstantRange) {
10388       switch (Pred) {
10389       default:
10390         break;
10391       case ICmpInst::ICMP_EQ:
10392       case ICmpInst::ICMP_NE:
10393         // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
10394         if (!RA)
10395           if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
10396             if (const SCEVMulExpr *ME =
10397                     dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
10398               if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
10399                   ME->getOperand(0)->isAllOnesValue()) {
10400                 RHS = AE->getOperand(1);
10401                 LHS = ME->getOperand(1);
10402                 Changed = true;
10403               }
10404         break;
10405 
10406 
10407         // The "Should have been caught earlier!" messages refer to the fact
10408         // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
10409         // should have fired on the corresponding cases, and canonicalized the
10410         // check to a trivial case.
10411 
10412       case ICmpInst::ICMP_UGE:
10413         assert(!RA.isMinValue() && "Should have been caught earlier!");
10414         Pred = ICmpInst::ICMP_UGT;
10415         RHS = getConstant(RA - 1);
10416         Changed = true;
10417         break;
10418       case ICmpInst::ICMP_ULE:
10419         assert(!RA.isMaxValue() && "Should have been caught earlier!");
10420         Pred = ICmpInst::ICMP_ULT;
10421         RHS = getConstant(RA + 1);
10422         Changed = true;
10423         break;
10424       case ICmpInst::ICMP_SGE:
10425         assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
10426         Pred = ICmpInst::ICMP_SGT;
10427         RHS = getConstant(RA - 1);
10428         Changed = true;
10429         break;
10430       case ICmpInst::ICMP_SLE:
10431         assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
10432         Pred = ICmpInst::ICMP_SLT;
10433         RHS = getConstant(RA + 1);
10434         Changed = true;
10435         break;
10436       }
10437     }
10438   }
10439 
10440   // Check for obvious equality.
10441   if (HasSameValue(LHS, RHS)) {
10442     if (ICmpInst::isTrueWhenEqual(Pred))
10443       return TrivialCase(true);
10444     if (ICmpInst::isFalseWhenEqual(Pred))
10445       return TrivialCase(false);
10446   }
10447 
10448   // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
10449   // adding or subtracting 1 from one of the operands.  This can be done for
10450   // one of two reasons:
10451   // 1) The range of the RHS does not include the (signed/unsigned) boundaries
10452   // 2) The loop is finite, with this comparison controlling the exit.
Since the 10453 // loop is finite, the bound cannot include the corresponding boundary 10454 // (otherwise it would loop forever). 10455 switch (Pred) { 10456 case ICmpInst::ICMP_SLE: 10457 if (ControllingFiniteLoop || !getSignedRangeMax(RHS).isMaxSignedValue()) { 10458 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 10459 SCEV::FlagNSW); 10460 Pred = ICmpInst::ICMP_SLT; 10461 Changed = true; 10462 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 10463 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 10464 SCEV::FlagNSW); 10465 Pred = ICmpInst::ICMP_SLT; 10466 Changed = true; 10467 } 10468 break; 10469 case ICmpInst::ICMP_SGE: 10470 if (ControllingFiniteLoop || !getSignedRangeMin(RHS).isMinSignedValue()) { 10471 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 10472 SCEV::FlagNSW); 10473 Pred = ICmpInst::ICMP_SGT; 10474 Changed = true; 10475 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 10476 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 10477 SCEV::FlagNSW); 10478 Pred = ICmpInst::ICMP_SGT; 10479 Changed = true; 10480 } 10481 break; 10482 case ICmpInst::ICMP_ULE: 10483 if (ControllingFiniteLoop || !getUnsignedRangeMax(RHS).isMaxValue()) { 10484 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 10485 SCEV::FlagNUW); 10486 Pred = ICmpInst::ICMP_ULT; 10487 Changed = true; 10488 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 10489 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 10490 Pred = ICmpInst::ICMP_ULT; 10491 Changed = true; 10492 } 10493 break; 10494 case ICmpInst::ICMP_UGE: 10495 if (ControllingFiniteLoop || !getUnsignedRangeMin(RHS).isMinValue()) { 10496 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 10497 Pred = ICmpInst::ICMP_UGT; 10498 Changed = true; 10499 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 10500 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 10501 SCEV::FlagNUW); 10502 Pred = ICmpInst::ICMP_UGT; 10503 Changed = true; 10504 } 10505 break; 10506 default: 10507 break; 10508 } 10509 10510 // TODO: More simplifications are possible here. 10511 10512 // Recursively simplify until we either hit a recursion limit or nothing 10513 // changes. 10514 if (Changed) 10515 return SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1, 10516 ControllingFiniteLoop); 10517 10518 return Changed; 10519 } 10520 10521 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 10522 return getSignedRangeMax(S).isNegative(); 10523 } 10524 10525 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 10526 return getSignedRangeMin(S).isStrictlyPositive(); 10527 } 10528 10529 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 10530 return !getSignedRangeMin(S).isNegative(); 10531 } 10532 10533 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 10534 return !getSignedRangeMax(S).isStrictlyPositive(); 10535 } 10536 10537 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 10538 return getUnsignedRangeMin(S) != 0; 10539 } 10540 10541 std::pair<const SCEV *, const SCEV *> 10542 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 10543 // Compute SCEV on entry of loop L. 10544 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 10545 if (Start == getCouldNotCompute()) 10546 return { Start, Start }; 10547 // Compute post increment SCEV for loop L. 
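  // (Illustrative example: for S = {A,+,1}<L>, the pair built here is
  // (A, {A+1,+,1}<L>), i.e. the value on loop entry and the value after the
  // increment.)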
10548   const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
10549   assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
10550   return { Start, PostInc };
10551 }
10552
10553 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
10554                                           const SCEV *LHS, const SCEV *RHS) {
10555   // First collect all loops.
10556   SmallPtrSet<const Loop *, 8> LoopsUsed;
10557   getUsedLoops(LHS, LoopsUsed);
10558   getUsedLoops(RHS, LoopsUsed);
10559
10560   if (LoopsUsed.empty())
10561     return false;
10562
10563   // Domination relationship must be a linear order on collected loops.
10564 #ifndef NDEBUG
10565   for (const auto *L1 : LoopsUsed)
10566     for (const auto *L2 : LoopsUsed)
10567       assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
10568               DT.dominates(L2->getHeader(), L1->getHeader())) &&
10569              "Domination relationship is not a linear order");
10570 #endif
10571
10572   const Loop *MDL =
10573       *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
10574                         [&](const Loop *L1, const Loop *L2) {
10575                           return DT.properlyDominates(L1->getHeader(), L2->getHeader());
10576                         });
10577
10578   // Get the init and post-increment values for LHS.
10579   auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
10580   // If LHS contains an unknown non-invariant SCEV, bail out.
10581   if (SplitLHS.first == getCouldNotCompute())
10582     return false;
10583   assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
10584   // Get the init and post-increment values for RHS.
10585   auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
10586   // If RHS contains an unknown non-invariant SCEV, bail out.
10587   if (SplitRHS.first == getCouldNotCompute())
10588     return false;
10589   assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
10590   // It is possible that the init SCEV contains an invariant load that does
10591   // not dominate MDL and is not available at MDL loop entry, so we should
10592   // check for that here.
10593   if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
10594       !isAvailableAtLoopEntry(SplitRHS.first, MDL))
10595     return false;
10596
10597   // The backedge guard check seems to be faster than the entry one, so doing
10598   // it first can short-circuit the whole estimation in some cases.
10599   return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
10600                                      SplitRHS.second) &&
10601          isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
10602 }
10603
10604 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
10605                                        const SCEV *LHS, const SCEV *RHS) {
10606   // Canonicalize the inputs first.
10607   (void)SimplifyICmpOperands(Pred, LHS, RHS);
10608
10609   if (isKnownViaInduction(Pred, LHS, RHS))
10610     return true;
10611
10612   if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
10613     return true;
10614
10615   // Otherwise see what can be done with some simple reasoning.
10616   return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
10617 }
10618
10619 Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
10620                                                   const SCEV *LHS,
10621                                                   const SCEV *RHS) {
10622   if (isKnownPredicate(Pred, LHS, RHS))
10623     return true;
10624   else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
10625     return false;
10626   return None;
10627 }
10628
10629 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
10630                                          const SCEV *LHS, const SCEV *RHS,
10631                                          const Instruction *CtxI) {
10632   // TODO: Analyze guards and assumes from Context's block.
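  // For example (illustrative): if CtxI's block is only reachable through a
  // branch on "icmp slt i64 %x, %n", then "%x s< %n" can be known here even
  // when it cannot be proved globally.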
10633   return isKnownPredicate(Pred, LHS, RHS) ||
10634          isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS);
10635 }
10636
10637 Optional<bool> ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred,
10638                                                     const SCEV *LHS,
10639                                                     const SCEV *RHS,
10640                                                     const Instruction *CtxI) {
10641   Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
10642   if (KnownWithoutContext)
10643     return KnownWithoutContext;
10644
10645   if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS))
10646     return true;
10647   else if (isBasicBlockEntryGuardedByCond(CtxI->getParent(),
10648                                           ICmpInst::getInversePredicate(Pred),
10649                                           LHS, RHS))
10650     return false;
10651   return None;
10652 }
10653
10654 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
10655                                               const SCEVAddRecExpr *LHS,
10656                                               const SCEV *RHS) {
10657   const Loop *L = LHS->getLoop();
10658   return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
10659          isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
10660 }
10661
10662 Optional<ScalarEvolution::MonotonicPredicateType>
10663 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
10664                                            ICmpInst::Predicate Pred) {
10665   auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
10666
10667 #ifndef NDEBUG
10668   // Verify an invariant: swapping the predicate should turn a monotonically
10669   // increasing change into a monotonically decreasing one, and vice versa.
10670   if (Result) {
10671     auto ResultSwapped =
10672         getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
10673
10674     assert(ResultSwapped && "should be able to analyze both!");
10675     assert(ResultSwapped.value() != Result.value() &&
10676            "monotonicity should flip as we flip the predicate");
10677   }
10678 #endif
10679
10680   return Result;
10681 }
10682
10683 Optional<ScalarEvolution::MonotonicPredicateType>
10684 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
10685                                                ICmpInst::Predicate Pred) {
10686   // A zero step value for LHS means the induction variable is essentially a
10687   // loop invariant value. We don't really depend on the predicate actually
10688   // flipping from false to true (for increasing predicates, and the other way
10689   // around for decreasing predicates); all we care about is that *if* the
10690   // predicate changes, then it only changes from false to true.
10691   //
10692   // A zero step value in itself is not very useful, but there may be places
10693   // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
10694   // as general as possible.
10695
10696   // Only handle LE/LT/GE/GT predicates.
10697   if (!ICmpInst::isRelational(Pred))
10698     return None;
10699
10700   bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
10701   assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
10702          "Should be greater or less!");
10703
10704   // Check that AR does not wrap.
10705   if (ICmpInst::isUnsigned(Pred)) {
10706     if (!LHS->hasNoUnsignedWrap())
10707       return None;
10708     return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10709   } else {
10710     assert(ICmpInst::isSigned(Pred) &&
10711            "Relational predicate is either signed or unsigned!");
10712     if (!LHS->hasNoSignedWrap())
10713       return None;
10714
10715     const SCEV *Step = LHS->getStepRecurrence(*this);
10716
10717     if (isKnownNonNegative(Step))
10718       return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10719
10720     if (isKnownNonPositive(Step))
10721       return !IsGreater ?
MonotonicallyIncreasing : MonotonicallyDecreasing; 10722 10723 return None; 10724 } 10725 } 10726 10727 Optional<ScalarEvolution::LoopInvariantPredicate> 10728 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, 10729 const SCEV *LHS, const SCEV *RHS, 10730 const Loop *L) { 10731 10732 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 10733 if (!isLoopInvariant(RHS, L)) { 10734 if (!isLoopInvariant(LHS, L)) 10735 return None; 10736 10737 std::swap(LHS, RHS); 10738 Pred = ICmpInst::getSwappedPredicate(Pred); 10739 } 10740 10741 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10742 if (!ArLHS || ArLHS->getLoop() != L) 10743 return None; 10744 10745 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); 10746 if (!MonotonicType) 10747 return None; 10748 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 10749 // true as the loop iterates, and the backedge is control dependent on 10750 // "ArLHS `Pred` RHS" == true then we can reason as follows: 10751 // 10752 // * if the predicate was false in the first iteration then the predicate 10753 // is never evaluated again, since the loop exits without taking the 10754 // backedge. 10755 // * if the predicate was true in the first iteration then it will 10756 // continue to be true for all future iterations since it is 10757 // monotonically increasing. 10758 // 10759 // For both the above possibilities, we can replace the loop varying 10760 // predicate with its value on the first iteration of the loop (which is 10761 // loop invariant). 10762 // 10763 // A similar reasoning applies for a monotonically decreasing predicate, by 10764 // replacing true with false and false with true in the above two bullets. 10765 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; 10766 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 10767 10768 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 10769 return None; 10770 10771 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS); 10772 } 10773 10774 Optional<ScalarEvolution::LoopInvariantPredicate> 10775 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( 10776 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 10777 const Instruction *CtxI, const SCEV *MaxIter) { 10778 // Try to prove the following set of facts: 10779 // - The predicate is monotonic in the iteration space. 10780 // - If the check does not fail on the 1st iteration: 10781 // - No overflow will happen during first MaxIter iterations; 10782 // - It will not fail on the MaxIter'th iteration. 10783 // If the check does fail on the 1st iteration, we leave the loop and no 10784 // other checks matter. 10785 10786 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 10787 if (!isLoopInvariant(RHS, L)) { 10788 if (!isLoopInvariant(LHS, L)) 10789 return None; 10790 10791 std::swap(LHS, RHS); 10792 Pred = ICmpInst::getSwappedPredicate(Pred); 10793 } 10794 10795 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS); 10796 if (!AR || AR->getLoop() != L) 10797 return None; 10798 10799 // The predicate must be relational (i.e. <, <=, >=, >). 10800 if (!ICmpInst::isRelational(Pred)) 10801 return None; 10802 10803 // TODO: Support steps other than +/- 1. 
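  // (Illustrative example: for the exit check "{0,+,1}<L> u< %len" with
  // MaxIter = %n, it is enough that "%n u< %len" holds at the backedge and
  // that "0 u<= %n"; the loop-varying check can then be replaced by the
  // invariant check "0 u< %len" for the first %n iterations.)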
10804   const SCEV *Step = AR->getStepRecurrence(*this);
10805   auto *One = getOne(Step->getType());
10806   auto *MinusOne = getNegativeSCEV(One);
10807   if (Step != One && Step != MinusOne)
10808     return None;
10809
10810   // A type mismatch here means that MaxIter is potentially larger than the
10811   // max unsigned value in the start type, which means we cannot prove no-wrap
10812   // for the indvar.
10813   if (AR->getType() != MaxIter->getType())
10814     return None;
10815
10816   // Value of the IV on the suggested last iteration.
10817   const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
10818   // Does it still meet the requirement?
10819   if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
10820     return None;
10821   // Because the step is +/- 1 and MaxIter has the same type as Start (i.e.
10822   // it does not exceed the max unsigned value of this type), this effectively
10823   // proves that there is no wrap during the iteration. To prove that there is
10824   // no signed/unsigned wrap, we need to check that
10825   // Start <= Last for step = 1 or Start >= Last for step = -1.
10826   ICmpInst::Predicate NoOverflowPred =
10827       CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
10828   if (Step == MinusOne)
10829     NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
10830   const SCEV *Start = AR->getStart();
10831   if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI))
10832     return None;
10833
10834   // Everything is fine.
10835   return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
10836 }
10837
10838 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
10839     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
10840   if (HasSameValue(LHS, RHS))
10841     return ICmpInst::isTrueWhenEqual(Pred);
10842
10843   // This code is split out from isKnownPredicate because it is called from
10844   // within isLoopEntryGuardedByCond.
10845
10846   auto CheckRanges = [&](const ConstantRange &RangeLHS,
10847                          const ConstantRange &RangeRHS) {
10848     return RangeLHS.icmp(Pred, RangeRHS);
10849   };
10850
10851   // The check at the top of the function catches the case where the values are
10852   // known to be equal.
10853   if (Pred == CmpInst::ICMP_EQ)
10854     return false;
10855
10856   if (Pred == CmpInst::ICMP_NE) {
10857     auto SL = getSignedRange(LHS);
10858     auto SR = getSignedRange(RHS);
10859     if (CheckRanges(SL, SR))
10860       return true;
10861     auto UL = getUnsignedRange(LHS);
10862     auto UR = getUnsignedRange(RHS);
10863     if (CheckRanges(UL, UR))
10864       return true;
10865     auto *Diff = getMinusSCEV(LHS, RHS);
10866     return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
10867   }
10868
10869   if (CmpInst::isSigned(Pred)) {
10870     auto SL = getSignedRange(LHS);
10871     auto SR = getSignedRange(RHS);
10872     return CheckRanges(SL, SR);
10873   }
10874
10875   auto UL = getUnsignedRange(LHS);
10876   auto UR = getUnsignedRange(RHS);
10877   return CheckRanges(UL, UR);
10878 }
10879
10880 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
10881                                                     const SCEV *LHS,
10882                                                     const SCEV *RHS) {
10883   // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
10884   // C1 and C2 are constant integers. If either X or Y are not add expressions,
10885   // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via
10886   // OutC1 and OutC2.
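  // (Illustrative example: X = (%a + 1)<nsw> and Y = (%a + 3)<nsw> match with
  // OutC1 = 1 and OutC2 = 3, from which e.g. "X s<= Y" follows because
  // 1 s<= 3.)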
10887 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y, 10888 APInt &OutC1, APInt &OutC2, 10889 SCEV::NoWrapFlags ExpectedFlags) { 10890 const SCEV *XNonConstOp, *XConstOp; 10891 const SCEV *YNonConstOp, *YConstOp; 10892 SCEV::NoWrapFlags XFlagsPresent; 10893 SCEV::NoWrapFlags YFlagsPresent; 10894 10895 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) { 10896 XConstOp = getZero(X->getType()); 10897 XNonConstOp = X; 10898 XFlagsPresent = ExpectedFlags; 10899 } 10900 if (!isa<SCEVConstant>(XConstOp) || 10901 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) 10902 return false; 10903 10904 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) { 10905 YConstOp = getZero(Y->getType()); 10906 YNonConstOp = Y; 10907 YFlagsPresent = ExpectedFlags; 10908 } 10909 10910 if (!isa<SCEVConstant>(YConstOp) || 10911 (YFlagsPresent & ExpectedFlags) != ExpectedFlags) 10912 return false; 10913 10914 if (YNonConstOp != XNonConstOp) 10915 return false; 10916 10917 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt(); 10918 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt(); 10919 10920 return true; 10921 }; 10922 10923 APInt C1; 10924 APInt C2; 10925 10926 switch (Pred) { 10927 default: 10928 break; 10929 10930 case ICmpInst::ICMP_SGE: 10931 std::swap(LHS, RHS); 10932 LLVM_FALLTHROUGH; 10933 case ICmpInst::ICMP_SLE: 10934 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2. 10935 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2)) 10936 return true; 10937 10938 break; 10939 10940 case ICmpInst::ICMP_SGT: 10941 std::swap(LHS, RHS); 10942 LLVM_FALLTHROUGH; 10943 case ICmpInst::ICMP_SLT: 10944 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2. 10945 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2)) 10946 return true; 10947 10948 break; 10949 10950 case ICmpInst::ICMP_UGE: 10951 std::swap(LHS, RHS); 10952 LLVM_FALLTHROUGH; 10953 case ICmpInst::ICMP_ULE: 10954 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2. 10955 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2)) 10956 return true; 10957 10958 break; 10959 10960 case ICmpInst::ICMP_UGT: 10961 std::swap(LHS, RHS); 10962 LLVM_FALLTHROUGH; 10963 case ICmpInst::ICMP_ULT: 10964 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2. 10965 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2)) 10966 return true; 10967 break; 10968 } 10969 10970 return false; 10971 } 10972 10973 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 10974 const SCEV *LHS, 10975 const SCEV *RHS) { 10976 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 10977 return false; 10978 10979 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 10980 // the stack can result in exponential time complexity. 10981 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 10982 10983 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 10984 // 10985 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 10986 // isKnownPredicate. isKnownPredicate is more powerful, but also more 10987 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 10988 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 10989 // use isKnownPredicate later if needed. 
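  // (Illustrative example: to prove "%i u< %len" given "%len s>= 0", it
  // suffices to prove "%i s>= 0" and "%i s< %len" separately.)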
10990   return isKnownNonNegative(RHS) &&
10991          isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
10992          isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
10993 }
10994
10995 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
10996                                         ICmpInst::Predicate Pred,
10997                                         const SCEV *LHS, const SCEV *RHS) {
10998   // No need to even try if we know the module has no guards.
10999   if (!HasGuards)
11000     return false;
11001
11002   return any_of(*BB, [&](const Instruction &I) {
11003     using namespace llvm::PatternMatch;
11004
11005     Value *Condition;
11006     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
11007                          m_Value(Condition))) &&
11008            isImpliedCond(Pred, LHS, RHS, Condition, false);
11009   });
11010 }
11011
11012 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
11013 /// protected by a conditional between LHS and RHS. This is used to
11014 /// eliminate casts.
11015 bool
11016 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
11017                                              ICmpInst::Predicate Pred,
11018                                              const SCEV *LHS, const SCEV *RHS) {
11019   // Interpret a null as meaning no loop, where there is obviously no guard
11020   // (interprocedural conditions notwithstanding). Do not bother about
11021   // unreachable loops.
11022   if (!L || !DT.isReachableFromEntry(L->getHeader()))
11023     return true;
11024
11025   if (VerifyIR)
11026     assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
11027            "This cannot be done on broken IR!");
11028
11029
11030   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11031     return true;
11032
11033   BasicBlock *Latch = L->getLoopLatch();
11034   if (!Latch)
11035     return false;
11036
11037   BranchInst *LoopContinuePredicate =
11038       dyn_cast<BranchInst>(Latch->getTerminator());
11039   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
11040       isImpliedCond(Pred, LHS, RHS,
11041                     LoopContinuePredicate->getCondition(),
11042                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
11043     return true;
11044
11045   // We don't want more than one activation of the following loops on the stack
11046   // -- that can lead to O(n!) time complexity.
11047   if (WalkingBEDominatingConds)
11048     return false;
11049
11050   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
11051
11052   // See if we can exploit a trip count to prove the predicate.
11053   const auto &BETakenInfo = getBackedgeTakenInfo(L);
11054   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
11055   if (LatchBECount != getCouldNotCompute()) {
11056     // We know that Latch branches back to the loop header exactly
11057     // LatchBECount times. This means the backedge condition at Latch is
11058     // equivalent to "{0,+,1} u< LatchBECount".
11059     Type *Ty = LatchBECount->getType();
11060     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
11061     const SCEV *LoopCounter =
11062         getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
11063     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
11064                       LatchBECount))
11065       return true;
11066   }
11067
11068   // Check conditions due to any @llvm.assume intrinsics.
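  // For illustration, a dominating assumption such as (hypothetical IR):
  //   %cond = icmp ult i64 %i, %n
  //   call void @llvm.assume(i1 %cond)
  // makes the fact "%i u< %n" available to the implication check below.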
11069 for (auto &AssumeVH : AC.assumptions()) { 11070 if (!AssumeVH) 11071 continue; 11072 auto *CI = cast<CallInst>(AssumeVH); 11073 if (!DT.dominates(CI, Latch->getTerminator())) 11074 continue; 11075 11076 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 11077 return true; 11078 } 11079 11080 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 11081 return true; 11082 11083 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 11084 DTN != HeaderDTN; DTN = DTN->getIDom()) { 11085 assert(DTN && "should reach the loop header before reaching the root!"); 11086 11087 BasicBlock *BB = DTN->getBlock(); 11088 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 11089 return true; 11090 11091 BasicBlock *PBB = BB->getSinglePredecessor(); 11092 if (!PBB) 11093 continue; 11094 11095 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 11096 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 11097 continue; 11098 11099 Value *Condition = ContinuePredicate->getCondition(); 11100 11101 // If we have an edge `E` within the loop body that dominates the only 11102 // latch, the condition guarding `E` also guards the backedge. This 11103 // reasoning works only for loops with a single latch. 11104 11105 BasicBlockEdge DominatingEdge(PBB, BB); 11106 if (DominatingEdge.isSingleEdge()) { 11107 // We're constructively (and conservatively) enumerating edges within the 11108 // loop body that dominate the latch. The dominator tree better agree 11109 // with us on this: 11110 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 11111 11112 if (isImpliedCond(Pred, LHS, RHS, Condition, 11113 BB != ContinuePredicate->getSuccessor(0))) 11114 return true; 11115 } 11116 } 11117 11118 return false; 11119 } 11120 11121 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 11122 ICmpInst::Predicate Pred, 11123 const SCEV *LHS, 11124 const SCEV *RHS) { 11125 // Do not bother proving facts for unreachable code. 11126 if (!DT.isReachableFromEntry(BB)) 11127 return true; 11128 if (VerifyIR) 11129 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 11130 "This cannot be done on broken IR!"); 11131 11132 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 11133 // the facts (a >= b && a != b) separately. A typical situation is when the 11134 // non-strict comparison is known from ranges and non-equality is known from 11135 // dominating predicates. If we are proving strict comparison, we always try 11136 // to prove non-equality and non-strict comparison separately. 11137 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 11138 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 11139 bool ProvedNonStrictComparison = false; 11140 bool ProvedNonEquality = false; 11141 11142 auto SplitAndProve = 11143 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 11144 if (!ProvedNonStrictComparison) 11145 ProvedNonStrictComparison = Fn(NonStrictPredicate); 11146 if (!ProvedNonEquality) 11147 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 11148 if (ProvedNonStrictComparison && ProvedNonEquality) 11149 return true; 11150 return false; 11151 }; 11152 11153 if (ProvingStrictComparison) { 11154 auto ProofFn = [&](ICmpInst::Predicate P) { 11155 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 11156 }; 11157 if (SplitAndProve(ProofFn)) 11158 return true; 11159 } 11160 11161 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 
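  // (Note on the Inverse flag below: it is set when the branch in question
  // reaches BB on its *false* edge, in which case reaching BB lets us assume
  // the negation of the branch condition rather than the condition itself.)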
11162   auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
11163     const Instruction *CtxI = &BB->front();
11164     if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI))
11165       return true;
11166     if (ProvingStrictComparison) {
11167       auto ProofFn = [&](ICmpInst::Predicate P) {
11168         return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI);
11169       };
11170       if (SplitAndProve(ProofFn))
11171         return true;
11172     }
11173     return false;
11174   };
11175
11176   // Starting at the block's predecessor, climb up the predecessor chain for as
11177   // long as we can find predecessors that have unique successors leading to
11178   // the original block.
11179   const Loop *ContainingLoop = LI.getLoopFor(BB);
11180   const BasicBlock *PredBB;
11181   if (ContainingLoop && ContainingLoop->getHeader() == BB)
11182     PredBB = ContainingLoop->getLoopPredecessor();
11183   else
11184     PredBB = BB->getSinglePredecessor();
11185   for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11186        Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11187     const BranchInst *BlockEntryPredicate =
11188         dyn_cast<BranchInst>(Pair.first->getTerminator());
11189     if (!BlockEntryPredicate || BlockEntryPredicate->isUnconditional())
11190       continue;
11191
11192     if (ProveViaCond(BlockEntryPredicate->getCondition(),
11193                      BlockEntryPredicate->getSuccessor(0) != Pair.second))
11194       return true;
11195   }
11196
11197   // Check conditions due to any @llvm.assume intrinsics.
11198   for (auto &AssumeVH : AC.assumptions()) {
11199     if (!AssumeVH)
11200       continue;
11201     auto *CI = cast<CallInst>(AssumeVH);
11202     if (!DT.dominates(CI, BB))
11203       continue;
11204
11205     if (ProveViaCond(CI->getArgOperand(0), false))
11206       return true;
11207   }
11208
11209   // Check conditions due to any @llvm.experimental.guard intrinsics.
11210   auto *GuardDecl = F.getParent()->getFunction(
11211       Intrinsic::getName(Intrinsic::experimental_guard));
11212   if (GuardDecl)
11213     for (const auto *GU : GuardDecl->users())
11214       if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
11215         if (Guard->getFunction() == BB->getParent() && DT.dominates(Guard, BB))
11216           if (ProveViaCond(Guard->getArgOperand(0), false))
11217             return true;
11218   return false;
11219 }
11220
11221 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
11222                                                ICmpInst::Predicate Pred,
11223                                                const SCEV *LHS,
11224                                                const SCEV *RHS) {
11225   // Interpret a null as meaning no loop, where there is obviously no guard
11226   // (interprocedural conditions notwithstanding).
11227   if (!L)
11228     return false;
11229
11230   // Both LHS and RHS must be available at loop entry.
11231   assert(isAvailableAtLoopEntry(LHS, L) &&
11232          "LHS is not available at Loop Entry");
11233   assert(isAvailableAtLoopEntry(RHS, L) &&
11234          "RHS is not available at Loop Entry");
11235
11236   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11237     return true;
11238
11239   return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
11240 }
11241
11242 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
11243                                     const SCEV *RHS,
11244                                     const Value *FoundCondValue, bool Inverse,
11245                                     const Instruction *CtxI) {
11246   // A false condition implies anything. Do not bother analyzing it further.
11247   if (FoundCondValue ==
11248       ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
11249     return true;
11250
11251   if (!PendingLoopPredicates.insert(FoundCondValue).second)
11252     return false;
11253
11254   auto ClearOnExit =
11255       make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
11256
11257   // Recursively handle And and Or conditions.
11258   const Value *Op0, *Op1;
11259   if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
11260     if (!Inverse)
11261       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
11262              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
11263   } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
11264     if (Inverse)
11265       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
11266              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
11267   }
11268
11269   const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
11270   if (!ICI) return false;
11271
11272   // We have found a conditional branch that dominates the loop or controls the
11273   // loop latch. Check to see if it is the comparison we are looking for.
11274   ICmpInst::Predicate FoundPred;
11275   if (Inverse)
11276     FoundPred = ICI->getInversePredicate();
11277   else
11278     FoundPred = ICI->getPredicate();
11279
11280   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
11281   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
11282
11283   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11284 }
11285
11286 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
11287                                     const SCEV *RHS,
11288                                     ICmpInst::Predicate FoundPred,
11289                                     const SCEV *FoundLHS, const SCEV *FoundRHS,
11290                                     const Instruction *CtxI) {
11291   // Balance the types.
11292   if (getTypeSizeInBits(LHS->getType()) <
11293       getTypeSizeInBits(FoundLHS->getType())) {
11294     // For unsigned and equality predicates, try to prove that both found
11295     // operands fit into a narrow unsigned range. If so, try to prove facts in
11296     // the narrow types.
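    // (Illustrative example: if the found fact is "%x u< %y" on i64 but the
    // query is on i8, and both %x and %y are known to lie in [0, 255], the
    // found operands are truncated to i8 and the implication is re-checked in
    // the narrow type.)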
11297 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy() && 11298 !FoundRHS->getType()->isPointerTy()) { 11299 auto *NarrowType = LHS->getType(); 11300 auto *WideType = FoundLHS->getType(); 11301 auto BitWidth = getTypeSizeInBits(NarrowType); 11302 const SCEV *MaxValue = getZeroExtendExpr( 11303 getConstant(APInt::getMaxValue(BitWidth)), WideType); 11304 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS, 11305 MaxValue) && 11306 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS, 11307 MaxValue)) { 11308 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); 11309 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); 11310 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, 11311 TruncFoundRHS, CtxI)) 11312 return true; 11313 } 11314 } 11315 11316 if (LHS->getType()->isPointerTy() || RHS->getType()->isPointerTy()) 11317 return false; 11318 if (CmpInst::isSigned(Pred)) { 11319 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 11320 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 11321 } else { 11322 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 11323 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 11324 } 11325 } else if (getTypeSizeInBits(LHS->getType()) > 11326 getTypeSizeInBits(FoundLHS->getType())) { 11327 if (FoundLHS->getType()->isPointerTy() || FoundRHS->getType()->isPointerTy()) 11328 return false; 11329 if (CmpInst::isSigned(FoundPred)) { 11330 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 11331 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 11332 } else { 11333 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 11334 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 11335 } 11336 } 11337 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 11338 FoundRHS, CtxI); 11339 } 11340 11341 bool ScalarEvolution::isImpliedCondBalancedTypes( 11342 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11343 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 11344 const Instruction *CtxI) { 11345 assert(getTypeSizeInBits(LHS->getType()) == 11346 getTypeSizeInBits(FoundLHS->getType()) && 11347 "Types should be balanced!"); 11348 // Canonicalize the query to match the way instcombine will have 11349 // canonicalized the comparison. 11350 if (SimplifyICmpOperands(Pred, LHS, RHS)) 11351 if (LHS == RHS) 11352 return CmpInst::isTrueWhenEqual(Pred); 11353 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 11354 if (FoundLHS == FoundRHS) 11355 return CmpInst::isFalseWhenEqual(FoundPred); 11356 11357 // Check to see if we can make the LHS or RHS match. 11358 if (LHS == FoundRHS || RHS == FoundLHS) { 11359 if (isa<SCEVConstant>(RHS)) { 11360 std::swap(FoundLHS, FoundRHS); 11361 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 11362 } else { 11363 std::swap(LHS, RHS); 11364 Pred = ICmpInst::getSwappedPredicate(Pred); 11365 } 11366 } 11367 11368 // Check whether the found predicate is the same as the desired predicate. 11369 if (FoundPred == Pred) 11370 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 11371 11372 // Check whether swapping the found predicate makes it the same as the 11373 // desired predicate. 11374 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 11375 // We can write the implication 11376 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS 11377 // using one of the following ways: 11378 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS 11379 // 2. 
RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS 11380 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS 11381 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS 11382 // Forms 1. and 2. require swapping the operands of one condition. Don't 11383 // do this if it would break canonical constant/addrec ordering. 11384 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS)) 11385 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS, 11386 CtxI); 11387 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS)) 11388 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI); 11389 11390 // There's no clear preference between forms 3. and 4., try both. Avoid 11391 // forming getNotSCEV of pointer values as the resulting subtract is 11392 // not legal. 11393 if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() && 11394 isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS), 11395 FoundLHS, FoundRHS, CtxI)) 11396 return true; 11397 11398 if (!FoundLHS->getType()->isPointerTy() && 11399 !FoundRHS->getType()->isPointerTy() && 11400 isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS), 11401 getNotSCEV(FoundRHS), CtxI)) 11402 return true; 11403 11404 return false; 11405 } 11406 11407 auto IsSignFlippedPredicate = [](CmpInst::Predicate P1, 11408 CmpInst::Predicate P2) { 11409 assert(P1 != P2 && "Handled earlier!"); 11410 return CmpInst::isRelational(P2) && 11411 P1 == CmpInst::getFlippedSignednessPredicate(P2); 11412 }; 11413 if (IsSignFlippedPredicate(Pred, FoundPred)) { 11414 // Unsigned comparison is the same as signed comparison when both the 11415 // operands are non-negative or negative. 11416 if ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) || 11417 (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS))) 11418 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 11419 // Create local copies that we can freely swap and canonicalize our 11420 // conditions to "le/lt". 11421 ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred; 11422 const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS, 11423 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS; 11424 if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) { 11425 CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred); 11426 CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred); 11427 std::swap(CanonicalLHS, CanonicalRHS); 11428 std::swap(CanonicalFoundLHS, CanonicalFoundRHS); 11429 } 11430 assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) && 11431 "Must be!"); 11432 assert((ICmpInst::isLT(CanonicalFoundPred) || 11433 ICmpInst::isLE(CanonicalFoundPred)) && 11434 "Must be!"); 11435 if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS)) 11436 // Use implication: 11437 // x <u y && y >=s 0 --> x <s y. 11438 // If we can prove the left part, the right part is also proven. 11439 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS, 11440 CanonicalRHS, CanonicalFoundLHS, 11441 CanonicalFoundRHS); 11442 if (ICmpInst::isUnsigned(CanonicalPred) && isKnownNegative(CanonicalRHS)) 11443 // Use implication: 11444 // x <s y && y <s 0 --> x <u y. 11445 // If we can prove the left part, the right part is also proven. 11446 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS, 11447 CanonicalRHS, CanonicalFoundLHS, 11448 CanonicalFoundRHS); 11449 } 11450 11451 // Check if we can make progress by sharpening ranges. 
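  // (Illustrative example: if %v is known to lie in [0, 100) and the guard
  // says "%v != 0", the range sharpens to [1, 100), which can then prove
  // e.g. "%v u>= 1".)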
11452 if (FoundPred == ICmpInst::ICMP_NE && 11453 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 11454 11455 const SCEVConstant *C = nullptr; 11456 const SCEV *V = nullptr; 11457 11458 if (isa<SCEVConstant>(FoundLHS)) { 11459 C = cast<SCEVConstant>(FoundLHS); 11460 V = FoundRHS; 11461 } else { 11462 C = cast<SCEVConstant>(FoundRHS); 11463 V = FoundLHS; 11464 } 11465 11466 // The guarding predicate tells us that C != V. If the known range 11467 // of V is [C, t), we can sharpen the range to [C + 1, t). The 11468 // range we consider has to correspond to same signedness as the 11469 // predicate we're interested in folding. 11470 11471 APInt Min = ICmpInst::isSigned(Pred) ? 11472 getSignedRangeMin(V) : getUnsignedRangeMin(V); 11473 11474 if (Min == C->getAPInt()) { 11475 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 11476 // This is true even if (Min + 1) wraps around -- in case of 11477 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 11478 11479 APInt SharperMin = Min + 1; 11480 11481 switch (Pred) { 11482 case ICmpInst::ICMP_SGE: 11483 case ICmpInst::ICMP_UGE: 11484 // We know V `Pred` SharperMin. If this implies LHS `Pred` 11485 // RHS, we're done. 11486 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), 11487 CtxI)) 11488 return true; 11489 LLVM_FALLTHROUGH; 11490 11491 case ICmpInst::ICMP_SGT: 11492 case ICmpInst::ICMP_UGT: 11493 // We know from the range information that (V `Pred` Min || 11494 // V == Min). We know from the guarding condition that !(V 11495 // == Min). This gives us 11496 // 11497 // V `Pred` Min || V == Min && !(V == Min) 11498 // => V `Pred` Min 11499 // 11500 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 11501 11502 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI)) 11503 return true; 11504 break; 11505 11506 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 11507 case ICmpInst::ICMP_SLE: 11508 case ICmpInst::ICMP_ULE: 11509 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 11510 LHS, V, getConstant(SharperMin), CtxI)) 11511 return true; 11512 LLVM_FALLTHROUGH; 11513 11514 case ICmpInst::ICMP_SLT: 11515 case ICmpInst::ICMP_ULT: 11516 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 11517 LHS, V, getConstant(Min), CtxI)) 11518 return true; 11519 break; 11520 11521 default: 11522 // No change 11523 break; 11524 } 11525 } 11526 } 11527 11528 // Check whether the actual condition is beyond sufficient. 11529 if (FoundPred == ICmpInst::ICMP_EQ) 11530 if (ICmpInst::isTrueWhenEqual(Pred)) 11531 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 11532 return true; 11533 if (Pred == ICmpInst::ICMP_NE) 11534 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 11535 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 11536 return true; 11537 11538 // Otherwise assume the worst. 
11539 return false; 11540 } 11541 11542 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 11543 const SCEV *&L, const SCEV *&R, 11544 SCEV::NoWrapFlags &Flags) { 11545 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 11546 if (!AE || AE->getNumOperands() != 2) 11547 return false; 11548 11549 L = AE->getOperand(0); 11550 R = AE->getOperand(1); 11551 Flags = AE->getNoWrapFlags(); 11552 return true; 11553 } 11554 11555 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 11556 const SCEV *Less) { 11557 // We avoid subtracting expressions here because this function is usually 11558 // fairly deep in the call stack (i.e. is called many times). 11559 11560 // X - X = 0. 11561 if (More == Less) 11562 return APInt(getTypeSizeInBits(More->getType()), 0); 11563 11564 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 11565 const auto *LAR = cast<SCEVAddRecExpr>(Less); 11566 const auto *MAR = cast<SCEVAddRecExpr>(More); 11567 11568 if (LAR->getLoop() != MAR->getLoop()) 11569 return None; 11570 11571 // We look at affine expressions only; not for correctness but to keep 11572 // getStepRecurrence cheap. 11573 if (!LAR->isAffine() || !MAR->isAffine()) 11574 return None; 11575 11576 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 11577 return None; 11578 11579 Less = LAR->getStart(); 11580 More = MAR->getStart(); 11581 11582 // fall through 11583 } 11584 11585 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 11586 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 11587 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 11588 return M - L; 11589 } 11590 11591 SCEV::NoWrapFlags Flags; 11592 const SCEV *LLess = nullptr, *RLess = nullptr; 11593 const SCEV *LMore = nullptr, *RMore = nullptr; 11594 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 11595 // Compare (X + C1) vs X. 11596 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 11597 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 11598 if (RLess == More) 11599 return -(C1->getAPInt()); 11600 11601 // Compare X vs (X + C2). 11602 if (splitBinaryAdd(More, LMore, RMore, Flags)) 11603 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 11604 if (RMore == Less) 11605 return C2->getAPInt(); 11606 11607 // Compare (X + C1) vs (X + C2). 11608 if (C1 && C2 && RLess == RMore) 11609 return C2->getAPInt() - C1->getAPInt(); 11610 11611 return None; 11612 } 11613 11614 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 11615 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11616 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) { 11617 // Try to recognize the following pattern: 11618 // 11619 // FoundRHS = ... 11620 // ... 11621 // loop: 11622 // FoundLHS = {Start,+,W} 11623 // context_bb: // Basic block from the same loop 11624 // known(Pred, FoundLHS, FoundRHS) 11625 // 11626 // If some predicate is known in the context of a loop, it is also known on 11627 // each iteration of this loop, including the first iteration. Therefore, in 11628 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 11629 // prove the original pred using this fact. 11630 if (!CtxI) 11631 return false; 11632 const BasicBlock *ContextBB = CtxI->getParent(); 11633 // Make sure AR varies in the context block. 11634 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 11635 const Loop *L = AR->getLoop(); 11636 // Make sure that context belongs to the loop and executes on 1st iteration 11637 // (if it ever executes at all). 
11638 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 11639 return false; 11640 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 11641 return false; 11642 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 11643 } 11644 11645 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 11646 const Loop *L = AR->getLoop(); 11647 // Make sure that context belongs to the loop and executes on 1st iteration 11648 // (if it ever executes at all). 11649 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 11650 return false; 11651 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 11652 return false; 11653 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 11654 } 11655 11656 return false; 11657 } 11658 11659 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 11660 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11661 const SCEV *FoundLHS, const SCEV *FoundRHS) { 11662 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 11663 return false; 11664 11665 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 11666 if (!AddRecLHS) 11667 return false; 11668 11669 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 11670 if (!AddRecFoundLHS) 11671 return false; 11672 11673 // We'd like to let SCEV reason about control dependencies, so we constrain 11674 // both the inequalities to be about add recurrences on the same loop. This 11675 // way we can use isLoopEntryGuardedByCond later. 11676 11677 const Loop *L = AddRecFoundLHS->getLoop(); 11678 if (L != AddRecLHS->getLoop()) 11679 return false; 11680 11681 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 11682 // 11683 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 11684 // ... (2) 11685 // 11686 // Informal proof for (2), assuming (1) [*]: 11687 // 11688 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 11689 // 11690 // Then 11691 // 11692 // FoundLHS s< FoundRHS s< INT_MIN - C 11693 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 11694 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 11695 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 11696 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 11697 // <=> FoundLHS + C s< FoundRHS + C 11698 // 11699 // [*]: (1) can be proved by ruling out overflow. 11700 // 11701 // [**]: This can be proved by analyzing all the four possibilities: 11702 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 11703 // (A s>= 0, B s>= 0). 11704 // 11705 // Note: 11706 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 11707 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 11708 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 11709 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 11710 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 11711 // C)". 
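  // (Illustrative instance of (1) in i8: with C = 5, -C is 251; from
  // FoundLHS = 10 u< FoundRHS = 20 u< 251 it follows that 15 u< 25, since
  // "FoundRHS u< -C" rules out unsigned wrap when adding C.)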
11712
11713   Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
11714   Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
11715   if (!LDiff || !RDiff || *LDiff != *RDiff)
11716     return false;
11717
11718   if (LDiff->isMinValue())
11719     return true;
11720
11721   APInt FoundRHSLimit;
11722
11723   if (Pred == CmpInst::ICMP_ULT) {
11724     FoundRHSLimit = -(*RDiff);
11725   } else {
11726     assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
11727     FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
11728   }
11729
11730   // Try to prove (1) or (2), as needed.
11731   return isAvailableAtLoopEntry(FoundRHS, L) &&
11732          isLoopEntryGuardedByCond(L, Pred, FoundRHS,
11733                                   getConstant(FoundRHSLimit));
11734 }
11735
11736 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
11737                                         const SCEV *LHS, const SCEV *RHS,
11738                                         const SCEV *FoundLHS,
11739                                         const SCEV *FoundRHS, unsigned Depth) {
11740   const PHINode *LPhi = nullptr, *RPhi = nullptr;
11741
11742   auto ClearOnExit = make_scope_exit([&]() {
11743     if (LPhi) {
11744       bool Erased = PendingMerges.erase(LPhi);
11745       assert(Erased && "Failed to erase LPhi!");
11746       (void)Erased;
11747     }
11748     if (RPhi) {
11749       bool Erased = PendingMerges.erase(RPhi);
11750       assert(Erased && "Failed to erase RPhi!");
11751       (void)Erased;
11752     }
11753   });
11754
11755   // Find the respective Phis and check that they are not already pending.
11756   if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
11757     if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
11758       if (!PendingMerges.insert(Phi).second)
11759         return false;
11760       LPhi = Phi;
11761     }
11762   if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
11763     if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
11764       // If we detect a loop of Phi nodes being processed by this method, for
11765       // example:
11766       //
11767       // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
11768       // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
11769       //
11770       // we don't want to deal with a case this complex, so we conservatively
11771       // return false.
11772       if (!PendingMerges.insert(Phi).second)
11773         return false;
11774       RPhi = Phi;
11775     }
11776
11777   // If neither LHS nor RHS is a Phi, there is nothing to do here.
11778   if (!LPhi && !RPhi)
11779     return false;
11780
11781   // If there is a SCEVUnknown Phi we are interested in, make it the left one.
11782   if (!LPhi) {
11783     std::swap(LHS, RHS);
11784     std::swap(FoundLHS, FoundRHS);
11785     std::swap(LPhi, RPhi);
11786     Pred = ICmpInst::getSwappedPredicate(Pred);
11787   }
11788
11789   assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
11790   const BasicBlock *LBB = LPhi->getParent();
11791   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11792
11793   auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
11794     return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
11795            isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
11796            isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
11797   };
11798
11799   if (RPhi && RPhi->getParent() == LBB) {
11800     // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
11801     // If we compare two Phis from the same block, and for each incoming block
11802     // the predicate is true for the incoming values from that block, then the
11803     // predicate is also true for the Phis.
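    // For example (illustrative):
    //   %a = phi i32 [ 0, %entry ], [ %x, %other ]
    //   %b = phi i32 [ 1, %entry ], [ %y, %other ]
    // Proving "0 s< 1" and "%x s< %y" proves "%a s< %b".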
11804 for (const BasicBlock *IncBB : predecessors(LBB)) { 11805 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 11806 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); 11807 if (!ProvedEasily(L, R)) 11808 return false; 11809 } 11810 } else if (RAR && RAR->getLoop()->getHeader() == LBB) { 11811 // Case two: RHS is also a Phi from the same basic block, and it is an 11812 // AddRec. It means that there is a loop which has both AddRec and Unknown 11813 // PHIs, for it we can compare incoming values of AddRec from above the loop 11814 // and latch with their respective incoming values of LPhi. 11815 // TODO: Generalize to handle loops with many inputs in a header. 11816 if (LPhi->getNumIncomingValues() != 2) return false; 11817 11818 auto *RLoop = RAR->getLoop(); 11819 auto *Predecessor = RLoop->getLoopPredecessor(); 11820 assert(Predecessor && "Loop with AddRec with no predecessor?"); 11821 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); 11822 if (!ProvedEasily(L1, RAR->getStart())) 11823 return false; 11824 auto *Latch = RLoop->getLoopLatch(); 11825 assert(Latch && "Loop with AddRec with no latch?"); 11826 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); 11827 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) 11828 return false; 11829 } else { 11830 // In all other cases go over inputs of LHS and compare each of them to RHS, 11831 // the predicate is true for (LHS, RHS) if it is true for all such pairs. 11832 // At this point RHS is either a non-Phi, or it is a Phi from some block 11833 // different from LBB. 11834 for (const BasicBlock *IncBB : predecessors(LBB)) { 11835 // Check that RHS is available in this block. 11836 if (!dominates(RHS, IncBB)) 11837 return false; 11838 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 11839 // Make sure L does not refer to a value from a potentially previous 11840 // iteration of a loop. 11841 if (!properlyDominates(L, LBB)) 11842 return false; 11843 if (!ProvedEasily(L, RHS)) 11844 return false; 11845 } 11846 } 11847 return true; 11848 } 11849 11850 bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred, 11851 const SCEV *LHS, 11852 const SCEV *RHS, 11853 const SCEV *FoundLHS, 11854 const SCEV *FoundRHS) { 11855 // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue). First, make 11856 // sure that we are dealing with same LHS. 
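  // (Illustrative example: from the found fact "%i u< (%len lshr 2)", the
  // query "%i u< %len" follows below, because the shiftee %len satisfies
  // "%len u<= %len" and a logical right shift cannot increase an unsigned
  // value.)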
11857 if (RHS == FoundRHS) { 11858 std::swap(LHS, RHS); 11859 std::swap(FoundLHS, FoundRHS); 11860 Pred = ICmpInst::getSwappedPredicate(Pred); 11861 } 11862 if (LHS != FoundLHS) 11863 return false; 11864 11865 auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS); 11866 if (!SUFoundRHS) 11867 return false; 11868 11869 Value *Shiftee, *ShiftValue; 11870 11871 using namespace PatternMatch; 11872 if (match(SUFoundRHS->getValue(), 11873 m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) { 11874 auto *ShifteeS = getSCEV(Shiftee); 11875 // Prove one of the following: 11876 // LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS 11877 // LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS 11878 // LHS <s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0 11879 // ---> LHS <s RHS 11880 // LHS <=s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0 11881 // ---> LHS <=s RHS 11882 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 11883 return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS); 11884 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 11885 if (isKnownNonNegative(ShifteeS)) 11886 return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS); 11887 } 11888 11889 return false; 11890 } 11891 11892 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 11893 const SCEV *LHS, const SCEV *RHS, 11894 const SCEV *FoundLHS, 11895 const SCEV *FoundRHS, 11896 const Instruction *CtxI) { 11897 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 11898 return true; 11899 11900 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 11901 return true; 11902 11903 if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS)) 11904 return true; 11905 11906 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, 11907 CtxI)) 11908 return true; 11909 11910 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 11911 FoundLHS, FoundRHS); 11912 } 11913 11914 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 11915 template <typename MinMaxExprType> 11916 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, 11917 const SCEV *Candidate) { 11918 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); 11919 if (!MinMaxExpr) 11920 return false; 11921 11922 return is_contained(MinMaxExpr->operands(), Candidate); 11923 } 11924 11925 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 11926 ICmpInst::Predicate Pred, 11927 const SCEV *LHS, const SCEV *RHS) { 11928 // If both sides are affine addrecs for the same loop, with equal 11929 // steps, and we know the recurrences don't wrap, then we only 11930 // need to check the predicate on the starting values. 11931 11932 if (!ICmpInst::isRelational(Pred)) 11933 return false; 11934 11935 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 11936 if (!LAR) 11937 return false; 11938 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 11939 if (!RAR) 11940 return false; 11941 if (LAR->getLoop() != RAR->getLoop()) 11942 return false; 11943 if (!LAR->isAffine() || !RAR->isAffine()) 11944 return false; 11945 11946 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 11947 return false; 11948 11949 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 
11950 SCEV::FlagNSW : SCEV::FlagNUW; 11951 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 11952 return false; 11953 11954 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 11955 } 11956 11957 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max 11958 /// expression? 11959 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 11960 ICmpInst::Predicate Pred, 11961 const SCEV *LHS, const SCEV *RHS) { 11962 switch (Pred) { 11963 default: 11964 return false; 11965 11966 case ICmpInst::ICMP_SGE: 11967 std::swap(LHS, RHS); 11968 LLVM_FALLTHROUGH; 11969 case ICmpInst::ICMP_SLE: 11970 return 11971 // min(A, ...) <= A 11972 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || 11973 // A <= max(A, ...) 11974 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 11975 11976 case ICmpInst::ICMP_UGE: 11977 std::swap(LHS, RHS); 11978 LLVM_FALLTHROUGH; 11979 case ICmpInst::ICMP_ULE: 11980 return 11981 // min(A, ...) <= A 11982 // FIXME: what about umin_seq? 11983 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || 11984 // A <= max(A, ...) 11985 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); 11986 } 11987 11988 llvm_unreachable("covered switch fell through?!"); 11989 } 11990 11991 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, 11992 const SCEV *LHS, const SCEV *RHS, 11993 const SCEV *FoundLHS, 11994 const SCEV *FoundRHS, 11995 unsigned Depth) { 11996 assert(getTypeSizeInBits(LHS->getType()) == 11997 getTypeSizeInBits(RHS->getType()) && 11998 "LHS and RHS have different sizes?"); 11999 assert(getTypeSizeInBits(FoundLHS->getType()) == 12000 getTypeSizeInBits(FoundRHS->getType()) && 12001 "FoundLHS and FoundRHS have different sizes?"); 12002 // We want to avoid hurting the compile time with analysis of too big trees. 12003 if (Depth > MaxSCEVOperationsImplicationDepth) 12004 return false; 12005 12006 // We only want to work with GT comparison so far. 12007 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) { 12008 Pred = CmpInst::getSwappedPredicate(Pred); 12009 std::swap(LHS, RHS); 12010 std::swap(FoundLHS, FoundRHS); 12011 } 12012 12013 // For unsigned, try to reduce it to corresponding signed comparison. 12014 if (Pred == ICmpInst::ICMP_UGT) 12015 // We can replace unsigned predicate with its signed counterpart if all 12016 // involved values are non-negative. 12017 // TODO: We could have better support for unsigned. 12018 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) { 12019 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing 12020 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us 12021 // use this fact to prove that LHS and RHS are non-negative. 12022 const SCEV *MinusOne = getMinusOne(LHS->getType()); 12023 if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS, 12024 FoundRHS) && 12025 isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS, 12026 FoundRHS)) 12027 Pred = ICmpInst::ICMP_SGT; 12028 } 12029 12030 if (Pred != ICmpInst::ICMP_SGT) 12031 return false; 12032 12033 auto GetOpFromSExt = [&](const SCEV *S) { 12034 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S)) 12035 return Ext->getOperand(); 12036 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off 12037 // the constant in some cases. 12038 return S; 12039 }; 12040 12041 // Acquire values from extensions. 
12042 auto *OrigLHS = LHS; 12043 auto *OrigFoundLHS = FoundLHS; 12044 LHS = GetOpFromSExt(LHS); 12045 FoundLHS = GetOpFromSExt(FoundLHS); 12046 12047 // Is the SGT predicate can be proved trivially or using the found context. 12048 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) { 12049 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) || 12050 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS, 12051 FoundRHS, Depth + 1); 12052 }; 12053 12054 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) { 12055 // We want to avoid creation of any new non-constant SCEV. Since we are 12056 // going to compare the operands to RHS, we should be certain that we don't 12057 // need any size extensions for this. So let's decline all cases when the 12058 // sizes of types of LHS and RHS do not match. 12059 // TODO: Maybe try to get RHS from sext to catch more cases? 12060 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType())) 12061 return false; 12062 12063 // Should not overflow. 12064 if (!LHSAddExpr->hasNoSignedWrap()) 12065 return false; 12066 12067 auto *LL = LHSAddExpr->getOperand(0); 12068 auto *LR = LHSAddExpr->getOperand(1); 12069 auto *MinusOne = getMinusOne(RHS->getType()); 12070 12071 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context. 12072 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { 12073 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); 12074 }; 12075 // Try to prove the following rule: 12076 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). 12077 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). 12078 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) 12079 return true; 12080 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) { 12081 Value *LL, *LR; 12082 // FIXME: Once we have SDiv implemented, we can get rid of this matching. 12083 12084 using namespace llvm::PatternMatch; 12085 12086 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { 12087 // Rules for division. 12088 // We are going to perform some comparisons with Denominator and its 12089 // derivative expressions. In general case, creating a SCEV for it may 12090 // lead to a complex analysis of the entire graph, and in particular it 12091 // can request trip count recalculation for the same loop. This would 12092 // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid 12093 // this, we only want to create SCEVs that are constants in this section. 12094 // So we bail if Denominator is not a constant. 12095 if (!isa<ConstantInt>(LR)) 12096 return false; 12097 12098 auto *Denominator = cast<SCEVConstant>(getSCEV(LR)); 12099 12100 // We want to make sure that LHS = FoundLHS / Denominator. If it is so, 12101 // then a SCEV for the numerator already exists and matches with FoundLHS. 12102 auto *Numerator = getExistingSCEV(LL); 12103 if (!Numerator || Numerator->getType() != FoundLHS->getType()) 12104 return false; 12105 12106 // Make sure that the numerator matches with FoundLHS and the denominator 12107 // is positive. 12108 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) 12109 return false; 12110 12111 auto *DTy = Denominator->getType(); 12112 auto *FRHSTy = FoundRHS->getType(); 12113 if (DTy->isPointerTy() != FRHSTy->isPointerTy()) 12114 // One of types is a pointer and another one is not. We cannot extend 12115 // them properly to a wider type, so let us just reject this case. 
12116 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help 12117 // to avoid this check. 12118 return false; 12119 12120 // Given that: 12121 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. 12122 auto *WTy = getWiderType(DTy, FRHSTy); 12123 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); 12124 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); 12125 12126 // Try to prove the following rule: 12127 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). 12128 // For example, given that FoundLHS > 2. It means that FoundLHS is at 12129 // least 3. If we divide it by Denominator < 4, we will have at least 1. 12130 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); 12131 if (isKnownNonPositive(RHS) && 12132 IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) 12133 return true; 12134 12135 // Try to prove the following rule: 12136 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). 12137 // For example, given that FoundLHS > -3. Then FoundLHS is at least -2. 12138 // If we divide it by Denominator > 2, then: 12139 // 1. If FoundLHS is negative, then the result is 0. 12140 // 2. If FoundLHS is non-negative, then the result is non-negative. 12141 // Anyways, the result is non-negative. 12142 auto *MinusOne = getMinusOne(WTy); 12143 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); 12144 if (isKnownNegative(RHS) && 12145 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) 12146 return true; 12147 } 12148 } 12149 12150 // If our expression contained SCEVUnknown Phis, and we split it down and now 12151 // need to prove something for them, try to prove the predicate for every 12152 // possible incoming values of those Phis. 12153 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) 12154 return true; 12155 12156 return false; 12157 } 12158 12159 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, 12160 const SCEV *LHS, const SCEV *RHS) { 12161 // zext x u<= sext x, sext x s<= zext x 12162 switch (Pred) { 12163 case ICmpInst::ICMP_SGE: 12164 std::swap(LHS, RHS); 12165 LLVM_FALLTHROUGH; 12166 case ICmpInst::ICMP_SLE: { 12167 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt. 12168 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS); 12169 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS); 12170 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 12171 return true; 12172 break; 12173 } 12174 case ICmpInst::ICMP_UGE: 12175 std::swap(LHS, RHS); 12176 LLVM_FALLTHROUGH; 12177 case ICmpInst::ICMP_ULE: { 12178 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt. 
12179 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS); 12180 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS); 12181 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 12182 return true; 12183 break; 12184 } 12185 default: 12186 break; 12187 }; 12188 return false; 12189 } 12190 12191 bool 12192 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, 12193 const SCEV *LHS, const SCEV *RHS) { 12194 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) || 12195 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || 12196 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || 12197 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || 12198 isKnownPredicateViaNoOverflow(Pred, LHS, RHS); 12199 } 12200 12201 bool 12202 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 12203 const SCEV *LHS, const SCEV *RHS, 12204 const SCEV *FoundLHS, 12205 const SCEV *FoundRHS) { 12206 switch (Pred) { 12207 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 12208 case ICmpInst::ICMP_EQ: 12209 case ICmpInst::ICMP_NE: 12210 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 12211 return true; 12212 break; 12213 case ICmpInst::ICMP_SLT: 12214 case ICmpInst::ICMP_SLE: 12215 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 12216 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 12217 return true; 12218 break; 12219 case ICmpInst::ICMP_SGT: 12220 case ICmpInst::ICMP_SGE: 12221 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 12222 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 12223 return true; 12224 break; 12225 case ICmpInst::ICMP_ULT: 12226 case ICmpInst::ICMP_ULE: 12227 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 12228 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 12229 return true; 12230 break; 12231 case ICmpInst::ICMP_UGT: 12232 case ICmpInst::ICMP_UGE: 12233 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 12234 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS)) 12235 return true; 12236 break; 12237 } 12238 12239 // Maybe it can be proved via operations? 12240 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS)) 12241 return true; 12242 12243 return false; 12244 } 12245 12246 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, 12247 const SCEV *LHS, 12248 const SCEV *RHS, 12249 const SCEV *FoundLHS, 12250 const SCEV *FoundRHS) { 12251 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS)) 12252 // The restriction on `FoundRHS` be lifted easily -- it exists only to 12253 // reduce the compile time impact of this optimization. 12254 return false; 12255 12256 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS); 12257 if (!Addend) 12258 return false; 12259 12260 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt(); 12261 12262 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the 12263 // antecedent "`FoundLHS` `Pred` `FoundRHS`". 
12264 ConstantRange FoundLHSRange = 12265 ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS); 12266 12267 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 12268 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 12269 12270 // We can also compute the range of values for `LHS` that satisfy the 12271 // consequent, "`LHS` `Pred` `RHS`": 12272 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 12273 // The antecedent implies the consequent if every value of `LHS` that 12274 // satisfies the antecedent also satisfies the consequent. 12275 return LHSRange.icmp(Pred, ConstRHS); 12276 } 12277 12278 bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 12279 bool IsSigned) { 12280 assert(isKnownPositive(Stride) && "Positive stride expected!"); 12281 12282 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 12283 const SCEV *One = getOne(Stride->getType()); 12284 12285 if (IsSigned) { 12286 APInt MaxRHS = getSignedRangeMax(RHS); 12287 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 12288 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 12289 12290 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 12291 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 12292 } 12293 12294 APInt MaxRHS = getUnsignedRangeMax(RHS); 12295 APInt MaxValue = APInt::getMaxValue(BitWidth); 12296 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 12297 12298 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 12299 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 12300 } 12301 12302 bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 12303 bool IsSigned) { 12304 12305 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 12306 const SCEV *One = getOne(Stride->getType()); 12307 12308 if (IsSigned) { 12309 APInt MinRHS = getSignedRangeMin(RHS); 12310 APInt MinValue = APInt::getSignedMinValue(BitWidth); 12311 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 12312 12313 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 12314 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 12315 } 12316 12317 APInt MinRHS = getUnsignedRangeMin(RHS); 12318 APInt MinValue = APInt::getMinValue(BitWidth); 12319 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 12320 12321 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 12322 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 12323 } 12324 12325 const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) { 12326 // umin(N, 1) + floor((N - umin(N, 1)) / D) 12327 // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin 12328 // expression fixes the case of N=0. 12329 const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType())); 12330 const SCEV *NMinusOne = getMinusSCEV(N, MinNOne); 12331 return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D)); 12332 } 12333 12334 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 12335 const SCEV *Stride, 12336 const SCEV *End, 12337 unsigned BitWidth, 12338 bool IsSigned) { 12339 // The logic in this function assumes we can represent a positive stride. 12340 // If we can't, the backedge-taken count must be zero. 
12341 if (IsSigned && BitWidth == 1) 12342 return getZero(Stride->getType()); 12343 12344 // This code has only been closely audited for negative strides in the 12345 // unsigned comparison case, it may be correct for signed comparison, but 12346 // that needs to be established. 12347 assert((!IsSigned || !isKnownNonPositive(Stride)) && 12348 "Stride is expected strictly positive for signed case!"); 12349 12350 // Calculate the maximum backedge count based on the range of values 12351 // permitted by Start, End, and Stride. 12352 APInt MinStart = 12353 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 12354 12355 APInt MinStride = 12356 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 12357 12358 // We assume either the stride is positive, or the backedge-taken count 12359 // is zero. So force StrideForMaxBECount to be at least one. 12360 APInt One(BitWidth, 1); 12361 APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride) 12362 : APIntOps::umax(One, MinStride); 12363 12364 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 12365 : APInt::getMaxValue(BitWidth); 12366 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 12367 12368 // Although End can be a MAX expression we estimate MaxEnd considering only 12369 // the case End = RHS of the loop termination condition. This is safe because 12370 // in the other case (End - Start) is zero, leading to a zero maximum backedge 12371 // taken count. 12372 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 12373 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 12374 12375 // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride) 12376 MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart) 12377 : APIntOps::umax(MaxEnd, MinStart); 12378 12379 return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */, 12380 getConstant(StrideForMaxBECount) /* Step */); 12381 } 12382 12383 ScalarEvolution::ExitLimit 12384 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 12385 const Loop *L, bool IsSigned, 12386 bool ControlsExit, bool AllowPredicates) { 12387 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 12388 12389 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 12390 bool PredicatedIV = false; 12391 12392 auto canAssumeNoSelfWrap = [&](const SCEVAddRecExpr *AR) { 12393 // Can we prove this loop *must* be UB if overflow of IV occurs? 12394 // Reasoning goes as follows: 12395 // * Suppose the IV did self wrap. 12396 // * If Stride evenly divides the iteration space, then once wrap 12397 // occurs, the loop must revisit the same values. 12398 // * We know that RHS is invariant, and that none of those values 12399 // caused this exit to be taken previously. Thus, this exit is 12400 // dynamically dead. 12401 // * If this is the sole exit, then a dead exit implies the loop 12402 // must be infinite if there are no abnormal exits. 12403 // * If the loop were infinite, then it must either not be mustprogress 12404 // or have side effects. Otherwise, it must be UB. 12405 // * It can't (by assumption), be UB so we have contradicted our 12406 // premise and can conclude the IV did not in fact self-wrap. 
12407 if (!isLoopInvariant(RHS, L)) 12408 return false; 12409 12410 auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)); 12411 if (!StrideC || !StrideC->getAPInt().isPowerOf2()) 12412 return false; 12413 12414 if (!ControlsExit || !loopHasNoAbnormalExits(L)) 12415 return false; 12416 12417 return loopIsFiniteByAssumption(L); 12418 }; 12419 12420 if (!IV) { 12421 if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) { 12422 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand()); 12423 if (AR && AR->getLoop() == L && AR->isAffine()) { 12424 auto canProveNUW = [&]() { 12425 if (!isLoopInvariant(RHS, L)) 12426 return false; 12427 12428 if (!isKnownNonZero(AR->getStepRecurrence(*this))) 12429 // We need the sequence defined by AR to strictly increase in the 12430 // unsigned integer domain for the logic below to hold. 12431 return false; 12432 12433 const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType()); 12434 const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType()); 12435 // If RHS <=u Limit, then there must exist a value V in the sequence 12436 // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and 12437 // V <=u UINT_MAX. Thus, we must exit the loop before unsigned 12438 // overflow occurs. This limit also implies that a signed comparison 12439 // (in the wide bitwidth) is equivalent to an unsigned comparison as 12440 // the high bits on both sides must be zero. 12441 APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this)); 12442 APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1); 12443 Limit = Limit.zext(OuterBitWidth); 12444 return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit); 12445 }; 12446 auto Flags = AR->getNoWrapFlags(); 12447 if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW()) 12448 Flags = setFlags(Flags, SCEV::FlagNUW); 12449 12450 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags); 12451 if (AR->hasNoUnsignedWrap()) { 12452 // Emulate what getZeroExtendExpr would have done during construction 12453 // if we'd been able to infer the fact just above at that time. 12454 const SCEV *Step = AR->getStepRecurrence(*this); 12455 Type *Ty = ZExt->getType(); 12456 auto *S = getAddRecExpr( 12457 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0), 12458 getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags()); 12459 IV = dyn_cast<SCEVAddRecExpr>(S); 12460 } 12461 } 12462 } 12463 } 12464 12465 12466 if (!IV && AllowPredicates) { 12467 // Try to make this an AddRec using runtime tests, in the first X 12468 // iterations of this loop, where X is the SCEV expression found by the 12469 // algorithm below. 12470 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 12471 PredicatedIV = true; 12472 } 12473 12474 // Avoid weird loops 12475 if (!IV || IV->getLoop() != L || !IV->isAffine()) 12476 return getCouldNotCompute(); 12477 12478 // A precondition of this method is that the condition being analyzed 12479 // reaches an exiting branch which dominates the latch. Given that, we can 12480 // assume that an increment which violates the nowrap specification and 12481 // produces poison must cause undefined behavior when the resulting poison 12482 // value is branched upon and thus we can conclude that the backedge is 12483 // taken no more often than would be required to produce that poison value. 
12484 // Note that a well defined loop can exit on the iteration which violates 12485 // the nowrap specification if there is another exit (either explicit or 12486 // implicit/exceptional) which causes the loop to execute before the 12487 // exiting instruction we're analyzing would trigger UB. 12488 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; 12489 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); 12490 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; 12491 12492 const SCEV *Stride = IV->getStepRecurrence(*this); 12493 12494 bool PositiveStride = isKnownPositive(Stride); 12495 12496 // Avoid negative or zero stride values. 12497 if (!PositiveStride) { 12498 // We can compute the correct backedge taken count for loops with unknown 12499 // strides if we can prove that the loop is not an infinite loop with side 12500 // effects. Here's the loop structure we are trying to handle - 12501 // 12502 // i = start 12503 // do { 12504 // A[i] = i; 12505 // i += s; 12506 // } while (i < end); 12507 // 12508 // The backedge taken count for such loops is evaluated as - 12509 // (max(end, start + stride) - start - 1) /u stride 12510 // 12511 // The additional preconditions that we need to check to prove correctness 12512 // of the above formula is as follows - 12513 // 12514 // a) IV is either nuw or nsw depending upon signedness (indicated by the 12515 // NoWrap flag). 12516 // b) the loop is guaranteed to be finite (e.g. is mustprogress and has 12517 // no side effects within the loop) 12518 // c) loop has a single static exit (with no abnormal exits) 12519 // 12520 // Precondition a) implies that if the stride is negative, this is a single 12521 // trip loop. The backedge taken count formula reduces to zero in this case. 12522 // 12523 // Precondition b) and c) combine to imply that if rhs is invariant in L, 12524 // then a zero stride means the backedge can't be taken without executing 12525 // undefined behavior. 12526 // 12527 // The positive stride case is the same as isKnownPositive(Stride) returning 12528 // true (original behavior of the function). 12529 // 12530 if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) || 12531 !loopHasNoAbnormalExits(L)) 12532 return getCouldNotCompute(); 12533 12534 // This bailout is protecting the logic in computeMaxBECountForLT which 12535 // has not yet been sufficiently auditted or tested with negative strides. 12536 // We used to filter out all known-non-positive cases here, we're in the 12537 // process of being less restrictive bit by bit. 12538 if (IsSigned && isKnownNonPositive(Stride)) 12539 return getCouldNotCompute(); 12540 12541 if (!isKnownNonZero(Stride)) { 12542 // If we have a step of zero, and RHS isn't invariant in L, we don't know 12543 // if it might eventually be greater than start and if so, on which 12544 // iteration. We can't even produce a useful upper bound. 12545 if (!isLoopInvariant(RHS, L)) 12546 return getCouldNotCompute(); 12547 12548 // We allow a potentially zero stride, but we need to divide by stride 12549 // below. Since the loop can't be infinite and this check must control 12550 // the sole exit, we can infer the exit must be taken on the first 12551 // iteration (e.g. backedge count = 0) if the stride is zero. Given that, 12552 // we know the numerator in the divides below must be zero, so we can 12553 // pick an arbitrary non-zero value for the denominator (e.g. stride) 12554 // and produce the right result. 
12555 // FIXME: Handle the case where Stride is poison? 12556 auto wouldZeroStrideBeUB = [&]() { 12557 // Proof by contradiction. Suppose the stride were zero. If we can 12558 // prove that the backedge *is* taken on the first iteration, then since 12559 // we know this condition controls the sole exit, we must have an 12560 // infinite loop. We can't have a (well defined) infinite loop per 12561 // check just above. 12562 // Note: The (Start - Stride) term is used to get the start' term from 12563 // (start' + stride,+,stride). Remember that we only care about the 12564 // result of this expression when stride == 0 at runtime. 12565 auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride); 12566 return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS); 12567 }; 12568 if (!wouldZeroStrideBeUB()) { 12569 Stride = getUMaxExpr(Stride, getOne(Stride->getType())); 12570 } 12571 } 12572 } else if (!Stride->isOne() && !NoWrap) { 12573 auto isUBOnWrap = [&]() { 12574 // From no-self-wrap, we need to then prove no-(un)signed-wrap. This 12575 // follows trivially from the fact that every (un)signed-wrapped, but 12576 // not self-wrapped value must be LT than the last value before 12577 // (un)signed wrap. Since we know that last value didn't exit, nor 12578 // will any smaller one. 12579 return canAssumeNoSelfWrap(IV); 12580 }; 12581 12582 // Avoid proven overflow cases: this will ensure that the backedge taken 12583 // count will not generate any unsigned overflow. Relaxed no-overflow 12584 // conditions exploit NoWrapFlags, allowing to optimize in presence of 12585 // undefined behaviors like the case of C language. 12586 if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap()) 12587 return getCouldNotCompute(); 12588 } 12589 12590 // On all paths just preceeding, we established the following invariant: 12591 // IV can be assumed not to overflow up to and including the exiting 12592 // iteration. We proved this in one of two ways: 12593 // 1) We can show overflow doesn't occur before the exiting iteration 12594 // 1a) canIVOverflowOnLT, and b) step of one 12595 // 2) We can show that if overflow occurs, the loop must execute UB 12596 // before any possible exit. 12597 // Note that we have not yet proved RHS invariant (in general). 12598 12599 const SCEV *Start = IV->getStart(); 12600 12601 // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond. 12602 // If we convert to integers, isLoopEntryGuardedByCond will miss some cases. 12603 // Use integer-typed versions for actual computation; we can't subtract 12604 // pointers in general. 12605 const SCEV *OrigStart = Start; 12606 const SCEV *OrigRHS = RHS; 12607 if (Start->getType()->isPointerTy()) { 12608 Start = getLosslessPtrToIntExpr(Start); 12609 if (isa<SCEVCouldNotCompute>(Start)) 12610 return Start; 12611 } 12612 if (RHS->getType()->isPointerTy()) { 12613 RHS = getLosslessPtrToIntExpr(RHS); 12614 if (isa<SCEVCouldNotCompute>(RHS)) 12615 return RHS; 12616 } 12617 12618 // When the RHS is not invariant, we do not know the end bound of the loop and 12619 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 12620 // calculate the MaxBECount, given the start, stride and max value for the end 12621 // bound of the loop (RHS), and the fact that IV does not overflow (which is 12622 // checked above). 
12623 if (!isLoopInvariant(RHS, L)) { 12624 const SCEV *MaxBECount = computeMaxBECountForLT( 12625 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 12626 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 12627 false /*MaxOrZero*/, Predicates); 12628 } 12629 12630 // We use the expression (max(End,Start)-Start)/Stride to describe the 12631 // backedge count, as if the backedge is taken at least once max(End,Start) 12632 // is End and so the result is as above, and if not max(End,Start) is Start 12633 // so we get a backedge count of zero. 12634 const SCEV *BECount = nullptr; 12635 auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride); 12636 assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!"); 12637 assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!"); 12638 assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!"); 12639 // Can we prove (max(RHS,Start) > Start - Stride? 12640 if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) && 12641 isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) { 12642 // In this case, we can use a refined formula for computing backedge taken 12643 // count. The general formula remains: 12644 // "End-Start /uceiling Stride" where "End = max(RHS,Start)" 12645 // We want to use the alternate formula: 12646 // "((End - 1) - (Start - Stride)) /u Stride" 12647 // Let's do a quick case analysis to show these are equivalent under 12648 // our precondition that max(RHS,Start) > Start - Stride. 12649 // * For RHS <= Start, the backedge-taken count must be zero. 12650 // "((End - 1) - (Start - Stride)) /u Stride" reduces to 12651 // "((Start - 1) - (Start - Stride)) /u Stride" which simplies to 12652 // "Stride - 1 /u Stride" which is indeed zero for all non-zero values 12653 // of Stride. For 0 stride, we've use umin(1,Stride) above, reducing 12654 // this to the stride of 1 case. 12655 // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride". 12656 // "((End - 1) - (Start - Stride)) /u Stride" reduces to 12657 // "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to 12658 // "((RHS - (Start - Stride) - 1) /u Stride". 12659 // Our preconditions trivially imply no overflow in that form. 12660 const SCEV *MinusOne = getMinusOne(Stride->getType()); 12661 const SCEV *Numerator = 12662 getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride)); 12663 BECount = getUDivExpr(Numerator, Stride); 12664 } 12665 12666 const SCEV *BECountIfBackedgeTaken = nullptr; 12667 if (!BECount) { 12668 auto canProveRHSGreaterThanEqualStart = [&]() { 12669 auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 12670 if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart)) 12671 return true; 12672 12673 // (RHS > Start - 1) implies RHS >= Start. 12674 // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if 12675 // "Start - 1" doesn't overflow. 12676 // * For signed comparison, if Start - 1 does overflow, it's equal 12677 // to INT_MAX, and "RHS >s INT_MAX" is trivially false. 12678 // * For unsigned comparison, if Start - 1 does overflow, it's equal 12679 // to UINT_MAX, and "RHS >u UINT_MAX" is trivially false. 12680 // 12681 // FIXME: Should isLoopEntryGuardedByCond do this for us? 12682 auto CondGT = IsSigned ? 
ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; 12683 auto *StartMinusOne = getAddExpr(OrigStart, 12684 getMinusOne(OrigStart->getType())); 12685 return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne); 12686 }; 12687 12688 // If we know that RHS >= Start in the context of loop, then we know that 12689 // max(RHS, Start) = RHS at this point. 12690 const SCEV *End; 12691 if (canProveRHSGreaterThanEqualStart()) { 12692 End = RHS; 12693 } else { 12694 // If RHS < Start, the backedge will be taken zero times. So in 12695 // general, we can write the backedge-taken count as: 12696 // 12697 // RHS >= Start ? ceil(RHS - Start) / Stride : 0 12698 // 12699 // We convert it to the following to make it more convenient for SCEV: 12700 // 12701 // ceil(max(RHS, Start) - Start) / Stride 12702 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 12703 12704 // See what would happen if we assume the backedge is taken. This is 12705 // used to compute MaxBECount. 12706 BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride); 12707 } 12708 12709 // At this point, we know: 12710 // 12711 // 1. If IsSigned, Start <=s End; otherwise, Start <=u End 12712 // 2. The index variable doesn't overflow. 12713 // 12714 // Therefore, we know N exists such that 12715 // (Start + Stride * N) >= End, and computing "(Start + Stride * N)" 12716 // doesn't overflow. 12717 // 12718 // Using this information, try to prove whether the addition in 12719 // "(Start - End) + (Stride - 1)" has unsigned overflow. 12720 const SCEV *One = getOne(Stride->getType()); 12721 bool MayAddOverflow = [&] { 12722 if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) { 12723 if (StrideC->getAPInt().isPowerOf2()) { 12724 // Suppose Stride is a power of two, and Start/End are unsigned 12725 // integers. Let UMAX be the largest representable unsigned 12726 // integer. 12727 // 12728 // By the preconditions of this function, we know 12729 // "(Start + Stride * N) >= End", and this doesn't overflow. 12730 // As a formula: 12731 // 12732 // End <= (Start + Stride * N) <= UMAX 12733 // 12734 // Subtracting Start from all the terms: 12735 // 12736 // End - Start <= Stride * N <= UMAX - Start 12737 // 12738 // Since Start is unsigned, UMAX - Start <= UMAX. Therefore: 12739 // 12740 // End - Start <= Stride * N <= UMAX 12741 // 12742 // Stride * N is a multiple of Stride. Therefore, 12743 // 12744 // End - Start <= Stride * N <= UMAX - (UMAX mod Stride) 12745 // 12746 // Since Stride is a power of two, UMAX + 1 is divisible by Stride. 12747 // Therefore, UMAX mod Stride == Stride - 1. So we can write: 12748 // 12749 // End - Start <= Stride * N <= UMAX - Stride - 1 12750 // 12751 // Dropping the middle term: 12752 // 12753 // End - Start <= UMAX - Stride - 1 12754 // 12755 // Adding Stride - 1 to both sides: 12756 // 12757 // (End - Start) + (Stride - 1) <= UMAX 12758 // 12759 // In other words, the addition doesn't have unsigned overflow. 12760 // 12761 // A similar proof works if we treat Start/End as signed values. 12762 // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to 12763 // use signed max instead of unsigned max. Note that we're trying 12764 // to prove a lack of unsigned overflow in either case. 12765 return false; 12766 } 12767 } 12768 if (Start == Stride || Start == getMinusSCEV(Stride, One)) { 12769 // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1. 12770 // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End. 
12771 // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End. 12772 // 12773 // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End. 12774 return false; 12775 } 12776 return true; 12777 }(); 12778 12779 const SCEV *Delta = getMinusSCEV(End, Start); 12780 if (!MayAddOverflow) { 12781 // floor((D + (S - 1)) / S) 12782 // We prefer this formulation if it's legal because it's fewer operations. 12783 BECount = 12784 getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride); 12785 } else { 12786 BECount = getUDivCeilSCEV(Delta, Stride); 12787 } 12788 } 12789 12790 const SCEV *MaxBECount; 12791 bool MaxOrZero = false; 12792 if (isa<SCEVConstant>(BECount)) { 12793 MaxBECount = BECount; 12794 } else if (BECountIfBackedgeTaken && 12795 isa<SCEVConstant>(BECountIfBackedgeTaken)) { 12796 // If we know exactly how many times the backedge will be taken if it's 12797 // taken at least once, then the backedge count will either be that or 12798 // zero. 12799 MaxBECount = BECountIfBackedgeTaken; 12800 MaxOrZero = true; 12801 } else { 12802 MaxBECount = computeMaxBECountForLT( 12803 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 12804 } 12805 12806 if (isa<SCEVCouldNotCompute>(MaxBECount) && 12807 !isa<SCEVCouldNotCompute>(BECount)) 12808 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 12809 12810 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 12811 } 12812 12813 ScalarEvolution::ExitLimit 12814 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 12815 const Loop *L, bool IsSigned, 12816 bool ControlsExit, bool AllowPredicates) { 12817 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 12818 // We handle only IV > Invariant 12819 if (!isLoopInvariant(RHS, L)) 12820 return getCouldNotCompute(); 12821 12822 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 12823 if (!IV && AllowPredicates) 12824 // Try to make this an AddRec using runtime tests, in the first X 12825 // iterations of this loop, where X is the SCEV expression found by the 12826 // algorithm below. 12827 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 12828 12829 // Avoid weird loops 12830 if (!IV || IV->getLoop() != L || !IV->isAffine()) 12831 return getCouldNotCompute(); 12832 12833 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; 12834 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); 12835 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; 12836 12837 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 12838 12839 // Avoid negative or zero stride values 12840 if (!isKnownPositive(Stride)) 12841 return getCouldNotCompute(); 12842 12843 // Avoid proven overflow cases: this will ensure that the backedge taken count 12844 // will not generate any unsigned overflow. Relaxed no-overflow conditions 12845 // exploit NoWrapFlags, allowing to optimize in presence of undefined 12846 // behaviors like the case of C language. 12847 if (!Stride->isOne() && !NoWrap) 12848 if (canIVOverflowOnGT(RHS, Stride, IsSigned)) 12849 return getCouldNotCompute(); 12850 12851 const SCEV *Start = IV->getStart(); 12852 const SCEV *End = RHS; 12853 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 12854 // If we know that Start >= RHS in the context of loop, then we know that 12855 // min(RHS, Start) = RHS at this point. 12856 if (isLoopEntryGuardedByCond( 12857 L, IsSigned ? 
ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 12858 End = RHS; 12859 else 12860 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 12861 } 12862 12863 if (Start->getType()->isPointerTy()) { 12864 Start = getLosslessPtrToIntExpr(Start); 12865 if (isa<SCEVCouldNotCompute>(Start)) 12866 return Start; 12867 } 12868 if (End->getType()->isPointerTy()) { 12869 End = getLosslessPtrToIntExpr(End); 12870 if (isa<SCEVCouldNotCompute>(End)) 12871 return End; 12872 } 12873 12874 // Compute ((Start - End) + (Stride - 1)) / Stride. 12875 // FIXME: This can overflow. Holding off on fixing this for now; 12876 // howManyGreaterThans will hopefully be gone soon. 12877 const SCEV *One = getOne(Stride->getType()); 12878 const SCEV *BECount = getUDivExpr( 12879 getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride); 12880 12881 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 12882 : getUnsignedRangeMax(Start); 12883 12884 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 12885 : getUnsignedRangeMin(Stride); 12886 12887 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 12888 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 12889 : APInt::getMinValue(BitWidth) + (MinStride - 1); 12890 12891 // Although End can be a MIN expression we estimate MinEnd considering only 12892 // the case End = RHS. This is safe because in the other case (Start - End) 12893 // is zero, leading to a zero maximum backedge taken count. 12894 APInt MinEnd = 12895 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 12896 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 12897 12898 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 12899 ? BECount 12900 : getUDivCeilSCEV(getConstant(MaxStart - MinEnd), 12901 getConstant(MinStride)); 12902 12903 if (isa<SCEVCouldNotCompute>(MaxBECount)) 12904 MaxBECount = BECount; 12905 12906 return ExitLimit(BECount, MaxBECount, false, Predicates); 12907 } 12908 12909 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 12910 ScalarEvolution &SE) const { 12911 if (Range.isFullSet()) // Infinite loop. 12912 return SE.getCouldNotCompute(); 12913 12914 // If the start is a non-zero constant, shift the range to simplify things. 12915 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 12916 if (!SC->getValue()->isZero()) { 12917 SmallVector<const SCEV *, 4> Operands(operands()); 12918 Operands[0] = SE.getZero(SC->getType()); 12919 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 12920 getNoWrapFlags(FlagNW)); 12921 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 12922 return ShiftedAddRec->getNumIterationsInRange( 12923 Range.subtract(SC->getAPInt()), SE); 12924 // This is strange and shouldn't happen. 12925 return SE.getCouldNotCompute(); 12926 } 12927 12928 // The only time we can solve this is when we have all constant indices. 12929 // Otherwise, we cannot determine the overflow conditions. 12930 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 12931 return SE.getCouldNotCompute(); 12932 12933 // Okay at this point we know that all elements of the chrec are constants and 12934 // that the start element is zero. 12935 12936 // First check to see if the range contains zero. If not, the first 12937 // iteration exits. 
12938 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 12939 if (!Range.contains(APInt(BitWidth, 0))) 12940 return SE.getZero(getType()); 12941 12942 if (isAffine()) { 12943 // If this is an affine expression then we have this situation: 12944 // Solve {0,+,A} in Range === Ax in Range 12945 12946 // We know that zero is in the range. If A is positive then we know that 12947 // the upper value of the range must be the first possible exit value. 12948 // If A is negative then the lower of the range is the last possible loop 12949 // value. Also note that we already checked for a full range. 12950 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 12951 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 12952 12953 // The exit value should be (End+A)/A. 12954 APInt ExitVal = (End + A).udiv(A); 12955 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 12956 12957 // Evaluate at the exit value. If we really did fall out of the valid 12958 // range, then we computed our trip count, otherwise wrap around or other 12959 // things must have happened. 12960 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 12961 if (Range.contains(Val->getValue())) 12962 return SE.getCouldNotCompute(); // Something strange happened 12963 12964 // Ensure that the previous value is in the range. 12965 assert(Range.contains( 12966 EvaluateConstantChrecAtConstant(this, 12967 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 12968 "Linear scev computation is off in a bad way!"); 12969 return SE.getConstant(ExitValue); 12970 } 12971 12972 if (isQuadratic()) { 12973 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 12974 return SE.getConstant(*S); 12975 } 12976 12977 return SE.getCouldNotCompute(); 12978 } 12979 12980 const SCEVAddRecExpr * 12981 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 12982 assert(getNumOperands() > 1 && "AddRec with zero step?"); 12983 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 12984 // but in this case we cannot guarantee that the value returned will be an 12985 // AddRec because SCEV does not have a fixed point where it stops 12986 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 12987 // may happen if we reach arithmetic depth limit while simplifying. So we 12988 // construct the returned value explicitly. 12989 SmallVector<const SCEV *, 3> Ops; 12990 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 12991 // (this + Step) is {A+B,+,B+C,+...,+,N}. 12992 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 12993 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 12994 // We know that the last operand is not a constant zero (otherwise it would 12995 // have been popped out earlier). This guarantees us that if the result has 12996 // the same last operand, then it will also not be popped out, meaning that 12997 // the returned value will be an AddRec. 12998 const SCEV *Last = getOperand(getNumOperands() - 1); 12999 assert(!Last->isZero() && "Recurrency with zero step?"); 13000 Ops.push_back(Last); 13001 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 13002 SCEV::FlagAnyWrap)); 13003 } 13004 13005 // Return true when S contains at least an undef value. 
13006 bool ScalarEvolution::containsUndefs(const SCEV *S) const { 13007 return SCEVExprContains(S, [](const SCEV *S) { 13008 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 13009 return isa<UndefValue>(SU->getValue()); 13010 return false; 13011 }); 13012 } 13013 13014 // Return true when S contains a value that is a nullptr. 13015 bool ScalarEvolution::containsErasedValue(const SCEV *S) const { 13016 return SCEVExprContains(S, [](const SCEV *S) { 13017 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 13018 return SU->getValue() == nullptr; 13019 return false; 13020 }); 13021 } 13022 13023 /// Return the size of an element read or written by Inst. 13024 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 13025 Type *Ty; 13026 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 13027 Ty = Store->getValueOperand()->getType(); 13028 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 13029 Ty = Load->getType(); 13030 else 13031 return nullptr; 13032 13033 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 13034 return getSizeOfExpr(ETy, Ty); 13035 } 13036 13037 //===----------------------------------------------------------------------===// 13038 // SCEVCallbackVH Class Implementation 13039 //===----------------------------------------------------------------------===// 13040 13041 void ScalarEvolution::SCEVCallbackVH::deleted() { 13042 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 13043 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 13044 SE->ConstantEvolutionLoopExitValue.erase(PN); 13045 SE->eraseValueFromMap(getValPtr()); 13046 // this now dangles! 13047 } 13048 13049 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 13050 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 13051 13052 // Forget all the expressions associated with users of the old value, 13053 // so that future queries will recompute the expressions using the new 13054 // value. 13055 Value *Old = getValPtr(); 13056 SmallVector<User *, 16> Worklist(Old->users()); 13057 SmallPtrSet<User *, 8> Visited; 13058 while (!Worklist.empty()) { 13059 User *U = Worklist.pop_back_val(); 13060 // Deleting the Old value will cause this to dangle. Postpone 13061 // that until everything else is done. 13062 if (U == Old) 13063 continue; 13064 if (!Visited.insert(U).second) 13065 continue; 13066 if (PHINode *PN = dyn_cast<PHINode>(U)) 13067 SE->ConstantEvolutionLoopExitValue.erase(PN); 13068 SE->eraseValueFromMap(U); 13069 llvm::append_range(Worklist, U->users()); 13070 } 13071 // Delete the Old value. 13072 if (PHINode *PN = dyn_cast<PHINode>(Old)) 13073 SE->ConstantEvolutionLoopExitValue.erase(PN); 13074 SE->eraseValueFromMap(Old); 13075 // this now dangles! 13076 } 13077 13078 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 13079 : CallbackVH(V), SE(se) {} 13080 13081 //===----------------------------------------------------------------------===// 13082 // ScalarEvolution Class Implementation 13083 //===----------------------------------------------------------------------===// 13084 13085 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 13086 AssumptionCache &AC, DominatorTree &DT, 13087 LoopInfo &LI) 13088 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 13089 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 13090 LoopDispositions(64), BlockDispositions(64) { 13091 // To use guards for proving predicates, we need to scan every instruction in 13092 // relevant basic blocks, and not just terminators. 
Doing this is a waste of 13093 // time if the IR does not actually contain any calls to 13094 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 13095 // 13096 // This pessimizes the case where a pass that preserves ScalarEvolution wants 13097 // to _add_ guards to the module when there weren't any before, and wants 13098 // ScalarEvolution to optimize based on those guards. For now we prefer to be 13099 // efficient in lieu of being smart in that rather obscure case. 13100 13101 auto *GuardDecl = F.getParent()->getFunction( 13102 Intrinsic::getName(Intrinsic::experimental_guard)); 13103 HasGuards = GuardDecl && !GuardDecl->use_empty(); 13104 } 13105 13106 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 13107 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 13108 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 13109 ValueExprMap(std::move(Arg.ValueExprMap)), 13110 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 13111 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 13112 PendingMerges(std::move(Arg.PendingMerges)), 13113 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 13114 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 13115 PredicatedBackedgeTakenCounts( 13116 std::move(Arg.PredicatedBackedgeTakenCounts)), 13117 BECountUsers(std::move(Arg.BECountUsers)), 13118 ConstantEvolutionLoopExitValue( 13119 std::move(Arg.ConstantEvolutionLoopExitValue)), 13120 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 13121 ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)), 13122 LoopDispositions(std::move(Arg.LoopDispositions)), 13123 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 13124 BlockDispositions(std::move(Arg.BlockDispositions)), 13125 SCEVUsers(std::move(Arg.SCEVUsers)), 13126 UnsignedRanges(std::move(Arg.UnsignedRanges)), 13127 SignedRanges(std::move(Arg.SignedRanges)), 13128 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 13129 UniquePreds(std::move(Arg.UniquePreds)), 13130 SCEVAllocator(std::move(Arg.SCEVAllocator)), 13131 LoopUsers(std::move(Arg.LoopUsers)), 13132 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 13133 FirstUnknown(Arg.FirstUnknown) { 13134 Arg.FirstUnknown = nullptr; 13135 } 13136 13137 ScalarEvolution::~ScalarEvolution() { 13138 // Iterate through all the SCEVUnknown instances and call their 13139 // destructors, so that they release their references to their values. 
13140 for (SCEVUnknown *U = FirstUnknown; U;) { 13141 SCEVUnknown *Tmp = U; 13142 U = U->Next; 13143 Tmp->~SCEVUnknown(); 13144 } 13145 FirstUnknown = nullptr; 13146 13147 ExprValueMap.clear(); 13148 ValueExprMap.clear(); 13149 HasRecMap.clear(); 13150 BackedgeTakenCounts.clear(); 13151 PredicatedBackedgeTakenCounts.clear(); 13152 13153 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 13154 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 13155 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 13156 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 13157 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 13158 } 13159 13160 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 13161 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 13162 } 13163 13164 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 13165 const Loop *L) { 13166 // Print all inner loops first 13167 for (Loop *I : *L) 13168 PrintLoopInfo(OS, SE, I); 13169 13170 OS << "Loop "; 13171 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13172 OS << ": "; 13173 13174 SmallVector<BasicBlock *, 8> ExitingBlocks; 13175 L->getExitingBlocks(ExitingBlocks); 13176 if (ExitingBlocks.size() != 1) 13177 OS << "<multiple exits> "; 13178 13179 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 13180 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 13181 else 13182 OS << "Unpredictable backedge-taken count.\n"; 13183 13184 if (ExitingBlocks.size() > 1) 13185 for (BasicBlock *ExitingBlock : ExitingBlocks) { 13186 OS << " exit count for " << ExitingBlock->getName() << ": " 13187 << *SE->getExitCount(L, ExitingBlock) << "\n"; 13188 } 13189 13190 OS << "Loop "; 13191 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13192 OS << ": "; 13193 13194 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 13195 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 13196 if (SE->isBackedgeTakenCountMaxOrZero(L)) 13197 OS << ", actual taken count either this or zero."; 13198 } else { 13199 OS << "Unpredictable max backedge-taken count. "; 13200 } 13201 13202 OS << "\n" 13203 "Loop "; 13204 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13205 OS << ": "; 13206 13207 SmallVector<const SCEVPredicate *, 4> Preds; 13208 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Preds); 13209 if (!isa<SCEVCouldNotCompute>(PBT)) { 13210 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 13211 OS << " Predicates:\n"; 13212 for (const auto *P : Preds) 13213 P->print(OS, 4); 13214 } else { 13215 OS << "Unpredictable predicated backedge-taken count. 
"; 13216 } 13217 OS << "\n"; 13218 13219 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 13220 OS << "Loop "; 13221 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13222 OS << ": "; 13223 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 13224 } 13225 } 13226 13227 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 13228 switch (LD) { 13229 case ScalarEvolution::LoopVariant: 13230 return "Variant"; 13231 case ScalarEvolution::LoopInvariant: 13232 return "Invariant"; 13233 case ScalarEvolution::LoopComputable: 13234 return "Computable"; 13235 } 13236 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 13237 } 13238 13239 void ScalarEvolution::print(raw_ostream &OS) const { 13240 // ScalarEvolution's implementation of the print method is to print 13241 // out SCEV values of all instructions that are interesting. Doing 13242 // this potentially causes it to create new SCEV objects though, 13243 // which technically conflicts with the const qualifier. This isn't 13244 // observable from outside the class though, so casting away the 13245 // const isn't dangerous. 13246 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 13247 13248 if (ClassifyExpressions) { 13249 OS << "Classifying expressions for: "; 13250 F.printAsOperand(OS, /*PrintType=*/false); 13251 OS << "\n"; 13252 for (Instruction &I : instructions(F)) 13253 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 13254 OS << I << '\n'; 13255 OS << " --> "; 13256 const SCEV *SV = SE.getSCEV(&I); 13257 SV->print(OS); 13258 if (!isa<SCEVCouldNotCompute>(SV)) { 13259 OS << " U: "; 13260 SE.getUnsignedRange(SV).print(OS); 13261 OS << " S: "; 13262 SE.getSignedRange(SV).print(OS); 13263 } 13264 13265 const Loop *L = LI.getLoopFor(I.getParent()); 13266 13267 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 13268 if (AtUse != SV) { 13269 OS << " --> "; 13270 AtUse->print(OS); 13271 if (!isa<SCEVCouldNotCompute>(AtUse)) { 13272 OS << " U: "; 13273 SE.getUnsignedRange(AtUse).print(OS); 13274 OS << " S: "; 13275 SE.getSignedRange(AtUse).print(OS); 13276 } 13277 } 13278 13279 if (L) { 13280 OS << "\t\t" "Exits: "; 13281 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 13282 if (!SE.isLoopInvariant(ExitValue, L)) { 13283 OS << "<<Unknown>>"; 13284 } else { 13285 OS << *ExitValue; 13286 } 13287 13288 bool First = true; 13289 for (const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 13290 if (First) { 13291 OS << "\t\t" "LoopDispositions: { "; 13292 First = false; 13293 } else { 13294 OS << ", "; 13295 } 13296 13297 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13298 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 13299 } 13300 13301 for (const auto *InnerL : depth_first(L)) { 13302 if (InnerL == L) 13303 continue; 13304 if (First) { 13305 OS << "\t\t" "LoopDispositions: { "; 13306 First = false; 13307 } else { 13308 OS << ", "; 13309 } 13310 13311 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13312 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 13313 } 13314 13315 OS << " }"; 13316 } 13317 13318 OS << "\n"; 13319 } 13320 } 13321 13322 OS << "Determining loop execution counts for: "; 13323 F.printAsOperand(OS, /*PrintType=*/false); 13324 OS << "\n"; 13325 for (Loop *I : LI) 13326 PrintLoopInfo(OS, &SE, I); 13327 } 13328 13329 ScalarEvolution::LoopDisposition 13330 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 13331 auto &Values = LoopDispositions[S]; 
13332 for (auto &V : Values) { 13333 if (V.getPointer() == L) 13334 return V.getInt(); 13335 } 13336 Values.emplace_back(L, LoopVariant); 13337 LoopDisposition D = computeLoopDisposition(S, L); 13338 auto &Values2 = LoopDispositions[S]; 13339 for (auto &V : llvm::reverse(Values2)) { 13340 if (V.getPointer() == L) { 13341 V.setInt(D); 13342 break; 13343 } 13344 } 13345 return D; 13346 } 13347 13348 ScalarEvolution::LoopDisposition 13349 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 13350 switch (S->getSCEVType()) { 13351 case scConstant: 13352 return LoopInvariant; 13353 case scPtrToInt: 13354 case scTruncate: 13355 case scZeroExtend: 13356 case scSignExtend: 13357 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 13358 case scAddRecExpr: { 13359 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 13360 13361 // If L is the addrec's loop, it's computable. 13362 if (AR->getLoop() == L) 13363 return LoopComputable; 13364 13365 // Add recurrences are never invariant in the function-body (null loop). 13366 if (!L) 13367 return LoopVariant; 13368 13369 // Everything that is not defined at loop entry is variant. 13370 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 13371 return LoopVariant; 13372 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 13373 " dominate the contained loop's header?"); 13374 13375 // This recurrence is invariant w.r.t. L if AR's loop contains L. 13376 if (AR->getLoop()->contains(L)) 13377 return LoopInvariant; 13378 13379 // This recurrence is variant w.r.t. L if any of its operands 13380 // are variant. 13381 for (const auto *Op : AR->operands()) 13382 if (!isLoopInvariant(Op, L)) 13383 return LoopVariant; 13384 13385 // Otherwise it's loop-invariant. 13386 return LoopInvariant; 13387 } 13388 case scAddExpr: 13389 case scMulExpr: 13390 case scUMaxExpr: 13391 case scSMaxExpr: 13392 case scUMinExpr: 13393 case scSMinExpr: 13394 case scSequentialUMinExpr: { 13395 bool HasVarying = false; 13396 for (const auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 13397 LoopDisposition D = getLoopDisposition(Op, L); 13398 if (D == LoopVariant) 13399 return LoopVariant; 13400 if (D == LoopComputable) 13401 HasVarying = true; 13402 } 13403 return HasVarying ? LoopComputable : LoopInvariant; 13404 } 13405 case scUDivExpr: { 13406 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 13407 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 13408 if (LD == LoopVariant) 13409 return LoopVariant; 13410 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 13411 if (RD == LoopVariant) 13412 return LoopVariant; 13413 return (LD == LoopInvariant && RD == LoopInvariant) ? 13414 LoopInvariant : LoopComputable; 13415 } 13416 case scUnknown: 13417 // All non-instruction values are loop invariant. All instructions are loop 13418 // invariant if they are not contained in the specified loop. 13419 // Instructions are never considered invariant in the function body 13420 // (null loop) because they are defined within the "loop". 13421 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 13422 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 13423 return LoopInvariant; 13424 case scCouldNotCompute: 13425 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 13426 } 13427 llvm_unreachable("Unknown SCEV kind!"); 13428 } 13429 13430 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 13431 return getLoopDisposition(S, L) == LoopInvariant; 13432 } 13433 13434 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 13435 return getLoopDisposition(S, L) == LoopComputable; 13436 } 13437 13438 ScalarEvolution::BlockDisposition 13439 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 13440 auto &Values = BlockDispositions[S]; 13441 for (auto &V : Values) { 13442 if (V.getPointer() == BB) 13443 return V.getInt(); 13444 } 13445 Values.emplace_back(BB, DoesNotDominateBlock); 13446 BlockDisposition D = computeBlockDisposition(S, BB); 13447 auto &Values2 = BlockDispositions[S]; 13448 for (auto &V : llvm::reverse(Values2)) { 13449 if (V.getPointer() == BB) { 13450 V.setInt(D); 13451 break; 13452 } 13453 } 13454 return D; 13455 } 13456 13457 ScalarEvolution::BlockDisposition 13458 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 13459 switch (S->getSCEVType()) { 13460 case scConstant: 13461 return ProperlyDominatesBlock; 13462 case scPtrToInt: 13463 case scTruncate: 13464 case scZeroExtend: 13465 case scSignExtend: 13466 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 13467 case scAddRecExpr: { 13468 // This uses a "dominates" query instead of "properly dominates" query 13469 // to test for proper dominance too, because the instruction which 13470 // produces the addrec's value is a PHI, and a PHI effectively properly 13471 // dominates its entire containing block. 13472 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 13473 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 13474 return DoesNotDominateBlock; 13475 13476 // Fall through into SCEVNAryExpr handling. 13477 LLVM_FALLTHROUGH; 13478 } 13479 case scAddExpr: 13480 case scMulExpr: 13481 case scUMaxExpr: 13482 case scSMaxExpr: 13483 case scUMinExpr: 13484 case scSMinExpr: 13485 case scSequentialUMinExpr: { 13486 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 13487 bool Proper = true; 13488 for (const SCEV *NAryOp : NAry->operands()) { 13489 BlockDisposition D = getBlockDisposition(NAryOp, BB); 13490 if (D == DoesNotDominateBlock) 13491 return DoesNotDominateBlock; 13492 if (D == DominatesBlock) 13493 Proper = false; 13494 } 13495 return Proper ? ProperlyDominatesBlock : DominatesBlock; 13496 } 13497 case scUDivExpr: { 13498 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 13499 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 13500 BlockDisposition LD = getBlockDisposition(LHS, BB); 13501 if (LD == DoesNotDominateBlock) 13502 return DoesNotDominateBlock; 13503 BlockDisposition RD = getBlockDisposition(RHS, BB); 13504 if (RD == DoesNotDominateBlock) 13505 return DoesNotDominateBlock; 13506 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
13507 ProperlyDominatesBlock : DominatesBlock; 13508 } 13509 case scUnknown: 13510 if (Instruction *I = 13511 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 13512 if (I->getParent() == BB) 13513 return DominatesBlock; 13514 if (DT.properlyDominates(I->getParent(), BB)) 13515 return ProperlyDominatesBlock; 13516 return DoesNotDominateBlock; 13517 } 13518 return ProperlyDominatesBlock; 13519 case scCouldNotCompute: 13520 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 13521 } 13522 llvm_unreachable("Unknown SCEV kind!"); 13523 } 13524 13525 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 13526 return getBlockDisposition(S, BB) >= DominatesBlock; 13527 } 13528 13529 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 13530 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 13531 } 13532 13533 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 13534 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 13535 } 13536 13537 void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L, 13538 bool Predicated) { 13539 auto &BECounts = 13540 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts; 13541 auto It = BECounts.find(L); 13542 if (It != BECounts.end()) { 13543 for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) { 13544 if (!isa<SCEVConstant>(ENT.ExactNotTaken)) { 13545 auto UserIt = BECountUsers.find(ENT.ExactNotTaken); 13546 assert(UserIt != BECountUsers.end()); 13547 UserIt->second.erase({L, Predicated}); 13548 } 13549 } 13550 BECounts.erase(It); 13551 } 13552 } 13553 13554 void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) { 13555 SmallPtrSet<const SCEV *, 8> ToForget(SCEVs.begin(), SCEVs.end()); 13556 SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end()); 13557 13558 while (!Worklist.empty()) { 13559 const SCEV *Curr = Worklist.pop_back_val(); 13560 auto Users = SCEVUsers.find(Curr); 13561 if (Users != SCEVUsers.end()) 13562 for (const auto *User : Users->second) 13563 if (ToForget.insert(User).second) 13564 Worklist.push_back(User); 13565 } 13566 13567 for (const auto *S : ToForget) 13568 forgetMemoizedResultsImpl(S); 13569 13570 for (auto I = PredicatedSCEVRewrites.begin(); 13571 I != PredicatedSCEVRewrites.end();) { 13572 std::pair<const SCEV *, const Loop *> Entry = I->first; 13573 if (ToForget.count(Entry.first)) 13574 PredicatedSCEVRewrites.erase(I++); 13575 else 13576 ++I; 13577 } 13578 } 13579 13580 void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) { 13581 LoopDispositions.erase(S); 13582 BlockDispositions.erase(S); 13583 UnsignedRanges.erase(S); 13584 SignedRanges.erase(S); 13585 HasRecMap.erase(S); 13586 MinTrailingZerosCache.erase(S); 13587 13588 auto ExprIt = ExprValueMap.find(S); 13589 if (ExprIt != ExprValueMap.end()) { 13590 for (Value *V : ExprIt->second) { 13591 auto ValueIt = ValueExprMap.find_as(V); 13592 if (ValueIt != ValueExprMap.end()) 13593 ValueExprMap.erase(ValueIt); 13594 } 13595 ExprValueMap.erase(ExprIt); 13596 } 13597 13598 auto ScopeIt = ValuesAtScopes.find(S); 13599 if (ScopeIt != ValuesAtScopes.end()) { 13600 for (const auto &Pair : ScopeIt->second) 13601 if (!isa_and_nonnull<SCEVConstant>(Pair.second)) 13602 erase_value(ValuesAtScopesUsers[Pair.second], 13603 std::make_pair(Pair.first, S)); 13604 ValuesAtScopes.erase(ScopeIt); 13605 } 13606 13607 auto ScopeUserIt = ValuesAtScopesUsers.find(S); 13608 if (ScopeUserIt != ValuesAtScopesUsers.end()) { 13609 
for (const auto &Pair : ScopeUserIt->second) 13610 erase_value(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S)); 13611 ValuesAtScopesUsers.erase(ScopeUserIt); 13612 } 13613 13614 auto BEUsersIt = BECountUsers.find(S); 13615 if (BEUsersIt != BECountUsers.end()) { 13616 // Work on a copy, as forgetBackedgeTakenCounts() will modify the original. 13617 auto Copy = BEUsersIt->second; 13618 for (const auto &Pair : Copy) 13619 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt()); 13620 BECountUsers.erase(BEUsersIt); 13621 } 13622 } 13623 13624 void 13625 ScalarEvolution::getUsedLoops(const SCEV *S, 13626 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 13627 struct FindUsedLoops { 13628 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 13629 : LoopsUsed(LoopsUsed) {} 13630 SmallPtrSetImpl<const Loop *> &LoopsUsed; 13631 bool follow(const SCEV *S) { 13632 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 13633 LoopsUsed.insert(AR->getLoop()); 13634 return true; 13635 } 13636 13637 bool isDone() const { return false; } 13638 }; 13639 13640 FindUsedLoops F(LoopsUsed); 13641 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 13642 } 13643 13644 void ScalarEvolution::getReachableBlocks( 13645 SmallPtrSetImpl<BasicBlock *> &Reachable, Function &F) { 13646 SmallVector<BasicBlock *> Worklist; 13647 Worklist.push_back(&F.getEntryBlock()); 13648 while (!Worklist.empty()) { 13649 BasicBlock *BB = Worklist.pop_back_val(); 13650 if (!Reachable.insert(BB).second) 13651 continue; 13652 13653 Value *Cond; 13654 BasicBlock *TrueBB, *FalseBB; 13655 if (match(BB->getTerminator(), m_Br(m_Value(Cond), m_BasicBlock(TrueBB), 13656 m_BasicBlock(FalseBB)))) { 13657 if (auto *C = dyn_cast<ConstantInt>(Cond)) { 13658 Worklist.push_back(C->isOne() ? TrueBB : FalseBB); 13659 continue; 13660 } 13661 13662 if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) { 13663 const SCEV *L = getSCEV(Cmp->getOperand(0)); 13664 const SCEV *R = getSCEV(Cmp->getOperand(1)); 13665 if (isKnownPredicateViaConstantRanges(Cmp->getPredicate(), L, R)) { 13666 Worklist.push_back(TrueBB); 13667 continue; 13668 } 13669 if (isKnownPredicateViaConstantRanges(Cmp->getInversePredicate(), L, 13670 R)) { 13671 Worklist.push_back(FalseBB); 13672 continue; 13673 } 13674 } 13675 } 13676 13677 append_range(Worklist, successors(BB)); 13678 } 13679 } 13680 13681 void ScalarEvolution::verify() const { 13682 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 13683 ScalarEvolution SE2(F, TLI, AC, DT, LI); 13684 13685 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 13686 13687 // Map's SCEV expressions from one ScalarEvolution "universe" to another. 13688 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 13689 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 13690 13691 const SCEV *visitConstant(const SCEVConstant *Constant) { 13692 return SE.getConstant(Constant->getAPInt()); 13693 } 13694 13695 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 13696 return SE.getUnknown(Expr->getValue()); 13697 } 13698 13699 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 13700 return SE.getCouldNotCompute(); 13701 } 13702 }; 13703 13704 SCEVMapper SCM(SE2); 13705 SmallPtrSet<BasicBlock *, 16> ReachableBlocks; 13706 SE2.getReachableBlocks(ReachableBlocks, F); 13707 13708 auto GetDelta = [&](const SCEV *Old, const SCEV *New) -> const SCEV * { 13709 if (containsUndefs(Old) || containsUndefs(New)) { 13710 // SCEV treats "undef" as an unknown but consistent value (i.e. 
it does 13711 // not propagate undef aggressively). This means we can (and do) fail 13712 // verification in cases where a transform makes a value go from "undef" 13713 // to "undef+1" (say). The transform is fine, since in both cases the 13714 // result is "undef", but SCEV thinks the value increased by 1. 13715 return nullptr; 13716 } 13717 13718 // Unless VerifySCEVStrict is set, we only compare constant deltas. 13719 const SCEV *Delta = SE2.getMinusSCEV(Old, New); 13720 if (!VerifySCEVStrict && !isa<SCEVConstant>(Delta)) 13721 return nullptr; 13722 13723 return Delta; 13724 }; 13725 13726 while (!LoopStack.empty()) { 13727 auto *L = LoopStack.pop_back_val(); 13728 llvm::append_range(LoopStack, *L); 13729 13730 // Only verify BECounts in reachable loops. For an unreachable loop, 13731 // any BECount is legal. 13732 if (!ReachableBlocks.contains(L->getHeader())) 13733 continue; 13734 13735 // Only verify cached BECounts. Computing new BECounts may change the 13736 // results of subsequent SCEV uses. 13737 auto It = BackedgeTakenCounts.find(L); 13738 if (It == BackedgeTakenCounts.end()) 13739 continue; 13740 13741 auto *CurBECount = 13742 SCM.visit(It->second.getExact(L, const_cast<ScalarEvolution *>(this))); 13743 auto *NewBECount = SE2.getBackedgeTakenCount(L); 13744 13745 if (CurBECount == SE2.getCouldNotCompute() || 13746 NewBECount == SE2.getCouldNotCompute()) { 13747 // NB! This situation is legal, but is very suspicious -- whatever pass 13748 // change the loop to make a trip count go from could not compute to 13749 // computable or vice-versa *should have* invalidated SCEV. However, we 13750 // choose not to assert here (for now) since we don't want false 13751 // positives. 13752 continue; 13753 } 13754 13755 if (SE.getTypeSizeInBits(CurBECount->getType()) > 13756 SE.getTypeSizeInBits(NewBECount->getType())) 13757 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 13758 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 13759 SE.getTypeSizeInBits(NewBECount->getType())) 13760 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 13761 13762 const SCEV *Delta = GetDelta(CurBECount, NewBECount); 13763 if (Delta && !Delta->isZero()) { 13764 dbgs() << "Trip Count for " << *L << " Changed!\n"; 13765 dbgs() << "Old: " << *CurBECount << "\n"; 13766 dbgs() << "New: " << *NewBECount << "\n"; 13767 dbgs() << "Delta: " << *Delta << "\n"; 13768 std::abort(); 13769 } 13770 } 13771 13772 // Collect all valid loops currently in LoopInfo. 13773 SmallPtrSet<Loop *, 32> ValidLoops; 13774 SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end()); 13775 while (!Worklist.empty()) { 13776 Loop *L = Worklist.pop_back_val(); 13777 if (ValidLoops.insert(L).second) 13778 Worklist.append(L->begin(), L->end()); 13779 } 13780 for (const auto &KV : ValueExprMap) { 13781 #ifndef NDEBUG 13782 // Check for SCEV expressions referencing invalid/deleted loops. 13783 if (auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) { 13784 assert(ValidLoops.contains(AR->getLoop()) && 13785 "AddRec references invalid loop"); 13786 } 13787 #endif 13788 13789 // Check that the value is also part of the reverse map. 
13790 auto It = ExprValueMap.find(KV.second); 13791 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) { 13792 dbgs() << "Value " << *KV.first 13793 << " is in ValueExprMap but not in ExprValueMap\n"; 13794 std::abort(); 13795 } 13796 13797 if (auto *I = dyn_cast<Instruction>(&*KV.first)) { 13798 if (!ReachableBlocks.contains(I->getParent())) 13799 continue; 13800 const SCEV *OldSCEV = SCM.visit(KV.second); 13801 const SCEV *NewSCEV = SE2.getSCEV(I); 13802 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV); 13803 if (Delta && !Delta->isZero()) { 13804 dbgs() << "SCEV for value " << *I << " changed!\n" 13805 << "Old: " << *OldSCEV << "\n" 13806 << "New: " << *NewSCEV << "\n" 13807 << "Delta: " << *Delta << "\n"; 13808 std::abort(); 13809 } 13810 } 13811 } 13812 13813 for (const auto &KV : ExprValueMap) { 13814 for (Value *V : KV.second) { 13815 auto It = ValueExprMap.find_as(V); 13816 if (It == ValueExprMap.end()) { 13817 dbgs() << "Value " << *V 13818 << " is in ExprValueMap but not in ValueExprMap\n"; 13819 std::abort(); 13820 } 13821 if (It->second != KV.first) { 13822 dbgs() << "Value " << *V << " mapped to " << *It->second 13823 << " rather than " << *KV.first << "\n"; 13824 std::abort(); 13825 } 13826 } 13827 } 13828 13829 // Verify integrity of SCEV users. 13830 for (const auto &S : UniqueSCEVs) { 13831 SmallVector<const SCEV *, 4> Ops; 13832 collectUniqueOps(&S, Ops); 13833 for (const auto *Op : Ops) { 13834 // We do not store dependencies of constants. 13835 if (isa<SCEVConstant>(Op)) 13836 continue; 13837 auto It = SCEVUsers.find(Op); 13838 if (It != SCEVUsers.end() && It->second.count(&S)) 13839 continue; 13840 dbgs() << "Use of operand " << *Op << " by user " << S 13841 << " is not being tracked!\n"; 13842 std::abort(); 13843 } 13844 } 13845 13846 // Verify integrity of ValuesAtScopes users. 13847 for (const auto &ValueAndVec : ValuesAtScopes) { 13848 const SCEV *Value = ValueAndVec.first; 13849 for (const auto &LoopAndValueAtScope : ValueAndVec.second) { 13850 const Loop *L = LoopAndValueAtScope.first; 13851 const SCEV *ValueAtScope = LoopAndValueAtScope.second; 13852 if (!isa<SCEVConstant>(ValueAtScope)) { 13853 auto It = ValuesAtScopesUsers.find(ValueAtScope); 13854 if (It != ValuesAtScopesUsers.end() && 13855 is_contained(It->second, std::make_pair(L, Value))) 13856 continue; 13857 dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: " 13858 << *ValueAtScope << " missing in ValuesAtScopesUsers\n"; 13859 std::abort(); 13860 } 13861 } 13862 } 13863 13864 for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) { 13865 const SCEV *ValueAtScope = ValueAtScopeAndVec.first; 13866 for (const auto &LoopAndValue : ValueAtScopeAndVec.second) { 13867 const Loop *L = LoopAndValue.first; 13868 const SCEV *Value = LoopAndValue.second; 13869 assert(!isa<SCEVConstant>(Value)); 13870 auto It = ValuesAtScopes.find(Value); 13871 if (It != ValuesAtScopes.end() && 13872 is_contained(It->second, std::make_pair(L, ValueAtScope))) 13873 continue; 13874 dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: " 13875 << *ValueAtScope << " missing in ValuesAtScopes\n"; 13876 std::abort(); 13877 } 13878 } 13879 13880 // Verify integrity of BECountUsers. 13881 auto VerifyBECountUsers = [&](bool Predicated) { 13882 auto &BECounts = 13883 Predicated ? 
PredicatedBackedgeTakenCounts : BackedgeTakenCounts; 13884 for (const auto &LoopAndBEInfo : BECounts) { 13885 for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) { 13886 if (!isa<SCEVConstant>(ENT.ExactNotTaken)) { 13887 auto UserIt = BECountUsers.find(ENT.ExactNotTaken); 13888 if (UserIt != BECountUsers.end() && 13889 UserIt->second.contains({ LoopAndBEInfo.first, Predicated })) 13890 continue; 13891 dbgs() << "Value " << *ENT.ExactNotTaken << " for loop " 13892 << *LoopAndBEInfo.first << " missing from BECountUsers\n"; 13893 std::abort(); 13894 } 13895 } 13896 } 13897 }; 13898 VerifyBECountUsers(/* Predicated */ false); 13899 VerifyBECountUsers(/* Predicated */ true); 13900 } 13901 13902 bool ScalarEvolution::invalidate( 13903 Function &F, const PreservedAnalyses &PA, 13904 FunctionAnalysisManager::Invalidator &Inv) { 13905 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 13906 // of its dependencies is invalidated. 13907 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 13908 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 13909 Inv.invalidate<AssumptionAnalysis>(F, PA) || 13910 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 13911 Inv.invalidate<LoopAnalysis>(F, PA); 13912 } 13913 13914 AnalysisKey ScalarEvolutionAnalysis::Key; 13915 13916 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 13917 FunctionAnalysisManager &AM) { 13918 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 13919 AM.getResult<AssumptionAnalysis>(F), 13920 AM.getResult<DominatorTreeAnalysis>(F), 13921 AM.getResult<LoopAnalysis>(F)); 13922 } 13923 13924 PreservedAnalyses 13925 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 13926 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 13927 return PreservedAnalyses::all(); 13928 } 13929 13930 PreservedAnalyses 13931 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 13932 // For compatibility with opt's -analyze feature under legacy pass manager 13933 // which was not ported to NPM. This keeps tests using 13934 // update_analyze_test_checks.py working. 
13935 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 13936 << F.getName() << "':\n"; 13937 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 13938 return PreservedAnalyses::all(); 13939 } 13940 13941 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 13942 "Scalar Evolution Analysis", false, true) 13943 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 13944 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 13945 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 13946 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 13947 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 13948 "Scalar Evolution Analysis", false, true) 13949 13950 char ScalarEvolutionWrapperPass::ID = 0; 13951 13952 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 13953 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 13954 } 13955 13956 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 13957 SE.reset(new ScalarEvolution( 13958 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 13959 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 13960 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 13961 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 13962 return false; 13963 } 13964 13965 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 13966 13967 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 13968 SE->print(OS); 13969 } 13970 13971 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 13972 if (!VerifySCEV) 13973 return; 13974 13975 SE->verify(); 13976 } 13977 13978 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 13979 AU.setPreservesAll(); 13980 AU.addRequiredTransitive<AssumptionCacheTracker>(); 13981 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 13982 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 13983 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 13984 } 13985 13986 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 13987 const SCEV *RHS) { 13988 return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS); 13989 } 13990 13991 const SCEVPredicate * 13992 ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred, 13993 const SCEV *LHS, const SCEV *RHS) { 13994 FoldingSetNodeID ID; 13995 assert(LHS->getType() == RHS->getType() && 13996 "Type mismatch between LHS and RHS"); 13997 // Unique this node based on the arguments 13998 ID.AddInteger(SCEVPredicate::P_Compare); 13999 ID.AddInteger(Pred); 14000 ID.AddPointer(LHS); 14001 ID.AddPointer(RHS); 14002 void *IP = nullptr; 14003 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 14004 return S; 14005 SCEVComparePredicate *Eq = new (SCEVAllocator) 14006 SCEVComparePredicate(ID.Intern(SCEVAllocator), Pred, LHS, RHS); 14007 UniquePreds.InsertNode(Eq, IP); 14008 return Eq; 14009 } 14010 14011 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 14012 const SCEVAddRecExpr *AR, 14013 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 14014 FoldingSetNodeID ID; 14015 // Unique this node based on the arguments 14016 ID.AddInteger(SCEVPredicate::P_Wrap); 14017 ID.AddPointer(AR); 14018 ID.AddInteger(AddedFlags); 14019 void *IP = nullptr; 14020 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 14021 return S; 14022 auto *OF = new (SCEVAllocator) 14023 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 14024 UniquePreds.InsertNode(OF, IP); 14025 return OF; 14026 } 
14027 14028 namespace { 14029 14030 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 14031 public: 14032 14033 /// Rewrites \p S in the context of a loop L and the SCEV predication 14034 /// infrastructure. 14035 /// 14036 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 14037 /// equivalences present in \p Pred. 14038 /// 14039 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 14040 /// \p NewPreds such that the result will be an AddRecExpr. 14041 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 14042 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 14043 const SCEVPredicate *Pred) { 14044 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 14045 return Rewriter.visit(S); 14046 } 14047 14048 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 14049 if (Pred) { 14050 if (auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) { 14051 for (const auto *Pred : U->getPredicates()) 14052 if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) 14053 if (IPred->getLHS() == Expr && 14054 IPred->getPredicate() == ICmpInst::ICMP_EQ) 14055 return IPred->getRHS(); 14056 } else if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) { 14057 if (IPred->getLHS() == Expr && 14058 IPred->getPredicate() == ICmpInst::ICMP_EQ) 14059 return IPred->getRHS(); 14060 } 14061 } 14062 return convertToAddRecWithPreds(Expr); 14063 } 14064 14065 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 14066 const SCEV *Operand = visit(Expr->getOperand()); 14067 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 14068 if (AR && AR->getLoop() == L && AR->isAffine()) { 14069 // This couldn't be folded because the operand didn't have the nuw 14070 // flag. Add the nusw flag as an assumption that we could make. 14071 const SCEV *Step = AR->getStepRecurrence(SE); 14072 Type *Ty = Expr->getType(); 14073 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 14074 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 14075 SE.getSignExtendExpr(Step, Ty), L, 14076 AR->getNoWrapFlags()); 14077 } 14078 return SE.getZeroExtendExpr(Operand, Expr->getType()); 14079 } 14080 14081 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 14082 const SCEV *Operand = visit(Expr->getOperand()); 14083 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 14084 if (AR && AR->getLoop() == L && AR->isAffine()) { 14085 // This couldn't be folded because the operand didn't have the nsw 14086 // flag. Add the nssw flag as an assumption that we could make. 14087 const SCEV *Step = AR->getStepRecurrence(SE); 14088 Type *Ty = Expr->getType(); 14089 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 14090 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 14091 SE.getSignExtendExpr(Step, Ty), L, 14092 AR->getNoWrapFlags()); 14093 } 14094 return SE.getSignExtendExpr(Operand, Expr->getType()); 14095 } 14096 14097 private: 14098 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 14099 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 14100 const SCEVPredicate *Pred) 14101 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 14102 14103 bool addOverflowAssumption(const SCEVPredicate *P) { 14104 if (!NewPreds) { 14105 // Check if we've already made this assumption. 
14106 return Pred && Pred->implies(P); 14107 } 14108 NewPreds->insert(P); 14109 return true; 14110 } 14111 14112 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 14113 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 14114 auto *A = SE.getWrapPredicate(AR, AddedFlags); 14115 return addOverflowAssumption(A); 14116 } 14117 14118 // If \p Expr represents a PHINode, we try to see if it can be represented 14119 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 14120 // to add this predicate as a runtime overflow check, we return the AddRec. 14121 // If \p Expr does not meet these conditions (is not a PHI node, or we 14122 // couldn't create an AddRec for it, or couldn't add the predicate), we just 14123 // return \p Expr. 14124 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 14125 if (!isa<PHINode>(Expr->getValue())) 14126 return Expr; 14127 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 14128 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 14129 if (!PredicatedRewrite) 14130 return Expr; 14131 for (const auto *P : PredicatedRewrite->second){ 14132 // Wrap predicates from outer loops are not supported. 14133 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 14134 if (L != WP->getExpr()->getLoop()) 14135 return Expr; 14136 } 14137 if (!addOverflowAssumption(P)) 14138 return Expr; 14139 } 14140 return PredicatedRewrite->first; 14141 } 14142 14143 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 14144 const SCEVPredicate *Pred; 14145 const Loop *L; 14146 }; 14147 14148 } // end anonymous namespace 14149 14150 const SCEV * 14151 ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 14152 const SCEVPredicate &Preds) { 14153 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 14154 } 14155 14156 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 14157 const SCEV *S, const Loop *L, 14158 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 14159 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 14160 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 14161 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 14162 14163 if (!AddRec) 14164 return nullptr; 14165 14166 // Since the transformation was successful, we can now transfer the SCEV 14167 // predicates. 
14168 for (const auto *P : TransformPreds) 14169 Preds.insert(P); 14170 14171 return AddRec; 14172 } 14173 14174 /// SCEV predicates 14175 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 14176 SCEVPredicateKind Kind) 14177 : FastID(ID), Kind(Kind) {} 14178 14179 SCEVComparePredicate::SCEVComparePredicate(const FoldingSetNodeIDRef ID, 14180 const ICmpInst::Predicate Pred, 14181 const SCEV *LHS, const SCEV *RHS) 14182 : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) { 14183 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 14184 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 14185 } 14186 14187 bool SCEVComparePredicate::implies(const SCEVPredicate *N) const { 14188 const auto *Op = dyn_cast<SCEVComparePredicate>(N); 14189 14190 if (!Op) 14191 return false; 14192 14193 if (Pred != ICmpInst::ICMP_EQ) 14194 return false; 14195 14196 return Op->LHS == LHS && Op->RHS == RHS; 14197 } 14198 14199 bool SCEVComparePredicate::isAlwaysTrue() const { return false; } 14200 14201 void SCEVComparePredicate::print(raw_ostream &OS, unsigned Depth) const { 14202 if (Pred == ICmpInst::ICMP_EQ) 14203 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 14204 else 14205 OS.indent(Depth) << "Compare predicate: " << *LHS 14206 << " " << CmpInst::getPredicateName(Pred) << ") " 14207 << *RHS << "\n"; 14208 14209 } 14210 14211 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 14212 const SCEVAddRecExpr *AR, 14213 IncrementWrapFlags Flags) 14214 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 14215 14216 const SCEVAddRecExpr *SCEVWrapPredicate::getExpr() const { return AR; } 14217 14218 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 14219 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 14220 14221 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 14222 } 14223 14224 bool SCEVWrapPredicate::isAlwaysTrue() const { 14225 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 14226 IncrementWrapFlags IFlags = Flags; 14227 14228 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 14229 IFlags = clearFlags(IFlags, IncrementNSSW); 14230 14231 return IFlags == IncrementAnyWrap; 14232 } 14233 14234 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 14235 OS.indent(Depth) << *getExpr() << " Added Flags: "; 14236 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 14237 OS << "<nusw>"; 14238 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 14239 OS << "<nssw>"; 14240 OS << "\n"; 14241 } 14242 14243 SCEVWrapPredicate::IncrementWrapFlags 14244 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 14245 ScalarEvolution &SE) { 14246 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 14247 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 14248 14249 // We can safely transfer the NSW flag as NSSW. 14250 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 14251 ImpliedFlags = IncrementNSSW; 14252 14253 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 14254 // If the increment is positive, the SCEV NUW flag will also imply the 14255 // WrapPredicate NUSW flag. 14256 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 14257 if (Step->getValue()->getValue().isNonNegative()) 14258 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 14259 } 14260 14261 return ImpliedFlags; 14262 } 14263 14264 /// Union predicates don't get cached so create a dummy set ID for it. 
14265 SCEVUnionPredicate::SCEVUnionPredicate(ArrayRef<const SCEVPredicate *> Preds) 14266 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) { 14267 for (const auto *P : Preds) 14268 add(P); 14269 } 14270 14271 bool SCEVUnionPredicate::isAlwaysTrue() const { 14272 return all_of(Preds, 14273 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 14274 } 14275 14276 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 14277 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 14278 return all_of(Set->Preds, 14279 [this](const SCEVPredicate *I) { return this->implies(I); }); 14280 14281 return any_of(Preds, 14282 [N](const SCEVPredicate *I) { return I->implies(N); }); 14283 } 14284 14285 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 14286 for (const auto *Pred : Preds) 14287 Pred->print(OS, Depth); 14288 } 14289 14290 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 14291 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 14292 for (const auto *Pred : Set->Preds) 14293 add(Pred); 14294 return; 14295 } 14296 14297 Preds.push_back(N); 14298 } 14299 14300 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 14301 Loop &L) 14302 : SE(SE), L(L) { 14303 SmallVector<const SCEVPredicate*, 4> Empty; 14304 Preds = std::make_unique<SCEVUnionPredicate>(Empty); 14305 } 14306 14307 void ScalarEvolution::registerUser(const SCEV *User, 14308 ArrayRef<const SCEV *> Ops) { 14309 for (const auto *Op : Ops) 14310 // We do not expect that forgetting cached data for SCEVConstants will ever 14311 // open any prospects for sharpening or introduce any correctness issues, 14312 // so we don't bother storing their dependencies. 14313 if (!isa<SCEVConstant>(Op)) 14314 SCEVUsers[Op].insert(User); 14315 } 14316 14317 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 14318 const SCEV *Expr = SE.getSCEV(V); 14319 RewriteEntry &Entry = RewriteMap[Expr]; 14320 14321 // If we already have an entry and the version matches, return it. 14322 if (Entry.second && Generation == Entry.first) 14323 return Entry.second; 14324 14325 // We found an entry but it's stale. Rewrite the stale entry 14326 // according to the current predicate. 14327 if (Entry.second) 14328 Expr = Entry.second; 14329 14330 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds); 14331 Entry = {Generation, NewSCEV}; 14332 14333 return NewSCEV; 14334 } 14335 14336 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 14337 if (!BackedgeCount) { 14338 SmallVector<const SCEVPredicate *, 4> Preds; 14339 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds); 14340 for (const auto *P : Preds) 14341 addPredicate(*P); 14342 } 14343 return BackedgeCount; 14344 } 14345 14346 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 14347 if (Preds->implies(&Pred)) 14348 return; 14349 14350 auto &OldPreds = Preds->getPredicates(); 14351 SmallVector<const SCEVPredicate*, 4> NewPreds(OldPreds.begin(), OldPreds.end()); 14352 NewPreds.push_back(&Pred); 14353 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds); 14354 updateGeneration(); 14355 } 14356 14357 const SCEVPredicate &PredicatedScalarEvolution::getPredicate() const { 14358 return *Preds; 14359 } 14360 14361 void PredicatedScalarEvolution::updateGeneration() { 14362 // If the generation number wrapped recompute everything. 
14363 if (++Generation == 0) { 14364 for (auto &II : RewriteMap) { 14365 const SCEV *Rewritten = II.second.second; 14366 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)}; 14367 } 14368 } 14369 } 14370 14371 void PredicatedScalarEvolution::setNoOverflow( 14372 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 14373 const SCEV *Expr = getSCEV(V); 14374 const auto *AR = cast<SCEVAddRecExpr>(Expr); 14375 14376 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 14377 14378 // Clear the statically implied flags. 14379 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 14380 addPredicate(*SE.getWrapPredicate(AR, Flags)); 14381 14382 auto II = FlagsMap.insert({V, Flags}); 14383 if (!II.second) 14384 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 14385 } 14386 14387 bool PredicatedScalarEvolution::hasNoOverflow( 14388 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 14389 const SCEV *Expr = getSCEV(V); 14390 const auto *AR = cast<SCEVAddRecExpr>(Expr); 14391 14392 Flags = SCEVWrapPredicate::clearFlags( 14393 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 14394 14395 auto II = FlagsMap.find(V); 14396 14397 if (II != FlagsMap.end()) 14398 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 14399 14400 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 14401 } 14402 14403 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 14404 const SCEV *Expr = this->getSCEV(V); 14405 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 14406 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 14407 14408 if (!New) 14409 return nullptr; 14410 14411 for (const auto *P : NewPreds) 14412 addPredicate(*P); 14413 14414 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 14415 return New; 14416 } 14417 14418 PredicatedScalarEvolution::PredicatedScalarEvolution( 14419 const PredicatedScalarEvolution &Init) 14420 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), 14421 Preds(std::make_unique<SCEVUnionPredicate>(Init.Preds->getPredicates())), 14422 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 14423 for (auto I : Init.FlagsMap) 14424 FlagsMap.insert(I); 14425 } 14426 14427 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 14428 // For each block. 14429 for (auto *BB : L.getBlocks()) 14430 for (auto &I : *BB) { 14431 if (!SE.isSCEVable(I.getType())) 14432 continue; 14433 14434 auto *Expr = SE.getSCEV(&I); 14435 auto II = RewriteMap.find(Expr); 14436 14437 if (II == RewriteMap.end()) 14438 continue; 14439 14440 // Don't print things that are not interesting. 14441 if (II->second.second == Expr) 14442 continue; 14443 14444 OS.indent(Depth) << "[PSE]" << I << ":\n"; 14445 OS.indent(Depth + 2) << *Expr << "\n"; 14446 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 14447 } 14448 } 14449 14450 // Match the mathematical pattern A - (A / B) * B, where A and B can be 14451 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used 14452 // for URem with constant power-of-2 second operands. 14453 // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is 14454 // 4, A / B becomes X / 8). 14455 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, 14456 const SCEV *&RHS) { 14457 // Try to match 'zext (trunc A to iB) to iY', which is used 14458 // for URem with constant power-of-2 second operands. Make sure the size of 14459 // the operand A matches the size of the whole expressions. 
14460 if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr)) 14461 if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) { 14462 LHS = Trunc->getOperand(); 14463 // Bail out if the type of the LHS is larger than the type of the 14464 // expression for now. 14465 if (getTypeSizeInBits(LHS->getType()) > 14466 getTypeSizeInBits(Expr->getType())) 14467 return false; 14468 if (LHS->getType() != Expr->getType()) 14469 LHS = getZeroExtendExpr(LHS, Expr->getType()); 14470 RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1) 14471 << getTypeSizeInBits(Trunc->getType())); 14472 return true; 14473 } 14474 const auto *Add = dyn_cast<SCEVAddExpr>(Expr); 14475 if (Add == nullptr || Add->getNumOperands() != 2) 14476 return false; 14477 14478 const SCEV *A = Add->getOperand(1); 14479 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0)); 14480 14481 if (Mul == nullptr) 14482 return false; 14483 14484 const auto MatchURemWithDivisor = [&](const SCEV *B) { 14485 // (SomeExpr + (-(SomeExpr / B) * B)). 14486 if (Expr == getURemExpr(A, B)) { 14487 LHS = A; 14488 RHS = B; 14489 return true; 14490 } 14491 return false; 14492 }; 14493 14494 // (SomeExpr + (-1 * (SomeExpr / B) * B)). 14495 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0))) 14496 return MatchURemWithDivisor(Mul->getOperand(1)) || 14497 MatchURemWithDivisor(Mul->getOperand(2)); 14498 14499 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). 14500 if (Mul->getNumOperands() == 2) 14501 return MatchURemWithDivisor(Mul->getOperand(1)) || 14502 MatchURemWithDivisor(Mul->getOperand(0)) || 14503 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || 14504 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); 14505 return false; 14506 } 14507 14508 const SCEV * 14509 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) { 14510 SmallVector<BasicBlock*, 16> ExitingBlocks; 14511 L->getExitingBlocks(ExitingBlocks); 14512 14513 // Form an expression for the maximum exit count possible for this loop. We 14514 // merge the max and exact information to approximate a version of 14515 // getConstantMaxBackedgeTakenCount which isn't restricted to just constants. 14516 SmallVector<const SCEV*, 4> ExitCounts; 14517 for (BasicBlock *ExitingBB : ExitingBlocks) { 14518 const SCEV *ExitCount = getExitCount(L, ExitingBB); 14519 if (isa<SCEVCouldNotCompute>(ExitCount)) 14520 ExitCount = getExitCount(L, ExitingBB, 14521 ScalarEvolution::ConstantMaximum); 14522 if (!isa<SCEVCouldNotCompute>(ExitCount)) { 14523 assert(DT.dominates(ExitingBB, L->getLoopLatch()) && 14524 "We should only have known counts for exiting blocks that " 14525 "dominate latch!"); 14526 ExitCounts.push_back(ExitCount); 14527 } 14528 } 14529 if (ExitCounts.empty()) 14530 return getCouldNotCompute(); 14531 return getUMinFromMismatchedTypes(ExitCounts); 14532 } 14533 14534 /// A rewriter to replace SCEV expressions in Map with the corresponding entry 14535 /// in the map. It skips AddRecExpr because we cannot guarantee that the 14536 /// replacement is loop invariant in the loop of the AddRec. 14537 /// 14538 /// At the moment only rewriting SCEVUnknown and SCEVZeroExtendExpr is 14539 /// supported. 
14540 class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> { 14541 const DenseMap<const SCEV *, const SCEV *> ⤅ 14542 14543 public: 14544 SCEVLoopGuardRewriter(ScalarEvolution &SE, 14545 DenseMap<const SCEV *, const SCEV *> &M) 14546 : SCEVRewriteVisitor(SE), Map(M) {} 14547 14548 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } 14549 14550 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 14551 auto I = Map.find(Expr); 14552 if (I == Map.end()) 14553 return Expr; 14554 return I->second; 14555 } 14556 14557 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 14558 auto I = Map.find(Expr); 14559 if (I == Map.end()) 14560 return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr( 14561 Expr); 14562 return I->second; 14563 } 14564 }; 14565 14566 const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) { 14567 SmallVector<const SCEV *> ExprsToRewrite; 14568 auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS, 14569 const SCEV *RHS, 14570 DenseMap<const SCEV *, const SCEV *> 14571 &RewriteMap) { 14572 // WARNING: It is generally unsound to apply any wrap flags to the proposed 14573 // replacement SCEV which isn't directly implied by the structure of that 14574 // SCEV. In particular, using contextual facts to imply flags is *NOT* 14575 // legal. See the scoping rules for flags in the header to understand why. 14576 14577 // If LHS is a constant, apply information to the other expression. 14578 if (isa<SCEVConstant>(LHS)) { 14579 std::swap(LHS, RHS); 14580 Predicate = CmpInst::getSwappedPredicate(Predicate); 14581 } 14582 14583 // Check for a condition of the form (-C1 + X < C2). InstCombine will 14584 // create this form when combining two checks of the form (X u< C2 + C1) and 14585 // (X >=u C1). 14586 auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap, 14587 &ExprsToRewrite]() { 14588 auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS); 14589 if (!AddExpr || AddExpr->getNumOperands() != 2) 14590 return false; 14591 14592 auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0)); 14593 auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1)); 14594 auto *C2 = dyn_cast<SCEVConstant>(RHS); 14595 if (!C1 || !C2 || !LHSUnknown) 14596 return false; 14597 14598 auto ExactRegion = 14599 ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt()) 14600 .sub(C1->getAPInt()); 14601 14602 // Bail out, unless we have a non-wrapping, monotonic range. 14603 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet()) 14604 return false; 14605 auto I = RewriteMap.find(LHSUnknown); 14606 const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown; 14607 RewriteMap[LHSUnknown] = getUMaxExpr( 14608 getConstant(ExactRegion.getUnsignedMin()), 14609 getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax()))); 14610 ExprsToRewrite.push_back(LHSUnknown); 14611 return true; 14612 }; 14613 if (MatchRangeCheckIdiom()) 14614 return; 14615 14616 // If we have LHS == 0, check if LHS is computing a property of some unknown 14617 // SCEV %v which we can rewrite %v to express explicitly. 14618 const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS); 14619 if (Predicate == CmpInst::ICMP_EQ && RHSC && 14620 RHSC->getValue()->isNullValue()) { 14621 // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to 14622 // explicitly express that. 
14623 const SCEV *URemLHS = nullptr; 14624 const SCEV *URemRHS = nullptr; 14625 if (matchURem(LHS, URemLHS, URemRHS)) { 14626 if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) { 14627 auto Multiple = getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS); 14628 RewriteMap[LHSUnknown] = Multiple; 14629 ExprsToRewrite.push_back(LHSUnknown); 14630 return; 14631 } 14632 } 14633 } 14634 14635 // Do not apply information for constants or if RHS contains an AddRec. 14636 if (isa<SCEVConstant>(LHS) || containsAddRecurrence(RHS)) 14637 return; 14638 14639 // If RHS is SCEVUnknown, make sure the information is applied to it. 14640 if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) { 14641 std::swap(LHS, RHS); 14642 Predicate = CmpInst::getSwappedPredicate(Predicate); 14643 } 14644 14645 // Limit to expressions that can be rewritten. 14646 if (!isa<SCEVUnknown>(LHS) && !isa<SCEVZeroExtendExpr>(LHS)) 14647 return; 14648 14649 // Check whether LHS has already been rewritten. In that case we want to 14650 // chain further rewrites onto the already rewritten value. 14651 auto I = RewriteMap.find(LHS); 14652 const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS; 14653 14654 const SCEV *RewrittenRHS = nullptr; 14655 switch (Predicate) { 14656 case CmpInst::ICMP_ULT: 14657 RewrittenRHS = 14658 getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType()))); 14659 break; 14660 case CmpInst::ICMP_SLT: 14661 RewrittenRHS = 14662 getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType()))); 14663 break; 14664 case CmpInst::ICMP_ULE: 14665 RewrittenRHS = getUMinExpr(RewrittenLHS, RHS); 14666 break; 14667 case CmpInst::ICMP_SLE: 14668 RewrittenRHS = getSMinExpr(RewrittenLHS, RHS); 14669 break; 14670 case CmpInst::ICMP_UGT: 14671 RewrittenRHS = 14672 getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType()))); 14673 break; 14674 case CmpInst::ICMP_SGT: 14675 RewrittenRHS = 14676 getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType()))); 14677 break; 14678 case CmpInst::ICMP_UGE: 14679 RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS); 14680 break; 14681 case CmpInst::ICMP_SGE: 14682 RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS); 14683 break; 14684 case CmpInst::ICMP_EQ: 14685 if (isa<SCEVConstant>(RHS)) 14686 RewrittenRHS = RHS; 14687 break; 14688 case CmpInst::ICMP_NE: 14689 if (isa<SCEVConstant>(RHS) && 14690 cast<SCEVConstant>(RHS)->getValue()->isNullValue()) 14691 RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType())); 14692 break; 14693 default: 14694 break; 14695 } 14696 14697 if (RewrittenRHS) { 14698 RewriteMap[LHS] = RewrittenRHS; 14699 if (LHS == RewrittenLHS) 14700 ExprsToRewrite.push_back(LHS); 14701 } 14702 }; 14703 14704 SmallVector<std::pair<Value *, bool>> Terms; 14705 // First, collect information from assumptions dominating the loop. 14706 for (auto &AssumeVH : AC.assumptions()) { 14707 if (!AssumeVH) 14708 continue; 14709 auto *AssumeI = cast<CallInst>(AssumeVH); 14710 if (!DT.dominates(AssumeI, L->getHeader())) 14711 continue; 14712 Terms.emplace_back(AssumeI->getOperand(0), true); 14713 } 14714 14715 // Second, collect conditions from dominating branches. Starting at the loop 14716 // predecessor, climb up the predecessor chain, as long as there are 14717 // predecessors that can be found that have unique successors leading to the 14718 // original header. 14719 // TODO: share this logic with isLoopEntryGuardedByCond. 
14720 for (std::pair<const BasicBlock *, const BasicBlock *> Pair( 14721 L->getLoopPredecessor(), L->getHeader()); 14722 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 14723 14724 const BranchInst *LoopEntryPredicate = 14725 dyn_cast<BranchInst>(Pair.first->getTerminator()); 14726 if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional()) 14727 continue; 14728 14729 Terms.emplace_back(LoopEntryPredicate->getCondition(), 14730 LoopEntryPredicate->getSuccessor(0) == Pair.second); 14731 } 14732 14733 // Now apply the information from the collected conditions to RewriteMap. 14734 // Conditions are processed in reverse order, so the earliest conditions is 14735 // processed first. This ensures the SCEVs with the shortest dependency chains 14736 // are constructed first. 14737 DenseMap<const SCEV *, const SCEV *> RewriteMap; 14738 for (auto &E : reverse(Terms)) { 14739 bool EnterIfTrue = E.second; 14740 SmallVector<Value *, 8> Worklist; 14741 SmallPtrSet<Value *, 8> Visited; 14742 Worklist.push_back(E.first); 14743 while (!Worklist.empty()) { 14744 Value *Cond = Worklist.pop_back_val(); 14745 if (!Visited.insert(Cond).second) 14746 continue; 14747 14748 if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) { 14749 auto Predicate = 14750 EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate(); 14751 const auto *LHS = getSCEV(Cmp->getOperand(0)); 14752 const auto *RHS = getSCEV(Cmp->getOperand(1)); 14753 CollectCondition(Predicate, LHS, RHS, RewriteMap); 14754 continue; 14755 } 14756 14757 Value *L, *R; 14758 if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))) 14759 : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) { 14760 Worklist.push_back(L); 14761 Worklist.push_back(R); 14762 } 14763 } 14764 } 14765 14766 if (RewriteMap.empty()) 14767 return Expr; 14768 14769 // Now that all rewrite information is collect, rewrite the collected 14770 // expressions with the information in the map. This applies information to 14771 // sub-expressions. 14772 if (ExprsToRewrite.size() > 1) { 14773 for (const SCEV *Expr : ExprsToRewrite) { 14774 const SCEV *RewriteTo = RewriteMap[Expr]; 14775 RewriteMap.erase(Expr); 14776 SCEVLoopGuardRewriter Rewriter(*this, RewriteMap); 14777 RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)}); 14778 } 14779 } 14780 14781 SCEVLoopGuardRewriter Rewriter(*this, RewriteMap); 14782 return Rewriter.visit(Expr); 14783 } 14784