//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>;
// however, it's useful to think about these as the same register, with some
// uses using the value of the register before the add and some using it after.
// In this example, the icmp is a post-increment user, since it uses %i.next,
// which is the value of the induction variable after the increment. The other
// common case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
//       of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
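//       For example, a formula such as reg(%a) + -1*reg(%x) does not
//       necessarily need a second register holding (-1 * %x); the same value
//       can typically be formed directly as "sub %a, %x".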
52 // 53 //===----------------------------------------------------------------------===// 54 55 #include "llvm/Transforms/Scalar/LoopStrengthReduce.h" 56 #include "llvm/ADT/APInt.h" 57 #include "llvm/ADT/DenseMap.h" 58 #include "llvm/ADT/DenseSet.h" 59 #include "llvm/ADT/Hashing.h" 60 #include "llvm/ADT/PointerIntPair.h" 61 #include "llvm/ADT/STLExtras.h" 62 #include "llvm/ADT/SetVector.h" 63 #include "llvm/ADT/SmallBitVector.h" 64 #include "llvm/ADT/SmallPtrSet.h" 65 #include "llvm/ADT/SmallSet.h" 66 #include "llvm/ADT/SmallVector.h" 67 #include "llvm/ADT/iterator_range.h" 68 #include "llvm/Analysis/AssumptionCache.h" 69 #include "llvm/Analysis/IVUsers.h" 70 #include "llvm/Analysis/LoopAnalysisManager.h" 71 #include "llvm/Analysis/LoopInfo.h" 72 #include "llvm/Analysis/LoopPass.h" 73 #include "llvm/Analysis/MemorySSA.h" 74 #include "llvm/Analysis/MemorySSAUpdater.h" 75 #include "llvm/Analysis/ScalarEvolution.h" 76 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 77 #include "llvm/Analysis/ScalarEvolutionNormalization.h" 78 #include "llvm/Analysis/TargetLibraryInfo.h" 79 #include "llvm/Analysis/TargetTransformInfo.h" 80 #include "llvm/Config/llvm-config.h" 81 #include "llvm/IR/BasicBlock.h" 82 #include "llvm/IR/Constant.h" 83 #include "llvm/IR/Constants.h" 84 #include "llvm/IR/DebugInfoMetadata.h" 85 #include "llvm/IR/DerivedTypes.h" 86 #include "llvm/IR/Dominators.h" 87 #include "llvm/IR/GlobalValue.h" 88 #include "llvm/IR/IRBuilder.h" 89 #include "llvm/IR/InstrTypes.h" 90 #include "llvm/IR/Instruction.h" 91 #include "llvm/IR/Instructions.h" 92 #include "llvm/IR/IntrinsicInst.h" 93 #include "llvm/IR/Intrinsics.h" 94 #include "llvm/IR/Module.h" 95 #include "llvm/IR/OperandTraits.h" 96 #include "llvm/IR/Operator.h" 97 #include "llvm/IR/PassManager.h" 98 #include "llvm/IR/Type.h" 99 #include "llvm/IR/Use.h" 100 #include "llvm/IR/User.h" 101 #include "llvm/IR/Value.h" 102 #include "llvm/IR/ValueHandle.h" 103 #include "llvm/InitializePasses.h" 104 #include "llvm/Pass.h" 105 #include "llvm/Support/Casting.h" 106 #include "llvm/Support/CommandLine.h" 107 #include "llvm/Support/Compiler.h" 108 #include "llvm/Support/Debug.h" 109 #include "llvm/Support/ErrorHandling.h" 110 #include "llvm/Support/MathExtras.h" 111 #include "llvm/Support/raw_ostream.h" 112 #include "llvm/Transforms/Scalar.h" 113 #include "llvm/Transforms/Utils.h" 114 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 115 #include "llvm/Transforms/Utils/Local.h" 116 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 117 #include <algorithm> 118 #include <cassert> 119 #include <cstddef> 120 #include <cstdint> 121 #include <cstdlib> 122 #include <iterator> 123 #include <limits> 124 #include <map> 125 #include <numeric> 126 #include <utility> 127 128 using namespace llvm; 129 130 #define DEBUG_TYPE "loop-reduce" 131 132 /// MaxIVUsers is an arbitrary threshold that provides an early opportunity for 133 /// bail out. This threshold is far beyond the number of users that LSR can 134 /// conceivably solve, so it should not affect generated code, but catches the 135 /// worst cases before LSR burns too much compile time and stack space. 136 static const unsigned MaxIVUsers = 200; 137 138 // Temporary flag to cleanup congruent phis after LSR phi expansion. 139 // It's currently disabled until we can determine whether it's truly useful or 140 // not. The flag should be removed after the v3.0 release. 141 // This is now needed for ivchains. 
static cl::opt<bool> EnablePhiElim(
    "enable-lsr-phielim", cl::Hidden, cl::init(true),
    cl::desc("Enable LSR phi elimination"));

// The flag adds instruction count to the solution cost comparison.
static cl::opt<bool> InsnsCost(
    "lsr-insns-cost", cl::Hidden, cl::init(true),
    cl::desc("Add instruction count to a LSR cost model"));

// Flag to choose how to narrow a complex LSR solution.
static cl::opt<bool> LSRExpNarrow(
    "lsr-exp-narrow", cl::Hidden, cl::init(false),
    cl::desc("Narrow LSR complex solution using"
             " expectation of registers number"));

// Flag to narrow search space by filtering non-optimal formulae with
// the same ScaledReg and Scale.
static cl::opt<bool> FilterSameScaledReg(
    "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Narrow LSR search space by filtering non-optimal formulae"
             " with the same ScaledReg and Scale"));

static cl::opt<bool> EnableBackedgeIndexing(
    "lsr-backedge-indexing", cl::Hidden, cl::init(true),
    cl::desc("Enable the generation of cross iteration indexed memops"));

static cl::opt<unsigned> ComplexityLimit(
    "lsr-complexity-limit", cl::Hidden,
    cl::init(std::numeric_limits<uint16_t>::max()),
    cl::desc("LSR search space complexity limit"));

static cl::opt<unsigned> SetupCostDepthLimit(
    "lsr-setupcost-depth-limit", cl::Hidden, cl::init(7),
    cl::desc("The limit on recursion depth for LSRs setup cost"));

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
    "stress-ivchain", cl::Hidden, cl::init(false),
    cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif

namespace {

struct MemAccessTy {
  /// Used in situations where the accessed memory type is unknown.
  static const unsigned UnknownAddressSpace =
      std::numeric_limits<unsigned>::max();

  Type *MemTy = nullptr;
  unsigned AddrSpace = UnknownAddressSpace;

  MemAccessTy() = default;
  MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {}

  bool operator==(MemAccessTy Other) const {
    return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;
  }

  bool operator!=(MemAccessTy Other) const { return !(*this == Other); }

  static MemAccessTy getUnknown(LLVMContext &Ctx,
                                unsigned AS = UnknownAddressSpace) {
    return MemAccessTy(Type::getVoidTy(Ctx), AS);
  }

  Type *getType() { return MemTy; }
};

/// This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

LLVM_DUMP_METHOD void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

/// Map register candidates to information about how they are used.
239 class RegUseTracker { 240 using RegUsesTy = DenseMap<const SCEV *, RegSortData>; 241 242 RegUsesTy RegUsesMap; 243 SmallVector<const SCEV *, 16> RegSequence; 244 245 public: 246 void countRegister(const SCEV *Reg, size_t LUIdx); 247 void dropRegister(const SCEV *Reg, size_t LUIdx); 248 void swapAndDropUse(size_t LUIdx, size_t LastLUIdx); 249 250 bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const; 251 252 const SmallBitVector &getUsedByIndices(const SCEV *Reg) const; 253 254 void clear(); 255 256 using iterator = SmallVectorImpl<const SCEV *>::iterator; 257 using const_iterator = SmallVectorImpl<const SCEV *>::const_iterator; 258 259 iterator begin() { return RegSequence.begin(); } 260 iterator end() { return RegSequence.end(); } 261 const_iterator begin() const { return RegSequence.begin(); } 262 const_iterator end() const { return RegSequence.end(); } 263 }; 264 265 } // end anonymous namespace 266 267 void 268 RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) { 269 std::pair<RegUsesTy::iterator, bool> Pair = 270 RegUsesMap.insert(std::make_pair(Reg, RegSortData())); 271 RegSortData &RSD = Pair.first->second; 272 if (Pair.second) 273 RegSequence.push_back(Reg); 274 RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1)); 275 RSD.UsedByIndices.set(LUIdx); 276 } 277 278 void 279 RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) { 280 RegUsesTy::iterator It = RegUsesMap.find(Reg); 281 assert(It != RegUsesMap.end()); 282 RegSortData &RSD = It->second; 283 assert(RSD.UsedByIndices.size() > LUIdx); 284 RSD.UsedByIndices.reset(LUIdx); 285 } 286 287 void 288 RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) { 289 assert(LUIdx <= LastLUIdx); 290 291 // Update RegUses. The data structure is not optimized for this purpose; 292 // we must iterate through it and update each of the bit vectors. 293 for (auto &Pair : RegUsesMap) { 294 SmallBitVector &UsedByIndices = Pair.second.UsedByIndices; 295 if (LUIdx < UsedByIndices.size()) 296 UsedByIndices[LUIdx] = 297 LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : false; 298 UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx)); 299 } 300 } 301 302 bool 303 RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const { 304 RegUsesTy::const_iterator I = RegUsesMap.find(Reg); 305 if (I == RegUsesMap.end()) 306 return false; 307 const SmallBitVector &UsedByIndices = I->second.UsedByIndices; 308 int i = UsedByIndices.find_first(); 309 if (i == -1) return false; 310 if ((size_t)i != LUIdx) return true; 311 return UsedByIndices.find_next(i) != -1; 312 } 313 314 const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const { 315 RegUsesTy::const_iterator I = RegUsesMap.find(Reg); 316 assert(I != RegUsesMap.end() && "Unknown register!"); 317 return I->second.UsedByIndices; 318 } 319 320 void RegUseTracker::clear() { 321 RegUsesMap.clear(); 322 RegSequence.clear(); 323 } 324 325 namespace { 326 327 /// This class holds information that describes a formula for computing 328 /// satisfying a use. It may include broken-out immediates and scaled registers. 329 struct Formula { 330 /// Global base address used for complex addressing. 331 GlobalValue *BaseGV = nullptr; 332 333 /// Base offset for complex addressing. 334 int64_t BaseOffset = 0; 335 336 /// Whether any complex addressing has a base register. 337 bool HasBaseReg = false; 338 339 /// The scale of any complex addressing. 
  int64_t Scale = 0;

  /// The list of "base" registers for this use. When this is non-empty, the
  /// canonical representation of a formula is
  /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
  /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
  /// 3. The reg containing a recurrent expr related to the current loop in the
  /// formula should be put in the ScaledReg.
  /// #1 enforces that the scaled register is always used when at least two
  /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
  /// #2 enforces that 1 * reg is reg.
  /// #3 ensures invariant regs with respect to current loop can be combined
  /// together in LSR codegen.
  /// This invariant can be temporarily broken while building a formula.
  /// However, every formula inserted into the LSRInstance must be in canonical
  /// form.
  SmallVector<const SCEV *, 4> BaseRegs;

  /// The 'scaled' register for this use. This should be non-null when Scale is
  /// not zero.
  const SCEV *ScaledReg = nullptr;

  /// An additional constant offset which is added near the use. This requires
  /// a temporary register, but the offset itself can live in an add immediate
  /// field rather than a register.
  int64_t UnfoldedOffset = 0;

  Formula() = default;

  void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  bool isCanonical(const Loop &L) const;

  void canonicalize(const Loop &L);

  bool unscale();

  bool hasZeroEnd() const;

  size_t getNumRegs() const;
  Type *getType() const;

  void deleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

/// Recursion helper for initialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands())
      DoInitialMatch(S, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero() && AR->isAffine()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
425 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) 426 if (Mul->getOperand(0)->isAllOnesValue()) { 427 SmallVector<const SCEV *, 4> Ops(drop_begin(Mul->operands())); 428 const SCEV *NewMul = SE.getMulExpr(Ops); 429 430 SmallVector<const SCEV *, 4> MyGood; 431 SmallVector<const SCEV *, 4> MyBad; 432 DoInitialMatch(NewMul, L, MyGood, MyBad, SE); 433 const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue( 434 SE.getEffectiveSCEVType(NewMul->getType()))); 435 for (const SCEV *S : MyGood) 436 Good.push_back(SE.getMulExpr(NegOne, S)); 437 for (const SCEV *S : MyBad) 438 Bad.push_back(SE.getMulExpr(NegOne, S)); 439 return; 440 } 441 442 // Ok, we can't do anything interesting. Just stuff the whole thing into a 443 // register and hope for the best. 444 Bad.push_back(S); 445 } 446 447 /// Incorporate loop-variant parts of S into this Formula, attempting to keep 448 /// all loop-invariant and loop-computable values in a single base register. 449 void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) { 450 SmallVector<const SCEV *, 4> Good; 451 SmallVector<const SCEV *, 4> Bad; 452 DoInitialMatch(S, L, Good, Bad, SE); 453 if (!Good.empty()) { 454 const SCEV *Sum = SE.getAddExpr(Good); 455 if (!Sum->isZero()) 456 BaseRegs.push_back(Sum); 457 HasBaseReg = true; 458 } 459 if (!Bad.empty()) { 460 const SCEV *Sum = SE.getAddExpr(Bad); 461 if (!Sum->isZero()) 462 BaseRegs.push_back(Sum); 463 HasBaseReg = true; 464 } 465 canonicalize(*L); 466 } 467 468 /// Check whether or not this formula satisfies the canonical 469 /// representation. 470 /// \see Formula::BaseRegs. 471 bool Formula::isCanonical(const Loop &L) const { 472 if (!ScaledReg) 473 return BaseRegs.size() <= 1; 474 475 if (Scale != 1) 476 return true; 477 478 if (Scale == 1 && BaseRegs.empty()) 479 return false; 480 481 const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg); 482 if (SAR && SAR->getLoop() == &L) 483 return true; 484 485 // If ScaledReg is not a recurrent expr, or it is but its loop is not current 486 // loop, meanwhile BaseRegs contains a recurrent expr reg related with current 487 // loop, we want to swap the reg in BaseRegs with ScaledReg. 488 auto I = find_if(BaseRegs, [&](const SCEV *S) { 489 return isa<const SCEVAddRecExpr>(S) && 490 (cast<SCEVAddRecExpr>(S)->getLoop() == &L); 491 }); 492 return I == BaseRegs.end(); 493 } 494 495 /// Helper method to morph a formula into its canonical representation. 496 /// \see Formula::BaseRegs. 497 /// Every formula having more than one base register, must use the ScaledReg 498 /// field. Otherwise, we would have to do special cases everywhere in LSR 499 /// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ... 500 /// On the other hand, 1*reg should be canonicalized into reg. 501 void Formula::canonicalize(const Loop &L) { 502 if (isCanonical(L)) 503 return; 504 // So far we did not need this case. This is easy to implement but it is 505 // useless to maintain dead code. Beside it could hurt compile time. 506 assert(!BaseRegs.empty() && "1*reg => reg, should not be needed."); 507 508 // Keep the invariant sum in BaseRegs and one of the variant sum in ScaledReg. 509 if (!ScaledReg) { 510 ScaledReg = BaseRegs.pop_back_val(); 511 Scale = 1; 512 } 513 514 // If ScaledReg is an invariant with respect to L, find the reg from 515 // BaseRegs containing the recurrent expr related with Loop L. Swap the 516 // reg with ScaledReg. 
  const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
  if (!SAR || SAR->getLoop() != &L) {
    auto I = find_if(BaseRegs, [&](const SCEV *S) {
      return isa<const SCEVAddRecExpr>(S) &&
             (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
    });
    if (I != BaseRegs.end())
      std::swap(ScaledReg, *I);
  }
}

/// Get rid of the scale in the formula.
/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
/// \return true if it was possible to get rid of the scale, false otherwise.
/// \note After this operation the formula may not be in the canonical form.
bool Formula::unscale() {
  if (Scale != 1)
    return false;
  Scale = 0;
  BaseRegs.push_back(ScaledReg);
  ScaledReg = nullptr;
  return true;
}

bool Formula::hasZeroEnd() const {
  if (UnfoldedOffset || BaseOffset)
    return false;
  if (BaseRegs.size() != 1 || ScaledReg)
    return false;
  return true;
}

/// Return the total number of register operands used by this formula. This
/// does not include register uses implied by non-constant addrec strides.
size_t Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// Return the type of this formula, if it has one, or null otherwise. This
/// type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         BaseGV ? BaseGV->getType() :
         nullptr;
}

/// Delete the given base reg from the BaseRegs list.
void Formula::deleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg || is_contained(BaseRegs, S);
}

/// Test whether this formula uses registers which are used by uses other than
/// the use with the given index.
578 bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx, 579 const RegUseTracker &RegUses) const { 580 if (ScaledReg) 581 if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx)) 582 return true; 583 for (const SCEV *BaseReg : BaseRegs) 584 if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx)) 585 return true; 586 return false; 587 } 588 589 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 590 void Formula::print(raw_ostream &OS) const { 591 bool First = true; 592 if (BaseGV) { 593 if (!First) OS << " + "; else First = false; 594 BaseGV->printAsOperand(OS, /*PrintType=*/false); 595 } 596 if (BaseOffset != 0) { 597 if (!First) OS << " + "; else First = false; 598 OS << BaseOffset; 599 } 600 for (const SCEV *BaseReg : BaseRegs) { 601 if (!First) OS << " + "; else First = false; 602 OS << "reg(" << *BaseReg << ')'; 603 } 604 if (HasBaseReg && BaseRegs.empty()) { 605 if (!First) OS << " + "; else First = false; 606 OS << "**error: HasBaseReg**"; 607 } else if (!HasBaseReg && !BaseRegs.empty()) { 608 if (!First) OS << " + "; else First = false; 609 OS << "**error: !HasBaseReg**"; 610 } 611 if (Scale != 0) { 612 if (!First) OS << " + "; else First = false; 613 OS << Scale << "*reg("; 614 if (ScaledReg) 615 OS << *ScaledReg; 616 else 617 OS << "<unknown>"; 618 OS << ')'; 619 } 620 if (UnfoldedOffset != 0) { 621 if (!First) OS << " + "; 622 OS << "imm(" << UnfoldedOffset << ')'; 623 } 624 } 625 626 LLVM_DUMP_METHOD void Formula::dump() const { 627 print(errs()); errs() << '\n'; 628 } 629 #endif 630 631 /// Return true if the given addrec can be sign-extended without changing its 632 /// value. 633 static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { 634 Type *WideTy = 635 IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1); 636 return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy)); 637 } 638 639 /// Return true if the given add can be sign-extended without changing its 640 /// value. 641 static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) { 642 Type *WideTy = 643 IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1); 644 return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy)); 645 } 646 647 /// Return true if the given mul can be sign-extended without changing its 648 /// value. 649 static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) { 650 Type *WideTy = 651 IntegerType::get(SE.getContext(), 652 SE.getTypeSizeInBits(M->getType()) * M->getNumOperands()); 653 return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy)); 654 } 655 656 /// Return an expression for LHS /s RHS, if it can be determined and if the 657 /// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits 658 /// is true, expressions like (X * Y) /s Y are simplified to Y, ignoring that 659 /// the multiplication may overflow, which is useful when the result will be 660 /// used in a context where the most significant bits are ignored. 661 static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, 662 ScalarEvolution &SE, 663 bool IgnoreSignificantBits = false) { 664 // Handle the trivial case, which works for any SCEV type. 665 if (LHS == RHS) 666 return SE.getConstant(LHS->getType(), 1); 667 668 // Handle a few RHS special cases. 669 const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS); 670 if (RC) { 671 const APInt &RA = RC->getAPInt(); 672 // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do 673 // some folding. 
674 if (RA.isAllOnesValue()) 675 return SE.getMulExpr(LHS, RC); 676 // Handle x /s 1 as x. 677 if (RA == 1) 678 return LHS; 679 } 680 681 // Check for a division of a constant by a constant. 682 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) { 683 if (!RC) 684 return nullptr; 685 const APInt &LA = C->getAPInt(); 686 const APInt &RA = RC->getAPInt(); 687 if (LA.srem(RA) != 0) 688 return nullptr; 689 return SE.getConstant(LA.sdiv(RA)); 690 } 691 692 // Distribute the sdiv over addrec operands, if the addrec doesn't overflow. 693 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) { 694 if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) { 695 const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE, 696 IgnoreSignificantBits); 697 if (!Step) return nullptr; 698 const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE, 699 IgnoreSignificantBits); 700 if (!Start) return nullptr; 701 // FlagNW is independent of the start value, step direction, and is 702 // preserved with smaller magnitude steps. 703 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) 704 return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap); 705 } 706 return nullptr; 707 } 708 709 // Distribute the sdiv over add operands, if the add doesn't overflow. 710 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) { 711 if (IgnoreSignificantBits || isAddSExtable(Add, SE)) { 712 SmallVector<const SCEV *, 8> Ops; 713 for (const SCEV *S : Add->operands()) { 714 const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits); 715 if (!Op) return nullptr; 716 Ops.push_back(Op); 717 } 718 return SE.getAddExpr(Ops); 719 } 720 return nullptr; 721 } 722 723 // Check for a multiply operand that we can pull RHS out of. 724 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) { 725 if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) { 726 SmallVector<const SCEV *, 4> Ops; 727 bool Found = false; 728 for (const SCEV *S : Mul->operands()) { 729 if (!Found) 730 if (const SCEV *Q = getExactSDiv(S, RHS, SE, 731 IgnoreSignificantBits)) { 732 S = Q; 733 Found = true; 734 } 735 Ops.push_back(S); 736 } 737 return Found ? SE.getMulExpr(Ops) : nullptr; 738 } 739 return nullptr; 740 } 741 742 // Otherwise we don't know. 743 return nullptr; 744 } 745 746 /// If S involves the addition of a constant integer value, return that integer 747 /// value, and mutate S to point to a new SCEV with that value excluded. 748 static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) { 749 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { 750 if (C->getAPInt().getMinSignedBits() <= 64) { 751 S = SE.getConstant(C->getType(), 0); 752 return C->getValue()->getSExtValue(); 753 } 754 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 755 SmallVector<const SCEV *, 8> NewOps(Add->operands()); 756 int64_t Result = ExtractImmediate(NewOps.front(), SE); 757 if (Result != 0) 758 S = SE.getAddExpr(NewOps); 759 return Result; 760 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 761 SmallVector<const SCEV *, 8> NewOps(AR->operands()); 762 int64_t Result = ExtractImmediate(NewOps.front(), SE); 763 if (Result != 0) 764 S = SE.getAddRecExpr(NewOps, AR->getLoop(), 765 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) 766 SCEV::FlagAnyWrap); 767 return Result; 768 } 769 return 0; 770 } 771 772 /// If S involves the addition of a GlobalValue address, return that symbol, and 773 /// mutate S to point to a new SCEV with that value excluded. 
774 static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) { 775 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 776 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) { 777 S = SE.getConstant(GV->getType(), 0); 778 return GV; 779 } 780 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 781 SmallVector<const SCEV *, 8> NewOps(Add->operands()); 782 GlobalValue *Result = ExtractSymbol(NewOps.back(), SE); 783 if (Result) 784 S = SE.getAddExpr(NewOps); 785 return Result; 786 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 787 SmallVector<const SCEV *, 8> NewOps(AR->operands()); 788 GlobalValue *Result = ExtractSymbol(NewOps.front(), SE); 789 if (Result) 790 S = SE.getAddRecExpr(NewOps, AR->getLoop(), 791 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) 792 SCEV::FlagAnyWrap); 793 return Result; 794 } 795 return nullptr; 796 } 797 798 /// Returns true if the specified instruction is using the specified value as an 799 /// address. 800 static bool isAddressUse(const TargetTransformInfo &TTI, 801 Instruction *Inst, Value *OperandVal) { 802 bool isAddress = isa<LoadInst>(Inst); 803 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 804 if (SI->getPointerOperand() == OperandVal) 805 isAddress = true; 806 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 807 // Addressing modes can also be folded into prefetches and a variety 808 // of intrinsics. 809 switch (II->getIntrinsicID()) { 810 case Intrinsic::memset: 811 case Intrinsic::prefetch: 812 case Intrinsic::masked_load: 813 if (II->getArgOperand(0) == OperandVal) 814 isAddress = true; 815 break; 816 case Intrinsic::masked_store: 817 if (II->getArgOperand(1) == OperandVal) 818 isAddress = true; 819 break; 820 case Intrinsic::memmove: 821 case Intrinsic::memcpy: 822 if (II->getArgOperand(0) == OperandVal || 823 II->getArgOperand(1) == OperandVal) 824 isAddress = true; 825 break; 826 default: { 827 MemIntrinsicInfo IntrInfo; 828 if (TTI.getTgtMemIntrinsic(II, IntrInfo)) { 829 if (IntrInfo.PtrVal == OperandVal) 830 isAddress = true; 831 } 832 } 833 } 834 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) { 835 if (RMW->getPointerOperand() == OperandVal) 836 isAddress = true; 837 } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) { 838 if (CmpX->getPointerOperand() == OperandVal) 839 isAddress = true; 840 } 841 return isAddress; 842 } 843 844 /// Return the type of the memory being accessed. 
845 static MemAccessTy getAccessType(const TargetTransformInfo &TTI, 846 Instruction *Inst, Value *OperandVal) { 847 MemAccessTy AccessTy(Inst->getType(), MemAccessTy::UnknownAddressSpace); 848 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 849 AccessTy.MemTy = SI->getOperand(0)->getType(); 850 AccessTy.AddrSpace = SI->getPointerAddressSpace(); 851 } else if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) { 852 AccessTy.AddrSpace = LI->getPointerAddressSpace(); 853 } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) { 854 AccessTy.AddrSpace = RMW->getPointerAddressSpace(); 855 } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) { 856 AccessTy.AddrSpace = CmpX->getPointerAddressSpace(); 857 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 858 switch (II->getIntrinsicID()) { 859 case Intrinsic::prefetch: 860 case Intrinsic::memset: 861 AccessTy.AddrSpace = II->getArgOperand(0)->getType()->getPointerAddressSpace(); 862 AccessTy.MemTy = OperandVal->getType(); 863 break; 864 case Intrinsic::memmove: 865 case Intrinsic::memcpy: 866 AccessTy.AddrSpace = OperandVal->getType()->getPointerAddressSpace(); 867 AccessTy.MemTy = OperandVal->getType(); 868 break; 869 case Intrinsic::masked_load: 870 AccessTy.AddrSpace = 871 II->getArgOperand(0)->getType()->getPointerAddressSpace(); 872 break; 873 case Intrinsic::masked_store: 874 AccessTy.MemTy = II->getOperand(0)->getType(); 875 AccessTy.AddrSpace = 876 II->getArgOperand(1)->getType()->getPointerAddressSpace(); 877 break; 878 default: { 879 MemIntrinsicInfo IntrInfo; 880 if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) { 881 AccessTy.AddrSpace 882 = IntrInfo.PtrVal->getType()->getPointerAddressSpace(); 883 } 884 885 break; 886 } 887 } 888 } 889 890 // All pointers have the same requirements, so canonicalize them to an 891 // arbitrary pointer type to minimize variation. 892 if (PointerType *PTy = dyn_cast<PointerType>(AccessTy.MemTy)) 893 AccessTy.MemTy = PointerType::get(IntegerType::get(PTy->getContext(), 1), 894 PTy->getAddressSpace()); 895 896 return AccessTy; 897 } 898 899 /// Return true if this AddRec is already a phi in its loop. 900 static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { 901 for (PHINode &PN : AR->getLoop()->getHeader()->phis()) { 902 if (SE.isSCEVable(PN.getType()) && 903 (SE.getEffectiveSCEVType(PN.getType()) == 904 SE.getEffectiveSCEVType(AR->getType())) && 905 SE.getSCEV(&PN) == AR) 906 return true; 907 } 908 return false; 909 } 910 911 /// Check if expanding this expression is likely to incur significant cost. This 912 /// is tricky because SCEV doesn't track which expressions are actually computed 913 /// by the current IR. 914 /// 915 /// We currently allow expansion of IV increments that involve adds, 916 /// multiplication by constants, and AddRecs from existing phis. 917 /// 918 /// TODO: Allow UDivExpr if we can find an existing IV increment that is an 919 /// obvious multiple of the UDivExpr. 
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSetImpl<const SCEV*> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  default:
    break;
  }

  if (!Processed.insert(S).second)
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands()) {
      if (isHighCostExpansion(S, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (User *UR : UVal->users()) {
          // If U is a constant, it may be used by a ConstantExpr.
          Instruction *UI = dyn_cast<Instruction>(UR);
          if (UI && UI->getOpcode() == Instruction::Mul &&
              SE.isSCEVable(UI->getType())) {
            return SE.getSCEV(UI) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high cost.
  return true;
}

namespace {

class LSRUse;

} // end anonymous namespace

/// Check if the addressing mode defined by \p F is completely
/// folded in \p LU at isel time.
/// This includes address-mode folding and special icmp tricks.
/// This function returns true if \p LU can accommodate what \p F
/// defines and up to 1 base + 1 scaled + offset.
/// In other words, if \p F has several base registers, this function may
/// still return true. Therefore, users still need to account for
/// additional base registers and/or unfolded offsets to derive an
/// accurate cost model.
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F);

// Get the cost of the scaling factor used in F for LU.
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
                                     const LSRUse &LU, const Formula &F,
                                     const Loop &L);

namespace {

/// This class is used to measure and compare candidate formulae.
1009 class Cost { 1010 const Loop *L = nullptr; 1011 ScalarEvolution *SE = nullptr; 1012 const TargetTransformInfo *TTI = nullptr; 1013 TargetTransformInfo::LSRCost C; 1014 1015 public: 1016 Cost() = delete; 1017 Cost(const Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI) : 1018 L(L), SE(&SE), TTI(&TTI) { 1019 C.Insns = 0; 1020 C.NumRegs = 0; 1021 C.AddRecCost = 0; 1022 C.NumIVMuls = 0; 1023 C.NumBaseAdds = 0; 1024 C.ImmCost = 0; 1025 C.SetupCost = 0; 1026 C.ScaleCost = 0; 1027 } 1028 1029 bool isLess(Cost &Other); 1030 1031 void Lose(); 1032 1033 #ifndef NDEBUG 1034 // Once any of the metrics loses, they must all remain losers. 1035 bool isValid() { 1036 return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds 1037 | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u) 1038 || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds 1039 & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u); 1040 } 1041 #endif 1042 1043 bool isLoser() { 1044 assert(isValid() && "invalid cost"); 1045 return C.NumRegs == ~0u; 1046 } 1047 1048 void RateFormula(const Formula &F, 1049 SmallPtrSetImpl<const SCEV *> &Regs, 1050 const DenseSet<const SCEV *> &VisitedRegs, 1051 const LSRUse &LU, 1052 SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr); 1053 1054 void print(raw_ostream &OS) const; 1055 void dump() const; 1056 1057 private: 1058 void RateRegister(const Formula &F, const SCEV *Reg, 1059 SmallPtrSetImpl<const SCEV *> &Regs); 1060 void RatePrimaryRegister(const Formula &F, const SCEV *Reg, 1061 SmallPtrSetImpl<const SCEV *> &Regs, 1062 SmallPtrSetImpl<const SCEV *> *LoserRegs); 1063 }; 1064 1065 /// An operand value in an instruction which is to be replaced with some 1066 /// equivalent, possibly strength-reduced, replacement. 1067 struct LSRFixup { 1068 /// The instruction which will be updated. 1069 Instruction *UserInst = nullptr; 1070 1071 /// The operand of the instruction which will be replaced. The operand may be 1072 /// used more than once; every instance will be replaced. 1073 Value *OperandValToReplace = nullptr; 1074 1075 /// If this user is to use the post-incremented value of an induction 1076 /// variable, this set is non-empty and holds the loops associated with the 1077 /// induction variable. 1078 PostIncLoopSet PostIncLoops; 1079 1080 /// A constant offset to be added to the LSRUse expression. This allows 1081 /// multiple fixups to share the same LSRUse with different offsets, for 1082 /// example in an unrolled loop. 1083 int64_t Offset = 0; 1084 1085 LSRFixup() = default; 1086 1087 bool isUseFullyOutsideLoop(const Loop *L) const; 1088 1089 void print(raw_ostream &OS) const; 1090 void dump() const; 1091 }; 1092 1093 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted 1094 /// SmallVectors of const SCEV*. 
1095 struct UniquifierDenseMapInfo { 1096 static SmallVector<const SCEV *, 4> getEmptyKey() { 1097 SmallVector<const SCEV *, 4> V; 1098 V.push_back(reinterpret_cast<const SCEV *>(-1)); 1099 return V; 1100 } 1101 1102 static SmallVector<const SCEV *, 4> getTombstoneKey() { 1103 SmallVector<const SCEV *, 4> V; 1104 V.push_back(reinterpret_cast<const SCEV *>(-2)); 1105 return V; 1106 } 1107 1108 static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) { 1109 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 1110 } 1111 1112 static bool isEqual(const SmallVector<const SCEV *, 4> &LHS, 1113 const SmallVector<const SCEV *, 4> &RHS) { 1114 return LHS == RHS; 1115 } 1116 }; 1117 1118 /// This class holds the state that LSR keeps for each use in IVUsers, as well 1119 /// as uses invented by LSR itself. It includes information about what kinds of 1120 /// things can be folded into the user, information about the user itself, and 1121 /// information about how the use may be satisfied. TODO: Represent multiple 1122 /// users of the same expression in common? 1123 class LSRUse { 1124 DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier; 1125 1126 public: 1127 /// An enum for a kind of use, indicating what types of scaled and immediate 1128 /// operands it might support. 1129 enum KindType { 1130 Basic, ///< A normal use, with no folding. 1131 Special, ///< A special case of basic, allowing -1 scales. 1132 Address, ///< An address use; folding according to TargetLowering 1133 ICmpZero ///< An equality icmp with both operands folded into one. 1134 // TODO: Add a generic icmp too? 1135 }; 1136 1137 using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>; 1138 1139 KindType Kind; 1140 MemAccessTy AccessTy; 1141 1142 /// The list of operands which are to be replaced. 1143 SmallVector<LSRFixup, 8> Fixups; 1144 1145 /// Keep track of the min and max offsets of the fixups. 1146 int64_t MinOffset = std::numeric_limits<int64_t>::max(); 1147 int64_t MaxOffset = std::numeric_limits<int64_t>::min(); 1148 1149 /// This records whether all of the fixups using this LSRUse are outside of 1150 /// the loop, in which case some special-case heuristics may be used. 1151 bool AllFixupsOutsideLoop = true; 1152 1153 /// RigidFormula is set to true to guarantee that this use will be associated 1154 /// with a single formula--the one that initially matched. Some SCEV 1155 /// expressions cannot be expanded. This allows LSR to consider the registers 1156 /// used by those expressions without the need to expand them later after 1157 /// changing the formula. 1158 bool RigidFormula = false; 1159 1160 /// This records the widest use type for any fixup using this 1161 /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max 1162 /// fixup widths to be equivalent, because the narrower one may be relying on 1163 /// the implicit truncation to truncate away bogus bits. 1164 Type *WidestFixupType = nullptr; 1165 1166 /// A list of ways to build a value that can satisfy this user. After the 1167 /// list is populated, one of these is selected heuristically and used to 1168 /// formulate a replacement for OperandValToReplace in UserInst. 1169 SmallVector<Formula, 12> Formulae; 1170 1171 /// The set of register candidates used by all formulae in this LSRUse. 
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {}

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  void pushFixup(LSRFixup &f) {
    Fixups.push_back(f);
    if (f.Offset > MaxOffset)
      MaxOffset = f.Offset;
    if (f.Offset < MinOffset)
      MinOffset = f.Offset;
  }

  bool HasFormulaWithSameRegs(const Formula &F) const;
  float getNotSelectedProbability(const SCEV *Reg) const;
  bool InsertFormula(const Formula &F, const Loop &L);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup = nullptr);

static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) {
  if (isa<SCEVUnknown>(Reg) || isa<SCEVConstant>(Reg))
    return 1;
  if (Depth == 0)
    return 0;
  if (const auto *S = dyn_cast<SCEVAddRecExpr>(Reg))
    return getSetupCost(S->getStart(), Depth - 1);
  if (auto S = dyn_cast<SCEVIntegralCastExpr>(Reg))
    return getSetupCost(S->getOperand(), Depth - 1);
  if (auto S = dyn_cast<SCEVNAryExpr>(Reg))
    return std::accumulate(S->op_begin(), S->op_end(), 0,
                           [&](unsigned i, const SCEV *Reg) {
                             return i + getSetupCost(Reg, Depth - 1);
                           });
  if (auto S = dyn_cast<SCEVUDivExpr>(Reg))
    return getSetupCost(S->getLHS(), Depth - 1) +
           getSetupCost(S->getRHS(), Depth - 1);
  return 0;
}

/// Tally up interesting quantities from the given register.
void Cost::RateRegister(const Formula &F, const SCEV *Reg,
                        SmallPtrSetImpl<const SCEV *> &Regs) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    // If this is an addrec for another loop, it should be an invariant
    // with respect to L since L is the innermost loop (at least
    // for now LSR only handles innermost loops).
    if (AR->getLoop() != L) {
      // If the AddRec exists, consider its register free and leave it alone.
      if (isExistingPhi(AR, *SE) && !TTI->shouldFavorPostInc())
        return;

      // It is bad to allow LSR for the current loop to add induction variables
      // for its sibling loops.
      if (!AR->getLoop()->contains(L)) {
        Lose();
        return;
      }

      // Otherwise, it will be an invariant with respect to Loop L.
      ++C.NumRegs;
      return;
    }

    unsigned LoopCost = 1;
    if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
        TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {

      // If the step size matches the base offset, we could use pre-indexed
      // addressing.
      if (TTI->shouldFavorBackedgeIndex(L)) {
        if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
          if (Step->getAPInt() == F.BaseOffset)
            LoopCost = 0;
      }

      if (TTI->shouldFavorPostInc()) {
        const SCEV *LoopStep = AR->getStepRecurrence(*SE);
        if (isa<SCEVConstant>(LoopStep)) {
          const SCEV *LoopStart = AR->getStart();
          if (!isa<SCEVConstant>(LoopStart) &&
              SE->isLoopInvariant(LoopStart, L))
            LoopCost = 0;
        }
      }
    }
    C.AddRecCost += LoopCost;

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(F, AR->getOperand(1), Regs);
        if (isLoser())
          return;
      }
    }
  }
  ++C.NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  C.SetupCost += getSetupCost(Reg, SetupCostDepthLimit);
  // Ensure we don't, even with the recursion limit, produce invalid costs.
  C.SetupCost = std::min<unsigned>(C.SetupCost, 1 << 16);

  C.NumIVMuls += isa<SCEVMulExpr>(Reg) &&
                 SE->hasComputableLoopEvolution(Reg, L);
}

/// Record this register in the set. If we haven't seen it before, rate
/// it. Optional LoserRegs provides a way to declare any formula that refers to
/// one of those regs an instant loser.
void Cost::RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                               SmallPtrSetImpl<const SCEV *> &Regs,
                               SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Lose();
    return;
  }
  if (Regs.insert(Reg).second) {
    RateRegister(F, Reg, Regs);
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSetImpl<const SCEV *> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const LSRUse &LU,
                       SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula");
  // Tally up the registers.
  unsigned PrevAddRecCost = C.AddRecCost;
  unsigned PrevNumRegs = C.NumRegs;
  unsigned PrevNumBaseAdds = C.NumBaseAdds;
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(F, ScaledReg, Regs, LoserRegs);
    if (isLoser())
      return;
  }
  for (const SCEV *BaseReg : F.BaseRegs) {
    if (VisitedRegs.count(BaseReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(F, BaseReg, Regs, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.getNumRegs();
  if (NumBaseParts > 1)
    // Do not count the base and a possible second register if the target
    // allows folding 2 registers.
    C.NumBaseAdds +=
        NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(*TTI, LU, F)));
  C.NumBaseAdds += (F.UnfoldedOffset != 0);

  // Accumulate non-free scaling amounts.
  C.ScaleCost += getScalingFactorCost(*TTI, LU, F, *L);

  // Tally up the non-zero immediates.
  for (const LSRFixup &Fixup : LU.Fixups) {
    int64_t O = Fixup.Offset;
    int64_t Offset = (uint64_t)O + F.BaseOffset;
    if (F.BaseGV)
      C.ImmCost += 64; // Handle symbolic values conservatively.
1361 // TODO: This should probably be the pointer size. 1362 else if (Offset != 0) 1363 C.ImmCost += APInt(64, Offset, true).getMinSignedBits(); 1364 1365 // Check with target if this offset with this instruction is 1366 // specifically not supported. 1367 if (LU.Kind == LSRUse::Address && Offset != 0 && 1368 !isAMCompletelyFolded(*TTI, LSRUse::Address, LU.AccessTy, F.BaseGV, 1369 Offset, F.HasBaseReg, F.Scale, Fixup.UserInst)) 1370 C.NumBaseAdds++; 1371 } 1372 1373 // If we don't count instruction cost exit here. 1374 if (!InsnsCost) { 1375 assert(isValid() && "invalid cost"); 1376 return; 1377 } 1378 1379 // Treat every new register that exceeds TTI.getNumberOfRegisters() - 1 as 1380 // additional instruction (at least fill). 1381 // TODO: Need distinguish register class? 1382 unsigned TTIRegNum = TTI->getNumberOfRegisters( 1383 TTI->getRegisterClassForType(false, F.getType())) - 1; 1384 if (C.NumRegs > TTIRegNum) { 1385 // Cost already exceeded TTIRegNum, then only newly added register can add 1386 // new instructions. 1387 if (PrevNumRegs > TTIRegNum) 1388 C.Insns += (C.NumRegs - PrevNumRegs); 1389 else 1390 C.Insns += (C.NumRegs - TTIRegNum); 1391 } 1392 1393 // If ICmpZero formula ends with not 0, it could not be replaced by 1394 // just add or sub. We'll need to compare final result of AddRec. 1395 // That means we'll need an additional instruction. But if the target can 1396 // macro-fuse a compare with a branch, don't count this extra instruction. 1397 // For -10 + {0, +, 1}: 1398 // i = i + 1; 1399 // cmp i, 10 1400 // 1401 // For {-10, +, 1}: 1402 // i = i + 1; 1403 if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() && 1404 !TTI->canMacroFuseCmp()) 1405 C.Insns++; 1406 // Each new AddRec adds 1 instruction to calculation. 1407 C.Insns += (C.AddRecCost - PrevAddRecCost); 1408 1409 // BaseAdds adds instructions for unfolded registers. 1410 if (LU.Kind != LSRUse::ICmpZero) 1411 C.Insns += C.NumBaseAdds - PrevNumBaseAdds; 1412 assert(isValid() && "invalid cost"); 1413 } 1414 1415 /// Set this cost to a losing value. 1416 void Cost::Lose() { 1417 C.Insns = std::numeric_limits<unsigned>::max(); 1418 C.NumRegs = std::numeric_limits<unsigned>::max(); 1419 C.AddRecCost = std::numeric_limits<unsigned>::max(); 1420 C.NumIVMuls = std::numeric_limits<unsigned>::max(); 1421 C.NumBaseAdds = std::numeric_limits<unsigned>::max(); 1422 C.ImmCost = std::numeric_limits<unsigned>::max(); 1423 C.SetupCost = std::numeric_limits<unsigned>::max(); 1424 C.ScaleCost = std::numeric_limits<unsigned>::max(); 1425 } 1426 1427 /// Choose the lower cost. 1428 bool Cost::isLess(Cost &Other) { 1429 if (InsnsCost.getNumOccurrences() > 0 && InsnsCost && 1430 C.Insns != Other.C.Insns) 1431 return C.Insns < Other.C.Insns; 1432 return TTI->isLSRCostLess(C, Other.C); 1433 } 1434 1435 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1436 void Cost::print(raw_ostream &OS) const { 1437 if (InsnsCost) 1438 OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s "); 1439 OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s"); 1440 if (C.AddRecCost != 0) 1441 OS << ", with addrec cost " << C.AddRecCost; 1442 if (C.NumIVMuls != 0) 1443 OS << ", plus " << C.NumIVMuls << " IV mul" 1444 << (C.NumIVMuls == 1 ? "" : "s"); 1445 if (C.NumBaseAdds != 0) 1446 OS << ", plus " << C.NumBaseAdds << " base add" 1447 << (C.NumBaseAdds == 1 ? 
"" : "s"); 1448 if (C.ScaleCost != 0) 1449 OS << ", plus " << C.ScaleCost << " scale cost"; 1450 if (C.ImmCost != 0) 1451 OS << ", plus " << C.ImmCost << " imm cost"; 1452 if (C.SetupCost != 0) 1453 OS << ", plus " << C.SetupCost << " setup cost"; 1454 } 1455 1456 LLVM_DUMP_METHOD void Cost::dump() const { 1457 print(errs()); errs() << '\n'; 1458 } 1459 #endif 1460 1461 /// Test whether this fixup always uses its value outside of the given loop. 1462 bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const { 1463 // PHI nodes use their value in their incoming blocks. 1464 if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) { 1465 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 1466 if (PN->getIncomingValue(i) == OperandValToReplace && 1467 L->contains(PN->getIncomingBlock(i))) 1468 return false; 1469 return true; 1470 } 1471 1472 return !L->contains(UserInst); 1473 } 1474 1475 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1476 void LSRFixup::print(raw_ostream &OS) const { 1477 OS << "UserInst="; 1478 // Store is common and interesting enough to be worth special-casing. 1479 if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { 1480 OS << "store "; 1481 Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false); 1482 } else if (UserInst->getType()->isVoidTy()) 1483 OS << UserInst->getOpcodeName(); 1484 else 1485 UserInst->printAsOperand(OS, /*PrintType=*/false); 1486 1487 OS << ", OperandValToReplace="; 1488 OperandValToReplace->printAsOperand(OS, /*PrintType=*/false); 1489 1490 for (const Loop *PIL : PostIncLoops) { 1491 OS << ", PostIncLoop="; 1492 PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 1493 } 1494 1495 if (Offset != 0) 1496 OS << ", Offset=" << Offset; 1497 } 1498 1499 LLVM_DUMP_METHOD void LSRFixup::dump() const { 1500 print(errs()); errs() << '\n'; 1501 } 1502 #endif 1503 1504 /// Test whether this use as a formula which has the same registers as the given 1505 /// formula. 1506 bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { 1507 SmallVector<const SCEV *, 4> Key = F.BaseRegs; 1508 if (F.ScaledReg) Key.push_back(F.ScaledReg); 1509 // Unstable sort by host order ok, because this is only used for uniquifying. 1510 llvm::sort(Key); 1511 return Uniquifier.count(Key); 1512 } 1513 1514 /// The function returns a probability of selecting formula without Reg. 1515 float LSRUse::getNotSelectedProbability(const SCEV *Reg) const { 1516 unsigned FNum = 0; 1517 for (const Formula &F : Formulae) 1518 if (F.referencesReg(Reg)) 1519 FNum++; 1520 return ((float)(Formulae.size() - FNum)) / Formulae.size(); 1521 } 1522 1523 /// If the given formula has not yet been inserted, add it to the list, and 1524 /// return true. Return false otherwise. The formula must be in canonical form. 1525 bool LSRUse::InsertFormula(const Formula &F, const Loop &L) { 1526 assert(F.isCanonical(L) && "Invalid canonical representation"); 1527 1528 if (!Formulae.empty() && RigidFormula) 1529 return false; 1530 1531 SmallVector<const SCEV *, 4> Key = F.BaseRegs; 1532 if (F.ScaledReg) Key.push_back(F.ScaledReg); 1533 // Unstable sort by host order ok, because this is only used for uniquifying. 1534 llvm::sort(Key); 1535 1536 if (!Uniquifier.insert(Key).second) 1537 return false; 1538 1539 // Using a register to hold the value of 0 is not profitable. 
1540 assert((!F.ScaledReg || !F.ScaledReg->isZero()) && 1541 "Zero allocated in a scaled register!"); 1542 #ifndef NDEBUG 1543 for (const SCEV *BaseReg : F.BaseRegs) 1544 assert(!BaseReg->isZero() && "Zero allocated in a base register!"); 1545 #endif 1546 1547 // Add the formula to the list. 1548 Formulae.push_back(F); 1549 1550 // Record registers now being used by this use. 1551 Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); 1552 if (F.ScaledReg) 1553 Regs.insert(F.ScaledReg); 1554 1555 return true; 1556 } 1557 1558 /// Remove the given formula from this use's list. 1559 void LSRUse::DeleteFormula(Formula &F) { 1560 if (&F != &Formulae.back()) 1561 std::swap(F, Formulae.back()); 1562 Formulae.pop_back(); 1563 } 1564 1565 /// Recompute the Regs field, and update RegUses. 1566 void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) { 1567 // Now that we've filtered out some formulae, recompute the Regs set. 1568 SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs); 1569 Regs.clear(); 1570 for (const Formula &F : Formulae) { 1571 if (F.ScaledReg) Regs.insert(F.ScaledReg); 1572 Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); 1573 } 1574 1575 // Update the RegTracker. 1576 for (const SCEV *S : OldRegs) 1577 if (!Regs.count(S)) 1578 RegUses.dropRegister(S, LUIdx); 1579 } 1580 1581 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1582 void LSRUse::print(raw_ostream &OS) const { 1583 OS << "LSR Use: Kind="; 1584 switch (Kind) { 1585 case Basic: OS << "Basic"; break; 1586 case Special: OS << "Special"; break; 1587 case ICmpZero: OS << "ICmpZero"; break; 1588 case Address: 1589 OS << "Address of "; 1590 if (AccessTy.MemTy->isPointerTy()) 1591 OS << "pointer"; // the full pointer type could be really verbose 1592 else { 1593 OS << *AccessTy.MemTy; 1594 } 1595 1596 OS << " in addrspace(" << AccessTy.AddrSpace << ')'; 1597 } 1598 1599 OS << ", Offsets={"; 1600 bool NeedComma = false; 1601 for (const LSRFixup &Fixup : Fixups) { 1602 if (NeedComma) OS << ','; 1603 OS << Fixup.Offset; 1604 NeedComma = true; 1605 } 1606 OS << '}'; 1607 1608 if (AllFixupsOutsideLoop) 1609 OS << ", all-fixups-outside-loop"; 1610 1611 if (WidestFixupType) 1612 OS << ", widest fixup type: " << *WidestFixupType; 1613 } 1614 1615 LLVM_DUMP_METHOD void LSRUse::dump() const { 1616 print(errs()); errs() << '\n'; 1617 } 1618 #endif 1619 1620 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, 1621 LSRUse::KindType Kind, MemAccessTy AccessTy, 1622 GlobalValue *BaseGV, int64_t BaseOffset, 1623 bool HasBaseReg, int64_t Scale, 1624 Instruction *Fixup/*= nullptr*/) { 1625 switch (Kind) { 1626 case LSRUse::Address: 1627 return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset, 1628 HasBaseReg, Scale, AccessTy.AddrSpace, Fixup); 1629 1630 case LSRUse::ICmpZero: 1631 // There's not even a target hook for querying whether it would be legal to 1632 // fold a GV into an ICmp. 1633 if (BaseGV) 1634 return false; 1635 1636 // ICmp only has two operands; don't allow more than two non-trivial parts. 1637 if (Scale != 0 && HasBaseReg && BaseOffset != 0) 1638 return false; 1639 1640 // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by 1641 // putting the scaled register in the other operand of the icmp. 1642 if (Scale != 0 && Scale != -1) 1643 return false; 1644 1645 // If we have low-level target information, ask the target if it can fold an 1646 // integer immediate on an icmp. 
1647 if (BaseOffset != 0) { 1648 // We have one of: 1649 // ICmpZero BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset 1650 // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset 1651 // Offs is the ICmp immediate. 1652 if (Scale == 0) 1653 // The cast does the right thing with 1654 // std::numeric_limits<int64_t>::min(). 1655 BaseOffset = -(uint64_t)BaseOffset; 1656 return TTI.isLegalICmpImmediate(BaseOffset); 1657 } 1658 1659 // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg 1660 return true; 1661 1662 case LSRUse::Basic: 1663 // Only handle single-register values. 1664 return !BaseGV && Scale == 0 && BaseOffset == 0; 1665 1666 case LSRUse::Special: 1667 // Special case Basic to handle -1 scales. 1668 return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0; 1669 } 1670 1671 llvm_unreachable("Invalid LSRUse Kind!"); 1672 } 1673 1674 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, 1675 int64_t MinOffset, int64_t MaxOffset, 1676 LSRUse::KindType Kind, MemAccessTy AccessTy, 1677 GlobalValue *BaseGV, int64_t BaseOffset, 1678 bool HasBaseReg, int64_t Scale) { 1679 // Check for overflow. 1680 if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) != 1681 (MinOffset > 0)) 1682 return false; 1683 MinOffset = (uint64_t)BaseOffset + MinOffset; 1684 if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) != 1685 (MaxOffset > 0)) 1686 return false; 1687 MaxOffset = (uint64_t)BaseOffset + MaxOffset; 1688 1689 return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset, 1690 HasBaseReg, Scale) && 1691 isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset, 1692 HasBaseReg, Scale); 1693 } 1694 1695 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, 1696 int64_t MinOffset, int64_t MaxOffset, 1697 LSRUse::KindType Kind, MemAccessTy AccessTy, 1698 const Formula &F, const Loop &L) { 1699 // For the purposes of isAMCompletelyFolded, either having a canonical formula 1700 // or a non-zero scale is acceptable. 1701 // Problems may arise from non-canonical formulae that have a scale of zero. 1702 // Strictly speaking, it would be best to rely only on canonical formulae. 1703 // However, when we generate the scaled formulae, we first check that the 1704 // scaling factor is profitable before computing the actual ScaledReg, for 1705 // compile time's sake. 1706 assert((F.isCanonical(L) || F.Scale != 0)); 1707 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, 1708 F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale); 1709 } 1710 1711 /// Test whether we know how to expand the current formula. 1712 static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, 1713 int64_t MaxOffset, LSRUse::KindType Kind, 1714 MemAccessTy AccessTy, GlobalValue *BaseGV, 1715 int64_t BaseOffset, bool HasBaseReg, int64_t Scale) { 1716 // We know how to expand completely foldable formulae. 1717 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, 1718 BaseOffset, HasBaseReg, Scale) || 1719 // Or formulae that use a base register produced by a sum of base 1720 // registers.
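// (A scale of 1 amounts to one more base register, so re-query the target
// with HasBaseReg set and no scale.)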
1721 (Scale == 1 && 1722 isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, 1723 BaseGV, BaseOffset, true, 0)); 1724 } 1725 1726 static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, 1727 int64_t MaxOffset, LSRUse::KindType Kind, 1728 MemAccessTy AccessTy, const Formula &F) { 1729 return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV, 1730 F.BaseOffset, F.HasBaseReg, F.Scale); 1731 } 1732 1733 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, 1734 const LSRUse &LU, const Formula &F) { 1735 // Target may want to look at the user instructions. 1736 if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) { 1737 for (const LSRFixup &Fixup : LU.Fixups) 1738 if (!isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV, 1739 (F.BaseOffset + Fixup.Offset), F.HasBaseReg, 1740 F.Scale, Fixup.UserInst)) 1741 return false; 1742 return true; 1743 } 1744 1745 return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, 1746 LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg, 1747 F.Scale); 1748 } 1749 1750 static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, 1751 const LSRUse &LU, const Formula &F, 1752 const Loop &L) { 1753 if (!F.Scale) 1754 return 0; 1755 1756 // If the use is not completely folded in that instruction, we will have to 1757 // pay an extra cost only for scale != 1. 1758 if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, 1759 LU.AccessTy, F, L)) 1760 return F.Scale != 1; 1761 1762 switch (LU.Kind) { 1763 case LSRUse::Address: { 1764 // Check the scaling factor cost with both the min and max offsets. 1765 int ScaleCostMinOffset = TTI.getScalingFactorCost( 1766 LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg, 1767 F.Scale, LU.AccessTy.AddrSpace); 1768 int ScaleCostMaxOffset = TTI.getScalingFactorCost( 1769 LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg, 1770 F.Scale, LU.AccessTy.AddrSpace); 1771 1772 assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 && 1773 "Legal addressing mode has an illegal cost!"); 1774 return std::max(ScaleCostMinOffset, ScaleCostMaxOffset); 1775 } 1776 case LSRUse::ICmpZero: 1777 case LSRUse::Basic: 1778 case LSRUse::Special: 1779 // The use is completely folded, i.e., everything is folded into the 1780 // instruction. 1781 return 0; 1782 } 1783 1784 llvm_unreachable("Invalid LSRUse Kind!"); 1785 } 1786 1787 static bool isAlwaysFoldable(const TargetTransformInfo &TTI, 1788 LSRUse::KindType Kind, MemAccessTy AccessTy, 1789 GlobalValue *BaseGV, int64_t BaseOffset, 1790 bool HasBaseReg) { 1791 // Fast-path: zero is always foldable. 1792 if (BaseOffset == 0 && !BaseGV) return true; 1793 1794 // Conservatively, create an address with an immediate and a 1795 // base and a scale. 1796 int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; 1797 1798 // Canonicalize a scale of 1 to a base register if the formula doesn't 1799 // already have a base register. 1800 if (!HasBaseReg && Scale == 1) { 1801 Scale = 0; 1802 HasBaseReg = true; 1803 } 1804 1805 return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset, 1806 HasBaseReg, Scale); 1807 } 1808 1809 static bool isAlwaysFoldable(const TargetTransformInfo &TTI, 1810 ScalarEvolution &SE, int64_t MinOffset, 1811 int64_t MaxOffset, LSRUse::KindType Kind, 1812 MemAccessTy AccessTy, const SCEV *S, 1813 bool HasBaseReg) { 1814 // Fast-path: zero is always foldable. 
1815 if (S->isZero()) return true; 1816 1817 // Conservatively, create an address with an immediate and a 1818 // base and a scale. 1819 int64_t BaseOffset = ExtractImmediate(S, SE); 1820 GlobalValue *BaseGV = ExtractSymbol(S, SE); 1821 1822 // If there's anything else involved, it's not foldable. 1823 if (!S->isZero()) return false; 1824 1825 // Fast-path: zero is always foldable. 1826 if (BaseOffset == 0 && !BaseGV) return true; 1827 1828 // Conservatively, create an address with an immediate and a 1829 // base and a scale. 1830 int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; 1831 1832 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, 1833 BaseOffset, HasBaseReg, Scale); 1834 } 1835 1836 namespace { 1837 1838 /// An individual increment in a Chain of IV increments. Relate an IV user to 1839 /// an expression that computes the IV it uses from the IV used by the previous 1840 /// link in the Chain. 1841 /// 1842 /// For the head of a chain, IncExpr holds the absolute SCEV expression for the 1843 /// original IVOperand. The head of the chain's IVOperand is only valid during 1844 /// chain collection, before LSR replaces IV users. During chain generation, 1845 /// IncExpr can be used to find the new IVOperand that computes the same 1846 /// expression. 1847 struct IVInc { 1848 Instruction *UserInst; 1849 Value* IVOperand; 1850 const SCEV *IncExpr; 1851 1852 IVInc(Instruction *U, Value *O, const SCEV *E) 1853 : UserInst(U), IVOperand(O), IncExpr(E) {} 1854 }; 1855 1856 // The list of IV increments in program order. We typically add the head of a 1857 // chain without finding subsequent links. 1858 struct IVChain { 1859 SmallVector<IVInc, 1> Incs; 1860 const SCEV *ExprBase = nullptr; 1861 1862 IVChain() = default; 1863 IVChain(const IVInc &Head, const SCEV *Base) 1864 : Incs(1, Head), ExprBase(Base) {} 1865 1866 using const_iterator = SmallVectorImpl<IVInc>::const_iterator; 1867 1868 // Return the first increment in the chain. 1869 const_iterator begin() const { 1870 assert(!Incs.empty()); 1871 return std::next(Incs.begin()); 1872 } 1873 const_iterator end() const { 1874 return Incs.end(); 1875 } 1876 1877 // Returns true if this chain contains any increments. 1878 bool hasIncs() const { return Incs.size() >= 2; } 1879 1880 // Add an IVInc to the end of this chain. 1881 void add(const IVInc &X) { Incs.push_back(X); } 1882 1883 // Returns the last UserInst in the chain. 1884 Instruction *tailUserInst() const { return Incs.back().UserInst; } 1885 1886 // Returns true if IncExpr can be profitably added to this chain. 1887 bool isProfitableIncrement(const SCEV *OperExpr, 1888 const SCEV *IncExpr, 1889 ScalarEvolution&); 1890 }; 1891 1892 /// Helper for CollectChains to track multiple IV increment uses. Distinguish 1893 /// between FarUsers that definitely cross IV increments and NearUsers that may 1894 /// be used between IV increments. 1895 struct ChainUsers { 1896 SmallPtrSet<Instruction*, 4> FarUsers; 1897 SmallPtrSet<Instruction*, 4> NearUsers; 1898 }; 1899 1900 /// This class holds state for the main loop strength reduction logic. 
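/// At a high level the work proceeds in phases: collect fixups and initial
/// formulae from IVUsers, generate additional reuse formulae, narrow the
/// search space with heuristics, solve for a low-cost assignment of formulae
/// to uses, and finally rewrite the IR to implement the chosen solution (see
/// the member functions declared below).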
1901 class LSRInstance { 1902 IVUsers &IU; 1903 ScalarEvolution &SE; 1904 DominatorTree &DT; 1905 LoopInfo &LI; 1906 AssumptionCache &AC; 1907 TargetLibraryInfo &TLI; 1908 const TargetTransformInfo &TTI; 1909 Loop *const L; 1910 MemorySSAUpdater *MSSAU; 1911 bool FavorBackedgeIndex = false; 1912 bool Changed = false; 1913 1914 /// This is the insert position that the current loop's induction variable 1915 /// increment should be placed. In simple loops, this is the latch block's 1916 /// terminator. But in more complicated cases, this is a position which will 1917 /// dominate all the in-loop post-increment users. 1918 Instruction *IVIncInsertPos = nullptr; 1919 1920 /// Interesting factors between use strides. 1921 /// 1922 /// We explicitly use a SetVector which contains a SmallSet, instead of the 1923 /// default, a SmallDenseSet, because we need to use the full range of 1924 /// int64_ts, and there's currently no good way of doing that with 1925 /// SmallDenseSet. 1926 SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors; 1927 1928 /// Interesting use types, to facilitate truncation reuse. 1929 SmallSetVector<Type *, 4> Types; 1930 1931 /// The list of interesting uses. 1932 mutable SmallVector<LSRUse, 16> Uses; 1933 1934 /// Track which uses use which register candidates. 1935 RegUseTracker RegUses; 1936 1937 // Limit the number of chains to avoid quadratic behavior. We don't expect to 1938 // have more than a few IV increment chains in a loop. Missing a Chain falls 1939 // back to normal LSR behavior for those uses. 1940 static const unsigned MaxChains = 8; 1941 1942 /// IV users can form a chain of IV increments. 1943 SmallVector<IVChain, MaxChains> IVChainVec; 1944 1945 /// IV users that belong to profitable IVChains. 1946 SmallPtrSet<Use*, MaxChains> IVIncSet; 1947 1948 void OptimizeShadowIV(); 1949 bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse); 1950 ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse); 1951 void OptimizeLoopTermCond(); 1952 1953 void ChainInstruction(Instruction *UserInst, Instruction *IVOper, 1954 SmallVectorImpl<ChainUsers> &ChainUsersVec); 1955 void FinalizeChain(IVChain &Chain); 1956 void CollectChains(); 1957 void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, 1958 SmallVectorImpl<WeakTrackingVH> &DeadInsts); 1959 1960 void CollectInterestingTypesAndFactors(); 1961 void CollectFixupsAndInitialFormulae(); 1962 1963 // Support for sharing of LSRUses between LSRFixups. 
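// Fixups whose expressions share the same base SCEV and use kind map to a
// single LSRUse index; see getUse().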
1964 using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>; 1965 UseMapTy UseMap; 1966 1967 bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, 1968 LSRUse::KindType Kind, MemAccessTy AccessTy); 1969 1970 std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind, 1971 MemAccessTy AccessTy); 1972 1973 void DeleteUse(LSRUse &LU, size_t LUIdx); 1974 1975 LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU); 1976 1977 void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); 1978 void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); 1979 void CountRegisters(const Formula &F, size_t LUIdx); 1980 bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F); 1981 1982 void CollectLoopInvariantFixupsAndFormulae(); 1983 1984 void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base, 1985 unsigned Depth = 0); 1986 1987 void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, 1988 const Formula &Base, unsigned Depth, 1989 size_t Idx, bool IsScaledReg = false); 1990 void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base); 1991 void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, 1992 const Formula &Base, size_t Idx, 1993 bool IsScaledReg = false); 1994 void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); 1995 void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx, 1996 const Formula &Base, 1997 const SmallVectorImpl<int64_t> &Worklist, 1998 size_t Idx, bool IsScaledReg = false); 1999 void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); 2000 void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base); 2001 void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base); 2002 void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base); 2003 void GenerateCrossUseConstantOffsets(); 2004 void GenerateAllReuseFormulae(); 2005 2006 void FilterOutUndesirableDedicatedRegisters(); 2007 2008 size_t EstimateSearchSpaceComplexity() const; 2009 void NarrowSearchSpaceByDetectingSupersets(); 2010 void NarrowSearchSpaceByCollapsingUnrolledCode(); 2011 void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); 2012 void NarrowSearchSpaceByFilterFormulaWithSameScaledReg(); 2013 void NarrowSearchSpaceByFilterPostInc(); 2014 void NarrowSearchSpaceByDeletingCostlyFormulas(); 2015 void NarrowSearchSpaceByPickingWinnerRegs(); 2016 void NarrowSearchSpaceUsingHeuristics(); 2017 2018 void SolveRecurse(SmallVectorImpl<const Formula *> &Solution, 2019 Cost &SolutionCost, 2020 SmallVectorImpl<const Formula *> &Workspace, 2021 const Cost &CurCost, 2022 const SmallPtrSet<const SCEV *, 16> &CurRegs, 2023 DenseSet<const SCEV *> &VisitedRegs) const; 2024 void Solve(SmallVectorImpl<const Formula *> &Solution) const; 2025 2026 BasicBlock::iterator 2027 HoistInsertPosition(BasicBlock::iterator IP, 2028 const SmallVectorImpl<Instruction *> &Inputs) const; 2029 BasicBlock::iterator 2030 AdjustInsertPositionForExpand(BasicBlock::iterator IP, 2031 const LSRFixup &LF, 2032 const LSRUse &LU, 2033 SCEVExpander &Rewriter) const; 2034 2035 Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F, 2036 BasicBlock::iterator IP, SCEVExpander &Rewriter, 2037 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const; 2038 void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF, 2039 const Formula &F, SCEVExpander &Rewriter, 2040 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const; 2041 void Rewrite(const LSRUse &LU, const LSRFixup &LF, const 
Formula &F, 2042 SCEVExpander &Rewriter, 2043 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const; 2044 void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution); 2045 2046 public: 2047 LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT, 2048 LoopInfo &LI, const TargetTransformInfo &TTI, AssumptionCache &AC, 2049 TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU); 2050 2051 bool getChanged() const { return Changed; } 2052 2053 void print_factors_and_types(raw_ostream &OS) const; 2054 void print_fixups(raw_ostream &OS) const; 2055 void print_uses(raw_ostream &OS) const; 2056 void print(raw_ostream &OS) const; 2057 void dump() const; 2058 }; 2059 2060 } // end anonymous namespace 2061 2062 /// If IV is used in a int-to-float cast inside the loop then try to eliminate 2063 /// the cast operation. 2064 void LSRInstance::OptimizeShadowIV() { 2065 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 2066 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 2067 return; 2068 2069 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); 2070 UI != E; /* empty */) { 2071 IVUsers::const_iterator CandidateUI = UI; 2072 ++UI; 2073 Instruction *ShadowUse = CandidateUI->getUser(); 2074 Type *DestTy = nullptr; 2075 bool IsSigned = false; 2076 2077 /* If shadow use is a int->float cast then insert a second IV 2078 to eliminate this cast. 2079 2080 for (unsigned i = 0; i < n; ++i) 2081 foo((double)i); 2082 2083 is transformed into 2084 2085 double d = 0.0; 2086 for (unsigned i = 0; i < n; ++i, ++d) 2087 foo(d); 2088 */ 2089 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) { 2090 IsSigned = false; 2091 DestTy = UCast->getDestTy(); 2092 } 2093 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) { 2094 IsSigned = true; 2095 DestTy = SCast->getDestTy(); 2096 } 2097 if (!DestTy) continue; 2098 2099 // If target does not support DestTy natively then do not apply 2100 // this transformation. 2101 if (!TTI.isTypeLegal(DestTy)) continue; 2102 2103 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0)); 2104 if (!PH) continue; 2105 if (PH->getNumIncomingValues() != 2) continue; 2106 2107 // If the calculation in integers overflows, the result in FP type will 2108 // differ. So we only can do this transformation if we are guaranteed to not 2109 // deal with overflowing values 2110 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PH)); 2111 if (!AR) continue; 2112 if (IsSigned && !AR->hasNoSignedWrap()) continue; 2113 if (!IsSigned && !AR->hasNoUnsignedWrap()) continue; 2114 2115 Type *SrcTy = PH->getType(); 2116 int Mantissa = DestTy->getFPMantissaWidth(); 2117 if (Mantissa == -1) continue; 2118 if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa) 2119 continue; 2120 2121 unsigned Entry, Latch; 2122 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) { 2123 Entry = 0; 2124 Latch = 1; 2125 } else { 2126 Entry = 1; 2127 Latch = 0; 2128 } 2129 2130 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry)); 2131 if (!Init) continue; 2132 Constant *NewInit = ConstantFP::get(DestTy, IsSigned ? 2133 (double)Init->getSExtValue() : 2134 (double)Init->getZExtValue()); 2135 2136 BinaryOperator *Incr = 2137 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch)); 2138 if (!Incr) continue; 2139 if (Incr->getOpcode() != Instruction::Add 2140 && Incr->getOpcode() != Instruction::Sub) 2141 continue; 2142 2143 /* Initialize new IV, double d = 0.0 in above example. 
*/ 2144 ConstantInt *C = nullptr; 2145 if (Incr->getOperand(0) == PH) 2146 C = dyn_cast<ConstantInt>(Incr->getOperand(1)); 2147 else if (Incr->getOperand(1) == PH) 2148 C = dyn_cast<ConstantInt>(Incr->getOperand(0)); 2149 else 2150 continue; 2151 2152 if (!C) continue; 2153 2154 // Ignore negative constants, as the code below doesn't handle them 2155 // correctly. TODO: Remove this restriction. 2156 if (!C->getValue().isStrictlyPositive()) continue; 2157 2158 /* Add new PHINode. */ 2159 PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH); 2160 2161 /* create new increment. '++d' in above example. */ 2162 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue()); 2163 BinaryOperator *NewIncr = 2164 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ? 2165 Instruction::FAdd : Instruction::FSub, 2166 NewPH, CFP, "IV.S.next.", Incr); 2167 2168 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry)); 2169 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch)); 2170 2171 /* Remove cast operation */ 2172 ShadowUse->replaceAllUsesWith(NewPH); 2173 ShadowUse->eraseFromParent(); 2174 Changed = true; 2175 break; 2176 } 2177 } 2178 2179 /// If Cond has an operand that is an expression of an IV, set the IV user and 2180 /// stride information and return true, otherwise return false. 2181 bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) { 2182 for (IVStrideUse &U : IU) 2183 if (U.getUser() == Cond) { 2184 // NOTE: we could handle setcc instructions with multiple uses here, but 2185 // InstCombine does it as well for simple uses, it's not clear that it 2186 // occurs enough in real life to handle. 2187 CondUse = &U; 2188 return true; 2189 } 2190 return false; 2191 } 2192 2193 /// Rewrite the loop's terminating condition if it uses a max computation. 2194 /// 2195 /// This is a narrow solution to a specific, but acute, problem. For loops 2196 /// like this: 2197 /// 2198 /// i = 0; 2199 /// do { 2200 /// p[i] = 0.0; 2201 /// } while (++i < n); 2202 /// 2203 /// the trip count isn't just 'n', because 'n' might not be positive. And 2204 /// unfortunately this can come up even for loops where the user didn't use 2205 /// a C do-while loop. For example, seemingly well-behaved top-test loops 2206 /// will commonly be lowered like this: 2207 /// 2208 /// if (n > 0) { 2209 /// i = 0; 2210 /// do { 2211 /// p[i] = 0.0; 2212 /// } while (++i < n); 2213 /// } 2214 /// 2215 /// and then it's possible for subsequent optimization to obscure the if 2216 /// test in such a way that indvars can't find it. 2217 /// 2218 /// When indvars can't find the if test in loops like this, it creates a 2219 /// max expression, which allows it to give the loop a canonical 2220 /// induction variable: 2221 /// 2222 /// i = 0; 2223 /// max = n < 1 ? 1 : n; 2224 /// do { 2225 /// p[i] = 0.0; 2226 /// } while (++i != max); 2227 /// 2228 /// Canonical induction variables are necessary because the loop passes 2229 /// are designed around them. The most obvious example of this is the 2230 /// LoopInfo analysis, which doesn't remember trip count values. It 2231 /// expects to be able to rediscover the trip count each time it is 2232 /// needed, and it does this using a simple analysis that only succeeds if 2233 /// the loop has a canonical induction variable. 2234 /// 2235 /// However, when it comes time to generate code, the maximum operation 2236 /// can be quite costly, especially if it's inside of an outer loop. 
2237 /// 2238 /// This function solves this problem by detecting this type of loop and 2239 /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting 2240 /// the instructions for the maximum computation. 2241 ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { 2242 // Check that the loop matches the pattern we're looking for. 2243 if (Cond->getPredicate() != CmpInst::ICMP_EQ && 2244 Cond->getPredicate() != CmpInst::ICMP_NE) 2245 return Cond; 2246 2247 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1)); 2248 if (!Sel || !Sel->hasOneUse()) return Cond; 2249 2250 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 2251 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 2252 return Cond; 2253 const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1); 2254 2255 // Add one to the backedge-taken count to get the trip count. 2256 const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount); 2257 if (IterationCount != SE.getSCEV(Sel)) return Cond; 2258 2259 // Check for a max calculation that matches the pattern. There's no check 2260 // for ICMP_ULE here because the comparison would be with zero, which 2261 // isn't interesting. 2262 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 2263 const SCEVNAryExpr *Max = nullptr; 2264 if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) { 2265 Pred = ICmpInst::ICMP_SLE; 2266 Max = S; 2267 } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) { 2268 Pred = ICmpInst::ICMP_SLT; 2269 Max = S; 2270 } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) { 2271 Pred = ICmpInst::ICMP_ULT; 2272 Max = U; 2273 } else { 2274 // No match; bail. 2275 return Cond; 2276 } 2277 2278 // To handle a max with more than two operands, this optimization would 2279 // require additional checking and setup. 2280 if (Max->getNumOperands() != 2) 2281 return Cond; 2282 2283 const SCEV *MaxLHS = Max->getOperand(0); 2284 const SCEV *MaxRHS = Max->getOperand(1); 2285 2286 // ScalarEvolution canonicalizes constants to the left. For < and >, look 2287 // for a comparison with 1. For <= and >=, a comparison with zero. 2288 if (!MaxLHS || 2289 (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One))) 2290 return Cond; 2291 2292 // Check the relevant induction variable for conformance to 2293 // the pattern. 2294 const SCEV *IV = SE.getSCEV(Cond->getOperand(0)); 2295 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); 2296 if (!AR || !AR->isAffine() || 2297 AR->getStart() != One || 2298 AR->getStepRecurrence(SE) != One) 2299 return Cond; 2300 2301 assert(AR->getLoop() == L && 2302 "Loop condition operand is an addrec in a different loop!"); 2303 2304 // Check the right operand of the select, and remember it, as it will 2305 // be used in the new comparison instruction. 2306 Value *NewRHS = nullptr; 2307 if (ICmpInst::isTrueWhenEqual(Pred)) { 2308 // Look for n+1, and grab n. 
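// (That is, if either select arm is an add of the form n + 1 whose addend n
// has the same SCEV as MaxRHS, take n as the new right-hand side.)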
2309 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1))) 2310 if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) 2311 if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) 2312 NewRHS = BO->getOperand(0); 2313 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2))) 2314 if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) 2315 if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) 2316 NewRHS = BO->getOperand(0); 2317 if (!NewRHS) 2318 return Cond; 2319 } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) 2320 NewRHS = Sel->getOperand(1); 2321 else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS) 2322 NewRHS = Sel->getOperand(2); 2323 else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS)) 2324 NewRHS = SU->getValue(); 2325 else 2326 // Max doesn't match expected pattern. 2327 return Cond; 2328 2329 // Determine the new comparison opcode. It may be signed or unsigned, 2330 // and the original comparison may be either equality or inequality. 2331 if (Cond->getPredicate() == CmpInst::ICMP_EQ) 2332 Pred = CmpInst::getInversePredicate(Pred); 2333 2334 // Ok, everything looks ok to change the condition into an SLT or SGE and 2335 // delete the max calculation. 2336 ICmpInst *NewCond = 2337 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp"); 2338 2339 // Delete the max calculation instructions. 2340 Cond->replaceAllUsesWith(NewCond); 2341 CondUse->setUser(NewCond); 2342 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); 2343 Cond->eraseFromParent(); 2344 Sel->eraseFromParent(); 2345 if (Cmp->use_empty()) 2346 Cmp->eraseFromParent(); 2347 return NewCond; 2348 } 2349 2350 /// Change loop terminating condition to use the postinc iv when possible. 2351 void 2352 LSRInstance::OptimizeLoopTermCond() { 2353 SmallPtrSet<Instruction *, 4> PostIncs; 2354 2355 // We need a different set of heuristics for rotated and non-rotated loops. 2356 // If a loop is rotated then the latch is also the backedge, so inserting 2357 // post-inc expressions just before the latch is ideal. To reduce live ranges 2358 // it also makes sense to rewrite terminating conditions to use post-inc 2359 // expressions. 2360 // 2361 // If the loop is not rotated then the latch is not a backedge; the latch 2362 // check is done in the loop head. Adding post-inc expressions before the 2363 // latch will cause overlapping live-ranges of pre-inc and post-inc expressions 2364 // in the loop body. In this case we do *not* want to use post-inc expressions 2365 // in the latch check, and we want to insert post-inc expressions before 2366 // the backedge. 2367 BasicBlock *LatchBlock = L->getLoopLatch(); 2368 SmallVector<BasicBlock*, 8> ExitingBlocks; 2369 L->getExitingBlocks(ExitingBlocks); 2370 if (llvm::all_of(ExitingBlocks, [&LatchBlock](const BasicBlock *BB) { 2371 return LatchBlock != BB; 2372 })) { 2373 // The backedge doesn't exit the loop; treat this as a head-tested loop. 2374 IVIncInsertPos = LatchBlock->getTerminator(); 2375 return; 2376 } 2377 2378 // Otherwise treat this as a rotated loop. 2379 for (BasicBlock *ExitingBlock : ExitingBlocks) { 2380 // Get the terminating condition for the loop if possible. If we 2381 // can, we want to change it to use a post-incremented version of its 2382 // induction variable, to allow coalescing the live ranges for the IV into 2383 // one register value. 
2384 2385 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 2386 if (!TermBr) 2387 continue; 2388 // FIXME: Overly conservative, termination condition could be an 'or' etc.. 2389 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) 2390 continue; 2391 2392 // Search IVUsesByStride to find Cond's IVUse if there is one. 2393 IVStrideUse *CondUse = nullptr; 2394 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); 2395 if (!FindIVUserForCond(Cond, CondUse)) 2396 continue; 2397 2398 // If the trip count is computed in terms of a max (due to ScalarEvolution 2399 // being unable to find a sufficient guard, for example), change the loop 2400 // comparison to use SLT or ULT instead of NE. 2401 // One consequence of doing this now is that it disrupts the count-down 2402 // optimization. That's not always a bad thing though, because in such 2403 // cases it may still be worthwhile to avoid a max. 2404 Cond = OptimizeMax(Cond, CondUse); 2405 2406 // If this exiting block dominates the latch block, it may also use 2407 // the post-inc value if it won't be shared with other uses. 2408 // Check for dominance. 2409 if (!DT.dominates(ExitingBlock, LatchBlock)) 2410 continue; 2411 2412 // Conservatively avoid trying to use the post-inc value in non-latch 2413 // exits if there may be pre-inc users in intervening blocks. 2414 if (LatchBlock != ExitingBlock) 2415 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) 2416 // Test if the use is reachable from the exiting block. This dominator 2417 // query is a conservative approximation of reachability. 2418 if (&*UI != CondUse && 2419 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) { 2420 // Conservatively assume there may be reuse if the quotient of their 2421 // strides could be a legal scale. 2422 const SCEV *A = IU.getStride(*CondUse, L); 2423 const SCEV *B = IU.getStride(*UI, L); 2424 if (!A || !B) continue; 2425 if (SE.getTypeSizeInBits(A->getType()) != 2426 SE.getTypeSizeInBits(B->getType())) { 2427 if (SE.getTypeSizeInBits(A->getType()) > 2428 SE.getTypeSizeInBits(B->getType())) 2429 B = SE.getSignExtendExpr(B, A->getType()); 2430 else 2431 A = SE.getSignExtendExpr(A, B->getType()); 2432 } 2433 if (const SCEVConstant *D = 2434 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) { 2435 const ConstantInt *C = D->getValue(); 2436 // Stride of one or negative one can have reuse with non-addresses. 2437 if (C->isOne() || C->isMinusOne()) 2438 goto decline_post_inc; 2439 // Avoid weird situations. 2440 if (C->getValue().getMinSignedBits() >= 64 || 2441 C->getValue().isMinSignedValue()) 2442 goto decline_post_inc; 2443 // Check for possible scaled-address reuse. 
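// (If the stride ratio, or its negation, would be a legal addressing-mode
// scale for this memory access, the pre-inc value may still be wanted there,
// so conservatively decline the post-inc transformation.)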
2444 if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) { 2445 MemAccessTy AccessTy = getAccessType( 2446 TTI, UI->getUser(), UI->getOperandValToReplace()); 2447 int64_t Scale = C->getSExtValue(); 2448 if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr, 2449 /*BaseOffset=*/0, 2450 /*HasBaseReg=*/false, Scale, 2451 AccessTy.AddrSpace)) 2452 goto decline_post_inc; 2453 Scale = -Scale; 2454 if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr, 2455 /*BaseOffset=*/0, 2456 /*HasBaseReg=*/false, Scale, 2457 AccessTy.AddrSpace)) 2458 goto decline_post_inc; 2459 } 2460 } 2461 } 2462 2463 LLVM_DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " 2464 << *Cond << '\n'); 2465 2466 // It's possible for the setcc instruction to be anywhere in the loop, and 2467 // possible for it to have multiple users. If it is not immediately before 2468 // the exiting block branch, move it. 2469 if (&*++BasicBlock::iterator(Cond) != TermBr) { 2470 if (Cond->hasOneUse()) { 2471 Cond->moveBefore(TermBr); 2472 } else { 2473 // Clone the terminating condition and insert into the loopend. 2474 ICmpInst *OldCond = Cond; 2475 Cond = cast<ICmpInst>(Cond->clone()); 2476 Cond->setName(L->getHeader()->getName() + ".termcond"); 2477 ExitingBlock->getInstList().insert(TermBr->getIterator(), Cond); 2478 2479 // Clone the IVUse, as the old use still exists! 2480 CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace()); 2481 TermBr->replaceUsesOfWith(OldCond, Cond); 2482 } 2483 } 2484 2485 // If we get to here, we know that we can transform the setcc instruction to 2486 // use the post-incremented version of the IV, allowing us to coalesce the 2487 // live ranges for the IV correctly. 2488 CondUse->transformToPostInc(L); 2489 Changed = true; 2490 2491 PostIncs.insert(Cond); 2492 decline_post_inc:; 2493 } 2494 2495 // Determine an insertion point for the loop induction variable increment. It 2496 // must dominate all the post-inc comparisons we just set up, and it must 2497 // dominate the loop latch edge. 2498 IVIncInsertPos = L->getLoopLatch()->getTerminator(); 2499 for (Instruction *Inst : PostIncs) { 2500 BasicBlock *BB = 2501 DT.findNearestCommonDominator(IVIncInsertPos->getParent(), 2502 Inst->getParent()); 2503 if (BB == Inst->getParent()) 2504 IVIncInsertPos = Inst; 2505 else if (BB != IVIncInsertPos->getParent()) 2506 IVIncInsertPos = BB->getTerminator(); 2507 } 2508 } 2509 2510 /// Determine if the given use can accommodate a fixup at the given offset and 2511 /// other details. If so, update the use and return true. 2512 bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, 2513 bool HasBaseReg, LSRUse::KindType Kind, 2514 MemAccessTy AccessTy) { 2515 int64_t NewMinOffset = LU.MinOffset; 2516 int64_t NewMaxOffset = LU.MaxOffset; 2517 MemAccessTy NewAccessTy = AccessTy; 2518 2519 // Check for a mismatched kind. It's tempting to collapse mismatched kinds to 2520 // something conservative, however this can pessimize in the case that one of 2521 // the uses will have all its uses outside the loop, for example. 2522 if (LU.Kind != Kind) 2523 return false; 2524 2525 // Check for a mismatched access type, and fall back conservatively as needed. 2526 // TODO: Be less conservative when the type is similar and can use the same 2527 // addressing modes. 
2528 if (Kind == LSRUse::Address) { 2529 if (AccessTy.MemTy != LU.AccessTy.MemTy) { 2530 NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(), 2531 AccessTy.AddrSpace); 2532 } 2533 } 2534 2535 // Conservatively assume HasBaseReg is true for now. 2536 if (NewOffset < LU.MinOffset) { 2537 if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr, 2538 LU.MaxOffset - NewOffset, HasBaseReg)) 2539 return false; 2540 NewMinOffset = NewOffset; 2541 } else if (NewOffset > LU.MaxOffset) { 2542 if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr, 2543 NewOffset - LU.MinOffset, HasBaseReg)) 2544 return false; 2545 NewMaxOffset = NewOffset; 2546 } 2547 2548 // Update the use. 2549 LU.MinOffset = NewMinOffset; 2550 LU.MaxOffset = NewMaxOffset; 2551 LU.AccessTy = NewAccessTy; 2552 return true; 2553 } 2554 2555 /// Return an LSRUse index and an offset value for a fixup which needs the given 2556 /// expression, with the given kind and optional access type. Either reuse an 2557 /// existing use or create a new one, as needed. 2558 std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr, 2559 LSRUse::KindType Kind, 2560 MemAccessTy AccessTy) { 2561 const SCEV *Copy = Expr; 2562 int64_t Offset = ExtractImmediate(Expr, SE); 2563 2564 // Basic uses can't accept any offset, for example. 2565 if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr, 2566 Offset, /*HasBaseReg=*/ true)) { 2567 Expr = Copy; 2568 Offset = 0; 2569 } 2570 2571 std::pair<UseMapTy::iterator, bool> P = 2572 UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0)); 2573 if (!P.second) { 2574 // A use already existed with this base. 2575 size_t LUIdx = P.first->second; 2576 LSRUse &LU = Uses[LUIdx]; 2577 if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy)) 2578 // Reuse this use. 2579 return std::make_pair(LUIdx, Offset); 2580 } 2581 2582 // Create a new use. 2583 size_t LUIdx = Uses.size(); 2584 P.first->second = LUIdx; 2585 Uses.push_back(LSRUse(Kind, AccessTy)); 2586 LSRUse &LU = Uses[LUIdx]; 2587 2588 LU.MinOffset = Offset; 2589 LU.MaxOffset = Offset; 2590 return std::make_pair(LUIdx, Offset); 2591 } 2592 2593 /// Delete the given use from the Uses list. 2594 void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) { 2595 if (&LU != &Uses.back()) 2596 std::swap(LU, Uses.back()); 2597 Uses.pop_back(); 2598 2599 // Update RegUses. 2600 RegUses.swapAndDropUse(LUIdx, Uses.size()); 2601 } 2602 2603 /// Look for a use distinct from OrigLU which has a formula that has the same 2604 /// registers as the given formula. 2605 LSRUse * 2606 LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF, 2607 const LSRUse &OrigLU) { 2608 // Search all uses for the formula. This could be more clever. 2609 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 2610 LSRUse &LU = Uses[LUIdx]; 2611 // Check whether this use is close enough to OrigLU, to see whether it's 2612 // worthwhile looking through its formulae. 2613 // Ignore ICmpZero uses because they may contain formulae generated by 2614 // GenerateICmpZeroScales, in which case adding fixup offsets may 2615 // be invalid. 2616 if (&LU != &OrigLU && 2617 LU.Kind != LSRUse::ICmpZero && 2618 LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy && 2619 LU.WidestFixupType == OrigLU.WidestFixupType && 2620 LU.HasFormulaWithSameRegs(OrigF)) { 2621 // Scan through this use's formulae.
2622 for (const Formula &F : LU.Formulae) { 2623 // Check to see if this formula has the same registers and symbols 2624 // as OrigF. 2625 if (F.BaseRegs == OrigF.BaseRegs && 2626 F.ScaledReg == OrigF.ScaledReg && 2627 F.BaseGV == OrigF.BaseGV && 2628 F.Scale == OrigF.Scale && 2629 F.UnfoldedOffset == OrigF.UnfoldedOffset) { 2630 if (F.BaseOffset == 0) 2631 return &LU; 2632 // This is the formula where all the registers and symbols matched; 2633 // there aren't going to be any others. Since we declined it, we 2634 // can skip the rest of the formulae and proceed to the next LSRUse. 2635 break; 2636 } 2637 } 2638 } 2639 } 2640 2641 // Nothing looked good. 2642 return nullptr; 2643 } 2644 2645 void LSRInstance::CollectInterestingTypesAndFactors() { 2646 SmallSetVector<const SCEV *, 4> Strides; 2647 2648 // Collect interesting types and strides. 2649 SmallVector<const SCEV *, 4> Worklist; 2650 for (const IVStrideUse &U : IU) { 2651 const SCEV *Expr = IU.getExpr(U); 2652 2653 // Collect interesting types. 2654 Types.insert(SE.getEffectiveSCEVType(Expr->getType())); 2655 2656 // Add strides for mentioned loops. 2657 Worklist.push_back(Expr); 2658 do { 2659 const SCEV *S = Worklist.pop_back_val(); 2660 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 2661 if (AR->getLoop() == L) 2662 Strides.insert(AR->getStepRecurrence(SE)); 2663 Worklist.push_back(AR->getStart()); 2664 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2665 Worklist.append(Add->op_begin(), Add->op_end()); 2666 } 2667 } while (!Worklist.empty()); 2668 } 2669 2670 // Compute interesting factors from the set of interesting strides. 2671 for (SmallSetVector<const SCEV *, 4>::const_iterator 2672 I = Strides.begin(), E = Strides.end(); I != E; ++I) 2673 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter = 2674 std::next(I); NewStrideIter != E; ++NewStrideIter) { 2675 const SCEV *OldStride = *I; 2676 const SCEV *NewStride = *NewStrideIter; 2677 2678 if (SE.getTypeSizeInBits(OldStride->getType()) != 2679 SE.getTypeSizeInBits(NewStride->getType())) { 2680 if (SE.getTypeSizeInBits(OldStride->getType()) > 2681 SE.getTypeSizeInBits(NewStride->getType())) 2682 NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType()); 2683 else 2684 OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType()); 2685 } 2686 if (const SCEVConstant *Factor = 2687 dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride, 2688 SE, true))) { 2689 if (Factor->getAPInt().getMinSignedBits() <= 64) 2690 Factors.insert(Factor->getAPInt().getSExtValue()); 2691 } else if (const SCEVConstant *Factor = 2692 dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride, 2693 NewStride, 2694 SE, true))) { 2695 if (Factor->getAPInt().getMinSignedBits() <= 64) 2696 Factors.insert(Factor->getAPInt().getSExtValue()); 2697 } 2698 } 2699 2700 // If all uses use the same type, don't bother looking for truncation-based 2701 // reuse. 2702 if (Types.size() == 1) 2703 Types.clear(); 2704 2705 LLVM_DEBUG(print_factors_and_types(dbgs())); 2706 } 2707 2708 /// Helper for CollectChains that finds an IV operand (computed by an AddRec in 2709 /// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to 2710 /// IVStrideUses, we could partially skip this. 
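/// Operands that are not instructions, are not SCEVable, or whose SCEV is not
/// an add recurrence for this loop are skipped.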
2711 static User::op_iterator 2712 findIVOperand(User::op_iterator OI, User::op_iterator OE, 2713 Loop *L, ScalarEvolution &SE) { 2714 for(; OI != OE; ++OI) { 2715 if (Instruction *Oper = dyn_cast<Instruction>(*OI)) { 2716 if (!SE.isSCEVable(Oper->getType())) 2717 continue; 2718 2719 if (const SCEVAddRecExpr *AR = 2720 dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) { 2721 if (AR->getLoop() == L) 2722 break; 2723 } 2724 } 2725 } 2726 return OI; 2727 } 2728 2729 /// IVChain logic must consistently peek base TruncInst operands, so wrap it in 2730 /// a convenient helper. 2731 static Value *getWideOperand(Value *Oper) { 2732 if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper)) 2733 return Trunc->getOperand(0); 2734 return Oper; 2735 } 2736 2737 /// Return true if we allow an IV chain to include both types. 2738 static bool isCompatibleIVType(Value *LVal, Value *RVal) { 2739 Type *LType = LVal->getType(); 2740 Type *RType = RVal->getType(); 2741 return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy() && 2742 // Different address spaces means (possibly) 2743 // different types of the pointer implementation, 2744 // e.g. i16 vs i32 so disallow that. 2745 (LType->getPointerAddressSpace() == 2746 RType->getPointerAddressSpace())); 2747 } 2748 2749 /// Return an approximation of this SCEV expression's "base", or NULL for any 2750 /// constant. Returning the expression itself is conservative. Returning a 2751 /// deeper subexpression is more precise and valid as long as it isn't less 2752 /// complex than another subexpression. For expressions involving multiple 2753 /// unscaled values, we need to return the pointer-type SCEVUnknown. This avoids 2754 /// forming chains across objects, such as: PrevOper==a[i], IVOper==b[i], 2755 /// IVInc==b-a. 2756 /// 2757 /// Since SCEVUnknown is the rightmost type, and pointers are the rightmost 2758 /// SCEVUnknown, we simply return the rightmost SCEV operand. 2759 static const SCEV *getExprBase(const SCEV *S) { 2760 switch (S->getSCEVType()) { 2761 default: // uncluding scUnknown. 2762 return S; 2763 case scConstant: 2764 return nullptr; 2765 case scTruncate: 2766 return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand()); 2767 case scZeroExtend: 2768 return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand()); 2769 case scSignExtend: 2770 return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand()); 2771 case scAddExpr: { 2772 // Skip over scaled operands (scMulExpr) to follow add operands as long as 2773 // there's nothing more complex. 2774 // FIXME: not sure if we want to recognize negation. 2775 const SCEVAddExpr *Add = cast<SCEVAddExpr>(S); 2776 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()), 2777 E(Add->op_begin()); I != E; ++I) { 2778 const SCEV *SubExpr = *I; 2779 if (SubExpr->getSCEVType() == scAddExpr) 2780 return getExprBase(SubExpr); 2781 2782 if (SubExpr->getSCEVType() != scMulExpr) 2783 return SubExpr; 2784 } 2785 return S; // all operands are scaled, be conservative. 2786 } 2787 case scAddRecExpr: 2788 return getExprBase(cast<SCEVAddRecExpr>(S)->getStart()); 2789 } 2790 llvm_unreachable("Unknown SCEV kind!"); 2791 } 2792 2793 /// Return true if the chain increment is profitable to expand into a loop 2794 /// invariant value, which may require its own register. A profitable chain 2795 /// increment will be an offset relative to the same base. We allow such offsets 2796 /// to potentially be used as chain increment as long as it's not obviously 2797 /// expensive to expand using real instructions. 
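/// For example (illustrative), stepping from a[i] to a[i+1] yields a small
/// constant increment, which is always acceptable, whereas an increment that
/// would have to be expanded from an expensive loop-invariant expression is
/// rejected.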
2798 bool IVChain::isProfitableIncrement(const SCEV *OperExpr, 2799 const SCEV *IncExpr, 2800 ScalarEvolution &SE) { 2801 // Aggressively form chains when -stress-ivchain. 2802 if (StressIVChain) 2803 return true; 2804 2805 // Do not replace a constant offset from IV head with a nonconstant IV 2806 // increment. 2807 if (!isa<SCEVConstant>(IncExpr)) { 2808 const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand)); 2809 if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr))) 2810 return false; 2811 } 2812 2813 SmallPtrSet<const SCEV*, 8> Processed; 2814 return !isHighCostExpansion(IncExpr, Processed, SE); 2815 } 2816 2817 /// Return true if the number of registers needed for the chain is estimated to 2818 /// be less than the number required for the individual IV users. First prohibit 2819 /// any IV users that keep the IV live across increments (the Users set should 2820 /// be empty). Next count the number and type of increments in the chain. 2821 /// 2822 /// Chaining IVs can lead to considerable code bloat if ISEL doesn't 2823 /// effectively use postinc addressing modes. Only consider it profitable if the 2824 /// increments can be computed in fewer registers when chained. 2825 /// 2826 /// TODO: Consider IVInc free if it's already used in other chains. 2827 static bool isProfitableChain(IVChain &Chain, 2828 SmallPtrSetImpl<Instruction *> &Users, 2829 ScalarEvolution &SE, 2830 const TargetTransformInfo &TTI) { 2831 if (StressIVChain) 2832 return true; 2833 2834 if (!Chain.hasIncs()) 2835 return false; 2836 2837 if (!Users.empty()) { 2838 LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n"; 2839 for (Instruction *Inst 2840 : Users) { dbgs() << " " << *Inst << "\n"; }); 2841 return false; 2842 } 2843 assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); 2844 2845 // The chain itself may require a register, so initialize cost to 1. 2846 int cost = 1; 2847 2848 // A complete chain likely eliminates the need for keeping the original IV in 2849 // a register. LSR does not currently know how to form a complete chain unless 2850 // the header phi already exists. 2851 if (isa<PHINode>(Chain.tailUserInst()) 2852 && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) { 2853 --cost; 2854 } 2855 const SCEV *LastIncExpr = nullptr; 2856 unsigned NumConstIncrements = 0; 2857 unsigned NumVarIncrements = 0; 2858 unsigned NumReusedIncrements = 0; 2859 2860 if (TTI.isProfitableLSRChainElement(Chain.Incs[0].UserInst)) 2861 return true; 2862 2863 for (const IVInc &Inc : Chain) { 2864 if (TTI.isProfitableLSRChainElement(Inc.UserInst)) 2865 return true; 2866 if (Inc.IncExpr->isZero()) 2867 continue; 2868 2869 // Incrementing by zero or some constant is neutral. We assume constants can 2870 // be folded into an addressing mode or an add's immediate operand. 2871 if (isa<SCEVConstant>(Inc.IncExpr)) { 2872 ++NumConstIncrements; 2873 continue; 2874 } 2875 2876 if (Inc.IncExpr == LastIncExpr) 2877 ++NumReusedIncrements; 2878 else 2879 ++NumVarIncrements; 2880 2881 LastIncExpr = Inc.IncExpr; 2882 } 2883 // An IV chain with a single increment is handled by LSR's postinc 2884 // uses. However, a chain with multiple increments requires keeping the IV's 2885 // value live longer than it needs to be if chained. 2886 if (NumConstIncrements > 1) 2887 --cost; 2888 2889 // Materializing increment expressions in the preheader that didn't exist in 2890 // the original code may cost a register.
For example, sign-extended array 2891 // indices can produce ridiculous increments like this: 2892 // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64))) 2893 cost += NumVarIncrements; 2894 2895 // Reusing variable increments likely saves a register to hold the multiple of 2896 // the stride. 2897 cost -= NumReusedIncrements; 2898 2899 LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost 2900 << "\n"); 2901 2902 return cost < 0; 2903 } 2904 2905 /// Add this IV user to an existing chain or make it the head of a new chain. 2906 void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, 2907 SmallVectorImpl<ChainUsers> &ChainUsersVec) { 2908 // When IVs are used as types of varying widths, they are generally converted 2909 // to a wider type with some uses remaining narrow under a (free) trunc. 2910 Value *const NextIV = getWideOperand(IVOper); 2911 const SCEV *const OperExpr = SE.getSCEV(NextIV); 2912 const SCEV *const OperExprBase = getExprBase(OperExpr); 2913 2914 // Visit all existing chains. Check if its IVOper can be computed as a 2915 // profitable loop invariant increment from the last link in the Chain. 2916 unsigned ChainIdx = 0, NChains = IVChainVec.size(); 2917 const SCEV *LastIncExpr = nullptr; 2918 for (; ChainIdx < NChains; ++ChainIdx) { 2919 IVChain &Chain = IVChainVec[ChainIdx]; 2920 2921 // Prune the solution space aggressively by checking that both IV operands 2922 // are expressions that operate on the same unscaled SCEVUnknown. This 2923 // "base" will be canceled by the subsequent getMinusSCEV call. Checking 2924 // first avoids creating extra SCEV expressions. 2925 if (!StressIVChain && Chain.ExprBase != OperExprBase) 2926 continue; 2927 2928 Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand); 2929 if (!isCompatibleIVType(PrevIV, NextIV)) 2930 continue; 2931 2932 // A phi node terminates a chain. 2933 if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst())) 2934 continue; 2935 2936 // The increment must be loop-invariant so it can be kept in a register. 2937 const SCEV *PrevExpr = SE.getSCEV(PrevIV); 2938 const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr); 2939 if (!SE.isLoopInvariant(IncExpr, L)) 2940 continue; 2941 2942 if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) { 2943 LastIncExpr = IncExpr; 2944 break; 2945 } 2946 } 2947 // If we haven't found a chain, create a new one, unless we hit the max. Don't 2948 // bother for phi nodes, because they must be last in the chain. 2949 if (ChainIdx == NChains) { 2950 if (isa<PHINode>(UserInst)) 2951 return; 2952 if (NChains >= MaxChains && !StressIVChain) { 2953 LLVM_DEBUG(dbgs() << "IV Chain Limit\n"); 2954 return; 2955 } 2956 LastIncExpr = OperExpr; 2957 // IVUsers may have skipped over sign/zero extensions. We don't currently 2958 // attempt to form chains involving extensions unless they can be hoisted 2959 // into this loop's AddRec. 2960 if (!isa<SCEVAddRecExpr>(LastIncExpr)) 2961 return; 2962 ++NChains; 2963 IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr), 2964 OperExprBase)); 2965 ChainUsersVec.resize(NChains); 2966 LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst 2967 << ") IV=" << *LastIncExpr << "\n"); 2968 } else { 2969 LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst 2970 << ") IV+" << *LastIncExpr << "\n"); 2971 // Add this IV user to the end of the chain. 
2972 IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr)); 2973 } 2974 IVChain &Chain = IVChainVec[ChainIdx]; 2975 2976 SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers; 2977 // This chain's NearUsers become FarUsers. 2978 if (!LastIncExpr->isZero()) { 2979 ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(), 2980 NearUsers.end()); 2981 NearUsers.clear(); 2982 } 2983 2984 // All other uses of IVOperand become near uses of the chain. 2985 // We currently ignore intermediate values within SCEV expressions, assuming 2986 // they will eventually be used be the current chain, or can be computed 2987 // from one of the chain increments. To be more precise we could 2988 // transitively follow its user and only add leaf IV users to the set. 2989 for (User *U : IVOper->users()) { 2990 Instruction *OtherUse = dyn_cast<Instruction>(U); 2991 if (!OtherUse) 2992 continue; 2993 // Uses in the chain will no longer be uses if the chain is formed. 2994 // Include the head of the chain in this iteration (not Chain.begin()). 2995 IVChain::const_iterator IncIter = Chain.Incs.begin(); 2996 IVChain::const_iterator IncEnd = Chain.Incs.end(); 2997 for( ; IncIter != IncEnd; ++IncIter) { 2998 if (IncIter->UserInst == OtherUse) 2999 break; 3000 } 3001 if (IncIter != IncEnd) 3002 continue; 3003 3004 if (SE.isSCEVable(OtherUse->getType()) 3005 && !isa<SCEVUnknown>(SE.getSCEV(OtherUse)) 3006 && IU.isIVUserOrOperand(OtherUse)) { 3007 continue; 3008 } 3009 NearUsers.insert(OtherUse); 3010 } 3011 3012 // Since this user is part of the chain, it's no longer considered a use 3013 // of the chain. 3014 ChainUsersVec[ChainIdx].FarUsers.erase(UserInst); 3015 } 3016 3017 /// Populate the vector of Chains. 3018 /// 3019 /// This decreases ILP at the architecture level. Targets with ample registers, 3020 /// multiple memory ports, and no register renaming probably don't want 3021 /// this. However, such targets should probably disable LSR altogether. 3022 /// 3023 /// The job of LSR is to make a reasonable choice of induction variables across 3024 /// the loop. Subsequent passes can easily "unchain" computation exposing more 3025 /// ILP *within the loop* if the target wants it. 3026 /// 3027 /// Finding the best IV chain is potentially a scheduling problem. Since LSR 3028 /// will not reorder memory operations, it will recognize this as a chain, but 3029 /// will generate redundant IV increments. Ideally this would be corrected later 3030 /// by a smart scheduler: 3031 /// = A[i] 3032 /// = A[i+x] 3033 /// A[i] = 3034 /// A[i+x] = 3035 /// 3036 /// TODO: Walk the entire domtree within this loop, not just the path to the 3037 /// loop latch. This will discover chains on side paths, but requires 3038 /// maintaining multiple copies of the Chains state. 3039 void LSRInstance::CollectChains() { 3040 LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n"); 3041 SmallVector<ChainUsers, 8> ChainUsersVec; 3042 3043 SmallVector<BasicBlock *,8> LatchPath; 3044 BasicBlock *LoopHeader = L->getHeader(); 3045 for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch()); 3046 Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) { 3047 LatchPath.push_back(Rung->getBlock()); 3048 } 3049 LatchPath.push_back(LoopHeader); 3050 3051 // Walk the instruction stream from the loop header to the loop latch. 3052 for (BasicBlock *BB : reverse(LatchPath)) { 3053 for (Instruction &I : *BB) { 3054 // Skip instructions that weren't seen by IVUsers analysis. 
3055 if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I)) 3056 continue; 3057 3058 // Ignore users that are part of a SCEV expression. This way we only 3059 // consider leaf IV Users. This effectively rediscovers a portion of 3060 // IVUsers analysis but in program order this time. 3061 if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I))) 3062 continue; 3063 3064 // Remove this instruction from any NearUsers set it may be in. 3065 for (unsigned ChainIdx = 0, NChains = IVChainVec.size(); 3066 ChainIdx < NChains; ++ChainIdx) { 3067 ChainUsersVec[ChainIdx].NearUsers.erase(&I); 3068 } 3069 // Search for operands that can be chained. 3070 SmallPtrSet<Instruction*, 4> UniqueOperands; 3071 User::op_iterator IVOpEnd = I.op_end(); 3072 User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE); 3073 while (IVOpIter != IVOpEnd) { 3074 Instruction *IVOpInst = cast<Instruction>(*IVOpIter); 3075 if (UniqueOperands.insert(IVOpInst).second) 3076 ChainInstruction(&I, IVOpInst, ChainUsersVec); 3077 IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); 3078 } 3079 } // Continue walking down the instructions. 3080 } // Continue walking down the domtree. 3081 // Visit phi backedges to determine if the chain can generate the IV postinc. 3082 for (PHINode &PN : L->getHeader()->phis()) { 3083 if (!SE.isSCEVable(PN.getType())) 3084 continue; 3085 3086 Instruction *IncV = 3087 dyn_cast<Instruction>(PN.getIncomingValueForBlock(L->getLoopLatch())); 3088 if (IncV) 3089 ChainInstruction(&PN, IncV, ChainUsersVec); 3090 } 3091 // Remove any unprofitable chains. 3092 unsigned ChainIdx = 0; 3093 for (unsigned UsersIdx = 0, NChains = IVChainVec.size(); 3094 UsersIdx < NChains; ++UsersIdx) { 3095 if (!isProfitableChain(IVChainVec[UsersIdx], 3096 ChainUsersVec[UsersIdx].FarUsers, SE, TTI)) 3097 continue; 3098 // Preserve the chain at UsesIdx. 3099 if (ChainIdx != UsersIdx) 3100 IVChainVec[ChainIdx] = IVChainVec[UsersIdx]; 3101 FinalizeChain(IVChainVec[ChainIdx]); 3102 ++ChainIdx; 3103 } 3104 IVChainVec.resize(ChainIdx); 3105 } 3106 3107 void LSRInstance::FinalizeChain(IVChain &Chain) { 3108 assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); 3109 LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n"); 3110 3111 for (const IVInc &Inc : Chain) { 3112 LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n"); 3113 auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand); 3114 assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand"); 3115 IVIncSet.insert(UseI); 3116 } 3117 } 3118 3119 /// Return true if the IVInc can be folded into an addressing mode. 3120 static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst, 3121 Value *Operand, const TargetTransformInfo &TTI) { 3122 const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr); 3123 if (!IncConst || !isAddressUse(TTI, UserInst, Operand)) 3124 return false; 3125 3126 if (IncConst->getAPInt().getMinSignedBits() > 64) 3127 return false; 3128 3129 MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand); 3130 int64_t IncOffset = IncConst->getValue()->getSExtValue(); 3131 if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr, 3132 IncOffset, /*HasBaseReg=*/false)) 3133 return false; 3134 3135 return true; 3136 } 3137 3138 /// Generate an add or subtract for each IVInc in a chain to materialize the IV 3139 /// user's operand from the previous IV user's operand. 
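/// For example (illustrative), a chain whose links each add a constant 4 is
/// materialized as a series of adds, each one derived from the previous chain
/// value rather than from the original induction variable.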
3140 void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, 3141 SmallVectorImpl<WeakTrackingVH> &DeadInsts) { 3142 // Find the new IVOperand for the head of the chain. It may have been replaced 3143 // by LSR. 3144 const IVInc &Head = Chain.Incs[0]; 3145 User::op_iterator IVOpEnd = Head.UserInst->op_end(); 3146 // findIVOperand returns IVOpEnd if it can no longer find a valid IV user. 3147 User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(), 3148 IVOpEnd, L, SE); 3149 Value *IVSrc = nullptr; 3150 while (IVOpIter != IVOpEnd) { 3151 IVSrc = getWideOperand(*IVOpIter); 3152 3153 // If this operand computes the expression that the chain needs, we may use 3154 // it. (Check this after setting IVSrc which is used below.) 3155 // 3156 // Note that if Head.IncExpr is wider than IVSrc, then this phi is too 3157 // narrow for the chain, so we can no longer use it. We do allow using a 3158 // wider phi, assuming the LSR checked for free truncation. In that case we 3159 // should already have a truncate on this operand such that 3160 // getSCEV(IVSrc) == IncExpr. 3161 if (SE.getSCEV(*IVOpIter) == Head.IncExpr 3162 || SE.getSCEV(IVSrc) == Head.IncExpr) { 3163 break; 3164 } 3165 IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); 3166 } 3167 if (IVOpIter == IVOpEnd) { 3168 // Gracefully give up on this chain. 3169 LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n"); 3170 return; 3171 } 3172 assert(IVSrc && "Failed to find IV chain source"); 3173 3174 LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n"); 3175 Type *IVTy = IVSrc->getType(); 3176 Type *IntTy = SE.getEffectiveSCEVType(IVTy); 3177 const SCEV *LeftOverExpr = nullptr; 3178 for (const IVInc &Inc : Chain) { 3179 Instruction *InsertPt = Inc.UserInst; 3180 if (isa<PHINode>(InsertPt)) 3181 InsertPt = L->getLoopLatch()->getTerminator(); 3182 3183 // IVOper will replace the current IV User's operand. IVSrc is the IV 3184 // value currently held in a register. 3185 Value *IVOper = IVSrc; 3186 if (!Inc.IncExpr->isZero()) { 3187 // IncExpr was the result of subtraction of two narrow values, so must 3188 // be signed. 3189 const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy); 3190 LeftOverExpr = LeftOverExpr ? 3191 SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; 3192 } 3193 if (LeftOverExpr && !LeftOverExpr->isZero()) { 3194 // Expand the IV increment. 3195 Rewriter.clearPostInc(); 3196 Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt); 3197 const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc), 3198 SE.getUnknown(IncV)); 3199 IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); 3200 3201 // If an IV increment can't be folded, use it as the next IV value. 3202 if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) { 3203 assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); 3204 IVSrc = IVOper; 3205 LeftOverExpr = nullptr; 3206 } 3207 } 3208 Type *OperTy = Inc.IVOperand->getType(); 3209 if (IVTy != OperTy) { 3210 assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && 3211 "cannot extend a chained IV"); 3212 IRBuilder<> Builder(InsertPt); 3213 IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); 3214 } 3215 Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper); 3216 if (auto *OperandIsInstr = dyn_cast<Instruction>(Inc.IVOperand)) 3217 DeadInsts.emplace_back(OperandIsInstr); 3218 } 3219 // If LSR created a new, wider phi, we may also replace its postinc. 
We only 3220 // do this if we also found a wide value for the head of the chain. 3221 if (isa<PHINode>(Chain.tailUserInst())) { 3222 for (PHINode &Phi : L->getHeader()->phis()) { 3223 if (!isCompatibleIVType(&Phi, IVSrc)) 3224 continue; 3225 Instruction *PostIncV = dyn_cast<Instruction>( 3226 Phi.getIncomingValueForBlock(L->getLoopLatch())); 3227 if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc))) 3228 continue; 3229 Value *IVOper = IVSrc; 3230 Type *PostIncTy = PostIncV->getType(); 3231 if (IVTy != PostIncTy) { 3232 assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types"); 3233 IRBuilder<> Builder(L->getLoopLatch()->getTerminator()); 3234 Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc()); 3235 IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); 3236 } 3237 Phi.replaceUsesOfWith(PostIncV, IVOper); 3238 DeadInsts.emplace_back(PostIncV); 3239 } 3240 } 3241 } 3242 3243 void LSRInstance::CollectFixupsAndInitialFormulae() { 3244 BranchInst *ExitBranch = nullptr; 3245 bool SaveCmp = TTI.canSaveCmp(L, &ExitBranch, &SE, &LI, &DT, &AC, &TLI); 3246 3247 for (const IVStrideUse &U : IU) { 3248 Instruction *UserInst = U.getUser(); 3249 // Skip IV users that are part of profitable IV Chains. 3250 User::op_iterator UseI = 3251 find(UserInst->operands(), U.getOperandValToReplace()); 3252 assert(UseI != UserInst->op_end() && "cannot find IV operand"); 3253 if (IVIncSet.count(UseI)) { 3254 LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n'); 3255 continue; 3256 } 3257 3258 LSRUse::KindType Kind = LSRUse::Basic; 3259 MemAccessTy AccessTy; 3260 if (isAddressUse(TTI, UserInst, U.getOperandValToReplace())) { 3261 Kind = LSRUse::Address; 3262 AccessTy = getAccessType(TTI, UserInst, U.getOperandValToReplace()); 3263 } 3264 3265 const SCEV *S = IU.getExpr(U); 3266 PostIncLoopSet TmpPostIncLoops = U.getPostIncLoops(); 3267 3268 // Equality (== and !=) ICmps are special. We can rewrite (i == N) as 3269 // (N - i == 0), and this allows (N - i) to be the expression that we work 3270 // with rather than just N or i, so we can consider the register 3271 // requirements for both N and i at the same time. Limiting this code to 3272 // equality icmps is not a problem because all interesting loops use 3273 // equality icmps, thanks to IndVarSimplify. 3274 if (ICmpInst *CI = dyn_cast<ICmpInst>(UserInst)) { 3275 // If CI can be saved in some target, like replaced inside hardware loop 3276 // in PowerPC, no need to generate initial formulae for it. 3277 if (SaveCmp && CI == dyn_cast<ICmpInst>(ExitBranch->getCondition())) 3278 continue; 3279 if (CI->isEquality()) { 3280 // Swap the operands if needed to put the OperandValToReplace on the 3281 // left, for consistency. 3282 Value *NV = CI->getOperand(1); 3283 if (NV == U.getOperandValToReplace()) { 3284 CI->setOperand(1, CI->getOperand(0)); 3285 CI->setOperand(0, NV); 3286 NV = CI->getOperand(1); 3287 Changed = true; 3288 } 3289 3290 // x == y --> x - y == 0 3291 const SCEV *N = SE.getSCEV(NV); 3292 if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) { 3293 // S is normalized, so normalize N before folding it into S 3294 // to keep the result normalized. 3295 N = normalizeForPostIncUse(N, TmpPostIncLoops, SE); 3296 Kind = LSRUse::ICmpZero; 3297 S = SE.getMinusSCEV(N, S); 3298 } 3299 3300 // -1 and the negations of all interesting strides (except the negation 3301 // of -1) are now also interesting. 
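// For example, if 4 was already recorded as an interesting factor, -4 is
// added here as well (and -1 itself is always added), since rewriting the
// compare as (N - i) == 0 flips the sign of the recurrence being considered.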
3302 for (size_t i = 0, e = Factors.size(); i != e; ++i) 3303 if (Factors[i] != -1) 3304 Factors.insert(-(uint64_t)Factors[i]); 3305 Factors.insert(-1); 3306 } 3307 } 3308 3309 // Get or create an LSRUse. 3310 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); 3311 size_t LUIdx = P.first; 3312 int64_t Offset = P.second; 3313 LSRUse &LU = Uses[LUIdx]; 3314 3315 // Record the fixup. 3316 LSRFixup &LF = LU.getNewFixup(); 3317 LF.UserInst = UserInst; 3318 LF.OperandValToReplace = U.getOperandValToReplace(); 3319 LF.PostIncLoops = TmpPostIncLoops; 3320 LF.Offset = Offset; 3321 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 3322 3323 if (!LU.WidestFixupType || 3324 SE.getTypeSizeInBits(LU.WidestFixupType) < 3325 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 3326 LU.WidestFixupType = LF.OperandValToReplace->getType(); 3327 3328 // If this is the first use of this LSRUse, give it a formula. 3329 if (LU.Formulae.empty()) { 3330 InsertInitialFormula(S, LU, LUIdx); 3331 CountRegisters(LU.Formulae.back(), LUIdx); 3332 } 3333 } 3334 3335 LLVM_DEBUG(print_fixups(dbgs())); 3336 } 3337 3338 /// Insert a formula for the given expression into the given use, separating out 3339 /// loop-variant portions from loop-invariant and loop-computable portions. 3340 void 3341 LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { 3342 // Mark uses whose expressions cannot be expanded. 3343 if (!isSafeToExpand(S, SE)) 3344 LU.RigidFormula = true; 3345 3346 Formula F; 3347 F.initialMatch(S, L, SE); 3348 bool Inserted = InsertFormula(LU, LUIdx, F); 3349 assert(Inserted && "Initial formula already exists!"); (void)Inserted; 3350 } 3351 3352 /// Insert a simple single-register formula for the given expression into the 3353 /// given use. 3354 void 3355 LSRInstance::InsertSupplementalFormula(const SCEV *S, 3356 LSRUse &LU, size_t LUIdx) { 3357 Formula F; 3358 F.BaseRegs.push_back(S); 3359 F.HasBaseReg = true; 3360 bool Inserted = InsertFormula(LU, LUIdx, F); 3361 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; 3362 } 3363 3364 /// Note which registers are used by the given formula, updating RegUses. 3365 void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { 3366 if (F.ScaledReg) 3367 RegUses.countRegister(F.ScaledReg, LUIdx); 3368 for (const SCEV *BaseReg : F.BaseRegs) 3369 RegUses.countRegister(BaseReg, LUIdx); 3370 } 3371 3372 /// If the given formula has not yet been inserted, add it to the list, and 3373 /// return true. Return false otherwise. 3374 bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { 3375 // Do not insert formula that we will not be able to expand. 3376 assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) && 3377 "Formula is illegal"); 3378 3379 if (!LU.InsertFormula(F, *L)) 3380 return false; 3381 3382 CountRegisters(F, LUIdx); 3383 return true; 3384 } 3385 3386 /// Check for other uses of loop-invariant values which we're tracking. These 3387 /// other uses will pin these values in registers, making them less profitable 3388 /// for elimination. 3389 /// TODO: This currently misses non-constant addrec step registers. 3390 /// TODO: Should this give more weight to users inside the loop? 
3391 void 3392 LSRInstance::CollectLoopInvariantFixupsAndFormulae() { 3393 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end()); 3394 SmallPtrSet<const SCEV *, 32> Visited; 3395 3396 while (!Worklist.empty()) { 3397 const SCEV *S = Worklist.pop_back_val(); 3398 3399 // Don't process the same SCEV twice 3400 if (!Visited.insert(S).second) 3401 continue; 3402 3403 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) 3404 Worklist.append(N->op_begin(), N->op_end()); 3405 else if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(S)) 3406 Worklist.push_back(C->getOperand()); 3407 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 3408 Worklist.push_back(D->getLHS()); 3409 Worklist.push_back(D->getRHS()); 3410 } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) { 3411 const Value *V = US->getValue(); 3412 if (const Instruction *Inst = dyn_cast<Instruction>(V)) { 3413 // Look for instructions defined outside the loop. 3414 if (L->contains(Inst)) continue; 3415 } else if (isa<UndefValue>(V)) 3416 // Undef doesn't have a live range, so it doesn't matter. 3417 continue; 3418 for (const Use &U : V->uses()) { 3419 const Instruction *UserInst = dyn_cast<Instruction>(U.getUser()); 3420 // Ignore non-instructions. 3421 if (!UserInst) 3422 continue; 3423 // Ignore instructions in other functions (as can happen with 3424 // Constants). 3425 if (UserInst->getParent()->getParent() != L->getHeader()->getParent()) 3426 continue; 3427 // Ignore instructions not dominated by the loop. 3428 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ? 3429 UserInst->getParent() : 3430 cast<PHINode>(UserInst)->getIncomingBlock( 3431 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 3432 if (!DT.dominates(L->getHeader(), UseBB)) 3433 continue; 3434 // Don't bother if the instruction is in a BB which ends in an EHPad. 3435 if (UseBB->getTerminator()->isEHPad()) 3436 continue; 3437 // Don't bother rewriting PHIs in catchswitch blocks. 3438 if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator())) 3439 continue; 3440 // Ignore uses which are part of other SCEV expressions, to avoid 3441 // analyzing them multiple times. 3442 if (SE.isSCEVable(UserInst->getType())) { 3443 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst)); 3444 // If the user is a no-op, look through to its uses. 3445 if (!isa<SCEVUnknown>(UserS)) 3446 continue; 3447 if (UserS == US) { 3448 Worklist.push_back( 3449 SE.getUnknown(const_cast<Instruction *>(UserInst))); 3450 continue; 3451 } 3452 } 3453 // Ignore icmp instructions which are already being analyzed. 
3454 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) { 3455 unsigned OtherIdx = !U.getOperandNo(); 3456 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx)); 3457 if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) 3458 continue; 3459 } 3460 3461 std::pair<size_t, int64_t> P = getUse( 3462 S, LSRUse::Basic, MemAccessTy()); 3463 size_t LUIdx = P.first; 3464 int64_t Offset = P.second; 3465 LSRUse &LU = Uses[LUIdx]; 3466 LSRFixup &LF = LU.getNewFixup(); 3467 LF.UserInst = const_cast<Instruction *>(UserInst); 3468 LF.OperandValToReplace = U; 3469 LF.Offset = Offset; 3470 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 3471 if (!LU.WidestFixupType || 3472 SE.getTypeSizeInBits(LU.WidestFixupType) < 3473 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 3474 LU.WidestFixupType = LF.OperandValToReplace->getType(); 3475 InsertSupplementalFormula(US, LU, LUIdx); 3476 CountRegisters(LU.Formulae.back(), Uses.size() - 1); 3477 break; 3478 } 3479 } 3480 } 3481 } 3482 3483 /// Split S into subexpressions which can be pulled out into separate 3484 /// registers. If C is non-null, multiply each subexpression by C. 3485 /// 3486 /// Return remainder expression after factoring the subexpressions captured by 3487 /// Ops. If Ops is complete, return NULL. 3488 static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, 3489 SmallVectorImpl<const SCEV *> &Ops, 3490 const Loop *L, 3491 ScalarEvolution &SE, 3492 unsigned Depth = 0) { 3493 // Arbitrarily cap recursion to protect compile time. 3494 if (Depth >= 3) 3495 return S; 3496 3497 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 3498 // Break out add operands. 3499 for (const SCEV *S : Add->operands()) { 3500 const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1); 3501 if (Remainder) 3502 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); 3503 } 3504 return nullptr; 3505 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 3506 // Split a non-zero base out of an addrec. 3507 if (AR->getStart()->isZero() || !AR->isAffine()) 3508 return S; 3509 3510 const SCEV *Remainder = CollectSubexprs(AR->getStart(), 3511 C, Ops, L, SE, Depth+1); 3512 // Split the non-zero AddRec unless it is part of a nested recurrence that 3513 // does not pertain to this loop. 3514 if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) { 3515 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); 3516 Remainder = nullptr; 3517 } 3518 if (Remainder != AR->getStart()) { 3519 if (!Remainder) 3520 Remainder = SE.getConstant(AR->getType(), 0); 3521 return SE.getAddRecExpr(Remainder, 3522 AR->getStepRecurrence(SE), 3523 AR->getLoop(), 3524 //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) 3525 SCEV::FlagAnyWrap); 3526 } 3527 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3528 // Break (C * (a + b + c)) into C*a + C*b + C*c. 3529 if (Mul->getNumOperands() != 2) 3530 return S; 3531 if (const SCEVConstant *Op0 = 3532 dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3533 C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0; 3534 const SCEV *Remainder = 3535 CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1); 3536 if (Remainder) 3537 Ops.push_back(SE.getMulExpr(C, Remainder)); 3538 return nullptr; 3539 } 3540 } 3541 return S; 3542 } 3543 3544 /// Return true if the SCEV represents a value that may end up as a 3545 /// post-increment operation. 
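///
/// A sketch of the shape this looks for: an address-kind use whose SCEV is an
/// AddRec such as {%base,+,4}<%L>, with a constant step and a loop-invariant
/// (but non-constant) start, on a target where post-indexed loads or stores
/// are legal for the access type.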
3546 static bool mayUsePostIncMode(const TargetTransformInfo &TTI, 3547 LSRUse &LU, const SCEV *S, const Loop *L, 3548 ScalarEvolution &SE) { 3549 if (LU.Kind != LSRUse::Address || 3550 !LU.AccessTy.getType()->isIntOrIntVectorTy()) 3551 return false; 3552 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S); 3553 if (!AR) 3554 return false; 3555 const SCEV *LoopStep = AR->getStepRecurrence(SE); 3556 if (!isa<SCEVConstant>(LoopStep)) 3557 return false; 3558 // Check if a post-indexed load/store can be used. 3559 if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) || 3560 TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) { 3561 const SCEV *LoopStart = AR->getStart(); 3562 if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L)) 3563 return true; 3564 } 3565 return false; 3566 } 3567 3568 /// Helper function for LSRInstance::GenerateReassociations. 3569 void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, 3570 const Formula &Base, 3571 unsigned Depth, size_t Idx, 3572 bool IsScaledReg) { 3573 const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; 3574 // Don't generate reassociations for the base register of a value that 3575 // may generate a post-increment operator. The reason is that the 3576 // reassociations cause extra base+register formula to be created, 3577 // and possibly chosen, but the post-increment is more efficient. 3578 if (TTI.shouldFavorPostInc() && mayUsePostIncMode(TTI, LU, BaseReg, L, SE)) 3579 return; 3580 SmallVector<const SCEV *, 8> AddOps; 3581 const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE); 3582 if (Remainder) 3583 AddOps.push_back(Remainder); 3584 3585 if (AddOps.size() == 1) 3586 return; 3587 3588 for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(), 3589 JE = AddOps.end(); 3590 J != JE; ++J) { 3591 // Loop-variant "unknown" values are uninteresting; we won't be able to 3592 // do anything meaningful with them. 3593 if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L)) 3594 continue; 3595 3596 // Don't pull a constant into a register if the constant could be folded 3597 // into an immediate field. 3598 if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, 3599 LU.AccessTy, *J, Base.getNumRegs() > 1)) 3600 continue; 3601 3602 // Collect all operands except *J. 3603 SmallVector<const SCEV *, 8> InnerAddOps( 3604 ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J); 3605 InnerAddOps.append(std::next(J), 3606 ((const SmallVector<const SCEV *, 8> &)AddOps).end()); 3607 3608 // Don't leave just a constant behind in a register if the constant could 3609 // be folded into an immediate field. 3610 if (InnerAddOps.size() == 1 && 3611 isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, 3612 LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1)) 3613 continue; 3614 3615 const SCEV *InnerSum = SE.getAddExpr(InnerAddOps); 3616 if (InnerSum->isZero()) 3617 continue; 3618 Formula F = Base; 3619 3620 // Add the remaining pieces of the add back into the new formula. 
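// A note on what follows: if the inner sum folds to a small constant that the
// target can add as an immediate, it is accumulated into F.UnfoldedOffset
// instead of occupying a base register; the split-out operand *J then gets
// the same treatment further below.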
3621 const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum); 3622 if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 && 3623 TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + 3624 InnerSumSC->getValue()->getZExtValue())) { 3625 F.UnfoldedOffset = 3626 (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue(); 3627 if (IsScaledReg) 3628 F.ScaledReg = nullptr; 3629 else 3630 F.BaseRegs.erase(F.BaseRegs.begin() + Idx); 3631 } else if (IsScaledReg) 3632 F.ScaledReg = InnerSum; 3633 else 3634 F.BaseRegs[Idx] = InnerSum; 3635 3636 // Add J as its own register, or an unfolded immediate. 3637 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J); 3638 if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && 3639 TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + 3640 SC->getValue()->getZExtValue())) 3641 F.UnfoldedOffset = 3642 (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue(); 3643 else 3644 F.BaseRegs.push_back(*J); 3645 // We may have changed the number of register in base regs, adjust the 3646 // formula accordingly. 3647 F.canonicalize(*L); 3648 3649 if (InsertFormula(LU, LUIdx, F)) 3650 // If that formula hadn't been seen before, recurse to find more like 3651 // it. 3652 // Add check on Log16(AddOps.size()) - same as Log2_32(AddOps.size()) >> 2) 3653 // Because just Depth is not enough to bound compile time. 3654 // This means that every time AddOps.size() is greater 16^x we will add 3655 // x to Depth. 3656 GenerateReassociations(LU, LUIdx, LU.Formulae.back(), 3657 Depth + 1 + (Log2_32(AddOps.size()) >> 2)); 3658 } 3659 } 3660 3661 /// Split out subexpressions from adds and the bases of addrecs. 3662 void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, 3663 Formula Base, unsigned Depth) { 3664 assert(Base.isCanonical(*L) && "Input must be in the canonical form"); 3665 // Arbitrarily cap recursion to protect compile time. 3666 if (Depth >= 3) 3667 return; 3668 3669 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) 3670 GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i); 3671 3672 if (Base.Scale == 1) 3673 GenerateReassociationsImpl(LU, LUIdx, Base, Depth, 3674 /* Idx */ -1, /* IsScaledReg */ true); 3675 } 3676 3677 /// Generate a formula consisting of all of the loop-dominating registers added 3678 /// into a single register. 3679 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, 3680 Formula Base) { 3681 // This method is only interesting on a plurality of registers. 3682 if (Base.BaseRegs.size() + (Base.Scale == 1) + 3683 (Base.UnfoldedOffset != 0) <= 1) 3684 return; 3685 3686 // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before 3687 // processing the formula. 3688 Base.unscale(); 3689 SmallVector<const SCEV *, 4> Ops; 3690 Formula NewBase = Base; 3691 NewBase.BaseRegs.clear(); 3692 Type *CombinedIntegerType = nullptr; 3693 for (const SCEV *BaseReg : Base.BaseRegs) { 3694 if (SE.properlyDominates(BaseReg, L->getHeader()) && 3695 !SE.hasComputableLoopEvolution(BaseReg, L)) { 3696 if (!CombinedIntegerType) 3697 CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType()); 3698 Ops.push_back(BaseReg); 3699 } 3700 else 3701 NewBase.BaseRegs.push_back(BaseReg); 3702 } 3703 3704 // If no register is relevant, we're done. 3705 if (Ops.size() == 0) 3706 return; 3707 3708 // Utility function for generating the required variants of the combined 3709 // registers. 
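// Two variants are produced below: one formula whose extra base register is
// the sum of the collected loop-invariant registers, and, when the base
// carried an unfolded constant offset, a second one with that offset folded
// into the sum as well.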
3710 auto GenerateFormula = [&](const SCEV *Sum) { 3711 Formula F = NewBase; 3712 3713 // TODO: If Sum is zero, it probably means ScalarEvolution missed an 3714 // opportunity to fold something. For now, just ignore such cases 3715 // rather than proceed with zero in a register. 3716 if (Sum->isZero()) 3717 return; 3718 3719 F.BaseRegs.push_back(Sum); 3720 F.canonicalize(*L); 3721 (void)InsertFormula(LU, LUIdx, F); 3722 }; 3723 3724 // If we collected at least two registers, generate a formula combining them. 3725 if (Ops.size() > 1) { 3726 SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops. 3727 GenerateFormula(SE.getAddExpr(OpsCopy)); 3728 } 3729 3730 // If we have an unfolded offset, generate a formula combining it with the 3731 // registers collected. 3732 if (NewBase.UnfoldedOffset) { 3733 assert(CombinedIntegerType && "Missing a type for the unfolded offset"); 3734 Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset, 3735 true)); 3736 NewBase.UnfoldedOffset = 0; 3737 GenerateFormula(SE.getAddExpr(Ops)); 3738 } 3739 } 3740 3741 /// Helper function for LSRInstance::GenerateSymbolicOffsets. 3742 void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, 3743 const Formula &Base, size_t Idx, 3744 bool IsScaledReg) { 3745 const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; 3746 GlobalValue *GV = ExtractSymbol(G, SE); 3747 if (G->isZero() || !GV) 3748 return; 3749 Formula F = Base; 3750 F.BaseGV = GV; 3751 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) 3752 return; 3753 if (IsScaledReg) 3754 F.ScaledReg = G; 3755 else 3756 F.BaseRegs[Idx] = G; 3757 (void)InsertFormula(LU, LUIdx, F); 3758 } 3759 3760 /// Generate reuse formulae using symbolic offsets. 3761 void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, 3762 Formula Base) { 3763 // We can't add a symbolic offset if the address already contains one. 3764 if (Base.BaseGV) return; 3765 3766 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) 3767 GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i); 3768 if (Base.Scale == 1) 3769 GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1, 3770 /* IsScaledReg */ true); 3771 } 3772 3773 /// Helper function for LSRInstance::GenerateConstantOffsets. 3774 void LSRInstance::GenerateConstantOffsetsImpl( 3775 LSRUse &LU, unsigned LUIdx, const Formula &Base, 3776 const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) { 3777 3778 auto GenerateOffset = [&](const SCEV *G, int64_t Offset) { 3779 Formula F = Base; 3780 F.BaseOffset = (uint64_t)Base.BaseOffset - Offset; 3781 3782 if (isLegalUse(TTI, LU.MinOffset - Offset, LU.MaxOffset - Offset, LU.Kind, 3783 LU.AccessTy, F)) { 3784 // Add the offset to the base register. 3785 const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G); 3786 // If it cancelled out, drop the base register, otherwise update it. 3787 if (NewG->isZero()) { 3788 if (IsScaledReg) { 3789 F.Scale = 0; 3790 F.ScaledReg = nullptr; 3791 } else 3792 F.deleteBaseReg(F.BaseRegs[Idx]); 3793 F.canonicalize(*L); 3794 } else if (IsScaledReg) 3795 F.ScaledReg = NewG; 3796 else 3797 F.BaseRegs[Idx] = NewG; 3798 3799 (void)InsertFormula(LU, LUIdx, F); 3800 } 3801 }; 3802 3803 const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; 3804 3805 // With constant offsets and constant steps, we can generate pre-inc 3806 // accesses by having the offset equal the step. 
So, for access #0 with a
3807 // step of 8, we generate a G - 8 base which would require the first access
3808 // to be ((G - 8) + 8),+,8. The pre-indexed access then updates the pointer
3809 // for itself and hopefully becomes the base for other accesses. This means
3810 // that a single pre-indexed access can be generated to become the new
3811 // base pointer for each iteration of the loop, resulting in no extra add/sub
3812 // instructions for pointer updating.
3813 if (FavorBackedgeIndex && LU.Kind == LSRUse::Address) {
3814 if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) {
3815 if (auto *StepRec =
3816 dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) {
3817 const APInt &StepInt = StepRec->getAPInt();
3818 int64_t Step = StepInt.isNegative() ?
3819 StepInt.getSExtValue() : StepInt.getZExtValue();
3820
3821 for (int64_t Offset : Worklist) {
3822 Offset -= Step;
3823 GenerateOffset(G, Offset);
3824 }
3825 }
3826 }
3827 }
3828 for (int64_t Offset : Worklist)
3829 GenerateOffset(G, Offset);
3830
3831 int64_t Imm = ExtractImmediate(G, SE);
3832 if (G->isZero() || Imm == 0)
3833 return;
3834 Formula F = Base;
3835 F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
3836 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3837 return;
3838 if (IsScaledReg) {
3839 F.ScaledReg = G;
3840 } else {
3841 F.BaseRegs[Idx] = G;
3842 // We may generate a non-canonical Formula if G is a recurrent expr reg
3843 // related to the current loop while F.ScaledReg is not.
3844 F.canonicalize(*L);
3845 }
3846 (void)InsertFormula(LU, LUIdx, F);
3847 }
3848
3849 /// Generate reuse formulae using constant offsets.
3850 void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
3851 Formula Base) {
3852 // TODO: For now, just add the min and max offset, because it usually isn't
3853 // worthwhile looking at everything in between.
3854 SmallVector<int64_t, 2> Worklist;
3855 Worklist.push_back(LU.MinOffset);
3856 if (LU.MaxOffset != LU.MinOffset)
3857 Worklist.push_back(LU.MaxOffset);
3858
3859 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3860 GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
3861 if (Base.Scale == 1)
3862 GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
3863 /* IsScaledReg */ true);
3864 }
3865
3866 /// For ICmpZero, check to see if we can scale up the comparison. For example, x
3867 /// == y -> x*c == y*c.
3868 void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
3869 Formula Base) {
3870 if (LU.Kind != LSRUse::ICmpZero) return;
3871
3872 // Determine the integer type for the base formula.
3873 Type *IntTy = Base.getType();
3874 if (!IntTy) return;
3875 if (SE.getTypeSizeInBits(IntTy) > 64) return;
3876
3877 // Don't do this if there is more than one offset.
3878 if (LU.MinOffset != LU.MaxOffset) return;
3879
3880 // Check if the transformation is valid. It is illegal to multiply a pointer.
3881 if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
3882 return;
3883 for (const SCEV *BaseReg : Base.BaseRegs)
3884 if (BaseReg->getType()->isPointerTy())
3885 return;
3886 assert(!Base.BaseGV && "ICmpZero use is not legal!");
3887
3888 // Check each interesting stride.
3889 for (int64_t Factor : Factors) {
3890 // Check that the multiplication doesn't overflow.
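// The overflow checks below all follow the same pattern: do the multiply in
// 64-bit wrapping arithmetic, then divide the product by Factor again; if the
// round trip does not reproduce the original value, the multiplication
// overflowed and this factor is skipped.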
3891 if (Base.BaseOffset == std::numeric_limits<int64_t>::min() && Factor == -1)
3892 continue;
3893 int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor;
3894 if (NewBaseOffset / Factor != Base.BaseOffset)
3895 continue;
3896 // If the offset will be truncated at this use, check that it is in bounds.
3897 if (!IntTy->isPointerTy() &&
3898 !ConstantInt::isValueValidForType(IntTy, NewBaseOffset))
3899 continue;
3900
3901 // Check that multiplying with the use offset doesn't overflow.
3902 int64_t Offset = LU.MinOffset;
3903 if (Offset == std::numeric_limits<int64_t>::min() && Factor == -1)
3904 continue;
3905 Offset = (uint64_t)Offset * Factor;
3906 if (Offset / Factor != LU.MinOffset)
3907 continue;
3908 // If the offset will be truncated at this use, check that it is in bounds.
3909 if (!IntTy->isPointerTy() &&
3910 !ConstantInt::isValueValidForType(IntTy, Offset))
3911 continue;
3912
3913 Formula F = Base;
3914 F.BaseOffset = NewBaseOffset;
3915
3916 // Check that this scale is legal.
3917 if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F))
3918 continue;
3919
3920 // Compensate for the use having MinOffset built into it.
3921 F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset;
3922
3923 const SCEV *FactorS = SE.getConstant(IntTy, Factor);
3924
3925 // Check that multiplying with each base register doesn't overflow.
3926 for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
3927 F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
3928 if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
3929 goto next;
3930 }
3931
3932 // Check that multiplying with the scaled register doesn't overflow.
3933 if (F.ScaledReg) {
3934 F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
3935 if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
3936 continue;
3937 }
3938
3939 // Check that multiplying with the unfolded offset doesn't overflow.
3940 if (F.UnfoldedOffset != 0) {
3941 if (F.UnfoldedOffset == std::numeric_limits<int64_t>::min() &&
3942 Factor == -1)
3943 continue;
3944 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
3945 if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
3946 continue;
3947 // If the offset will be truncated, check that it is in bounds.
3948 if (!IntTy->isPointerTy() &&
3949 !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset))
3950 continue;
3951 }
3952
3953 // If we make it here and it's legal, add it.
3954 (void)InsertFormula(LU, LUIdx, F);
3955 next:;
3956 }
3957 }
3958
3959 /// Generate stride factor reuse formulae by making use of scaled-offset address
3960 /// modes, for example.
3961 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
3962 // Determine the integer type for the base formula.
3963 Type *IntTy = Base.getType();
3964 if (!IntTy) return;
3965
3966 // If this Formula already has a scaled register, we can't add another one.
3967 // Try to unscale the formula to generate a better scale.
3968 if (Base.Scale != 0 && !Base.unscale())
3969 return;
3970
3971 assert(Base.Scale == 0 && "unscale did not do its job!");
3972
3973 // Check each interesting stride.
3974 for (int64_t Factor : Factors) {
3975 Base.Scale = Factor;
3976 Base.HasBaseReg = Base.BaseRegs.size() > 1;
3977 // Check whether this scale is going to be legal.
3978 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
3979 Base)) {
3980 // As a special case, handle special out-of-loop Basic users specially.
3981 // TODO: Reconsider this special case.
3982 if (LU.Kind == LSRUse::Basic && 3983 isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special, 3984 LU.AccessTy, Base) && 3985 LU.AllFixupsOutsideLoop) 3986 LU.Kind = LSRUse::Special; 3987 else 3988 continue; 3989 } 3990 // For an ICmpZero, negating a solitary base register won't lead to 3991 // new solutions. 3992 if (LU.Kind == LSRUse::ICmpZero && 3993 !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV) 3994 continue; 3995 // For each addrec base reg, if its loop is current loop, apply the scale. 3996 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { 3997 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i]); 3998 if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) { 3999 const SCEV *FactorS = SE.getConstant(IntTy, Factor); 4000 if (FactorS->isZero()) 4001 continue; 4002 // Divide out the factor, ignoring high bits, since we'll be 4003 // scaling the value back up in the end. 4004 if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) { 4005 // TODO: This could be optimized to avoid all the copying. 4006 Formula F = Base; 4007 F.ScaledReg = Quotient; 4008 F.deleteBaseReg(F.BaseRegs[i]); 4009 // The canonical representation of 1*reg is reg, which is already in 4010 // Base. In that case, do not try to insert the formula, it will be 4011 // rejected anyway. 4012 if (F.Scale == 1 && (F.BaseRegs.empty() || 4013 (AR->getLoop() != L && LU.AllFixupsOutsideLoop))) 4014 continue; 4015 // If AllFixupsOutsideLoop is true and F.Scale is 1, we may generate 4016 // non canonical Formula with ScaledReg's loop not being L. 4017 if (F.Scale == 1 && LU.AllFixupsOutsideLoop) 4018 F.canonicalize(*L); 4019 (void)InsertFormula(LU, LUIdx, F); 4020 } 4021 } 4022 } 4023 } 4024 } 4025 4026 /// Generate reuse formulae from different IV types. 4027 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { 4028 // Don't bother truncating symbolic values. 4029 if (Base.BaseGV) return; 4030 4031 // Determine the integer type for the base formula. 4032 Type *DstTy = Base.getType(); 4033 if (!DstTy) return; 4034 DstTy = SE.getEffectiveSCEVType(DstTy); 4035 4036 for (Type *SrcTy : Types) { 4037 if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) { 4038 Formula F = Base; 4039 4040 // Sometimes SCEV is able to prove zero during ext transform. It may 4041 // happen if SCEV did not do all possible transforms while creating the 4042 // initial node (maybe due to depth limitations), but it can do them while 4043 // taking ext. 4044 if (F.ScaledReg) { 4045 const SCEV *NewScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy); 4046 if (NewScaledReg->isZero()) 4047 continue; 4048 F.ScaledReg = NewScaledReg; 4049 } 4050 bool HasZeroBaseReg = false; 4051 for (const SCEV *&BaseReg : F.BaseRegs) { 4052 const SCEV *NewBaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy); 4053 if (NewBaseReg->isZero()) { 4054 HasZeroBaseReg = true; 4055 break; 4056 } 4057 BaseReg = NewBaseReg; 4058 } 4059 if (HasZeroBaseReg) 4060 continue; 4061 4062 // TODO: This assumes we've done basic processing on all uses and 4063 // have an idea what the register usage is. 4064 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses)) 4065 continue; 4066 4067 F.canonicalize(*L); 4068 (void)InsertFormula(LU, LUIdx, F); 4069 } 4070 } 4071 } 4072 4073 namespace { 4074 4075 /// Helper class for GenerateCrossUseConstantOffsets. It's used to defer 4076 /// modifications so that the search phase doesn't have to worry about the data 4077 /// structures moving underneath it. 
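///
/// For example (hypothetical values): a WorkItem with LUIdx = 2, Imm = 8 and
/// OrigReg = reg(%p) records that use 2's formulae referencing reg(%p) should
/// be rewritten in terms of the register (reg(%p) - 8), with the 8 moved into
/// the formula's immediate offset, so the register can be shared with another
/// use that already wants that value.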
4078 struct WorkItem {
4079 size_t LUIdx;
4080 int64_t Imm;
4081 const SCEV *OrigReg;
4082
4083 WorkItem(size_t LI, int64_t I, const SCEV *R)
4084 : LUIdx(LI), Imm(I), OrigReg(R) {}
4085
4086 void print(raw_ostream &OS) const;
4087 void dump() const;
4088 };
4089
4090 } // end anonymous namespace
4091
4092 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4093 void WorkItem::print(raw_ostream &OS) const {
4094 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
4095 << " , add offset " << Imm;
4096 }
4097
4098 LLVM_DUMP_METHOD void WorkItem::dump() const {
4099 print(errs()); errs() << '\n';
4100 }
4101 #endif
4102
4103 /// Look for registers which are a constant distance apart and try to form reuse
4104 /// opportunities between them.
4105 void LSRInstance::GenerateCrossUseConstantOffsets() {
4106 // Group the registers by their value without any added constant offset.
4107 using ImmMapTy = std::map<int64_t, const SCEV *>;
4108
4109 DenseMap<const SCEV *, ImmMapTy> Map;
4110 DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
4111 SmallVector<const SCEV *, 8> Sequence;
4112 for (const SCEV *Use : RegUses) {
4113 const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
4114 int64_t Imm = ExtractImmediate(Reg, SE);
4115 auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
4116 if (Pair.second)
4117 Sequence.push_back(Reg);
4118 Pair.first->second.insert(std::make_pair(Imm, Use));
4119 UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
4120 }
4121
4122 // Now examine each set of registers with the same base value. Build up
4123 // a list of work to do and do the work in a separate step so that we're
4124 // not adding formulae and register counts while we're searching.
4125 SmallVector<WorkItem, 32> WorkItems;
4126 SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
4127 for (const SCEV *Reg : Sequence) {
4128 const ImmMapTy &Imms = Map.find(Reg)->second;
4129
4130 // It's not worthwhile looking for reuse if there's only one offset.
4131 if (Imms.size() == 1)
4132 continue;
4133
4134 LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
4135 for (const auto &Entry
4136 : Imms) dbgs()
4137 << ' ' << Entry.first;
4138 dbgs() << '\n');
4139
4140 // Examine each offset.
4141 for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
4142 J != JE; ++J) {
4143 const SCEV *OrigReg = J->second;
4144
4145 int64_t JImm = J->first;
4146 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
4147
4148 if (!isa<SCEVConstant>(OrigReg) &&
4149 UsedByIndicesMap[Reg].count() == 1) {
4150 LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg
4151 << '\n');
4152 continue;
4153 }
4154
4155 // Conservatively examine offsets between this orig reg and a few selected
4156 // other orig regs.
4157 int64_t First = Imms.begin()->first;
4158 int64_t Last = std::prev(Imms.end())->first;
4159 // Compute (First + Last) / 2 without overflow using the fact that
4160 // First + Last = 2 * (First & Last) + (First ^ Last).
4161 int64_t Avg = (First & Last) + ((First ^ Last) >> 1);
4162 // If the result is negative and First is odd and Last even (or vice versa),
4163 // we rounded towards -inf. Add 1 in that case, to round towards 0.
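// Worked example (values chosen purely for illustration): First = -9 and
// Last = 6 give First & Last = 6 and First ^ Last = -15, so Avg starts as
// 6 + (-15 >> 1) = 6 - 8 = -2, i.e. -1.5 rounded towards -inf; Avg is
// negative and (First ^ Last) is odd, so the line below adds 1, giving -1,
// the average rounded towards zero.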
4164 Avg = Avg + ((First ^ Last) & ((uint64_t)Avg >> 63)); 4165 ImmMapTy::const_iterator OtherImms[] = { 4166 Imms.begin(), std::prev(Imms.end()), 4167 Imms.lower_bound(Avg)}; 4168 for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) { 4169 ImmMapTy::const_iterator M = OtherImms[i]; 4170 if (M == J || M == JE) continue; 4171 4172 // Compute the difference between the two. 4173 int64_t Imm = (uint64_t)JImm - M->first; 4174 for (unsigned LUIdx : UsedByIndices.set_bits()) 4175 // Make a memo of this use, offset, and register tuple. 4176 if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second) 4177 WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg)); 4178 } 4179 } 4180 } 4181 4182 Map.clear(); 4183 Sequence.clear(); 4184 UsedByIndicesMap.clear(); 4185 UniqueItems.clear(); 4186 4187 // Now iterate through the worklist and add new formulae. 4188 for (const WorkItem &WI : WorkItems) { 4189 size_t LUIdx = WI.LUIdx; 4190 LSRUse &LU = Uses[LUIdx]; 4191 int64_t Imm = WI.Imm; 4192 const SCEV *OrigReg = WI.OrigReg; 4193 4194 Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType()); 4195 const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm)); 4196 unsigned BitWidth = SE.getTypeSizeInBits(IntTy); 4197 4198 // TODO: Use a more targeted data structure. 4199 for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) { 4200 Formula F = LU.Formulae[L]; 4201 // FIXME: The code for the scaled and unscaled registers looks 4202 // very similar but slightly different. Investigate if they 4203 // could be merged. That way, we would not have to unscale the 4204 // Formula. 4205 F.unscale(); 4206 // Use the immediate in the scaled register. 4207 if (F.ScaledReg == OrigReg) { 4208 int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale; 4209 // Don't create 50 + reg(-50). 4210 if (F.referencesReg(SE.getSCEV( 4211 ConstantInt::get(IntTy, -(uint64_t)Offset)))) 4212 continue; 4213 Formula NewF = F; 4214 NewF.BaseOffset = Offset; 4215 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, 4216 NewF)) 4217 continue; 4218 NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg); 4219 4220 // If the new scale is a constant in a register, and adding the constant 4221 // value to the immediate would produce a value closer to zero than the 4222 // immediate itself, then the formula isn't worthwhile. 4223 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg)) 4224 if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) && 4225 (C->getAPInt().abs() * APInt(BitWidth, F.Scale)) 4226 .ule(std::abs(NewF.BaseOffset))) 4227 continue; 4228 4229 // OK, looks good. 4230 NewF.canonicalize(*this->L); 4231 (void)InsertFormula(LU, LUIdx, NewF); 4232 } else { 4233 // Use the immediate in a base register. 
4234 for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) { 4235 const SCEV *BaseReg = F.BaseRegs[N]; 4236 if (BaseReg != OrigReg) 4237 continue; 4238 Formula NewF = F; 4239 NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm; 4240 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, 4241 LU.Kind, LU.AccessTy, NewF)) { 4242 if (TTI.shouldFavorPostInc() && 4243 mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE)) 4244 continue; 4245 if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) 4246 continue; 4247 NewF = F; 4248 NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm; 4249 } 4250 NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg); 4251 4252 // If the new formula has a constant in a register, and adding the 4253 // constant value to the immediate would produce a value closer to 4254 // zero than the immediate itself, then the formula isn't worthwhile. 4255 for (const SCEV *NewReg : NewF.BaseRegs) 4256 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg)) 4257 if ((C->getAPInt() + NewF.BaseOffset) 4258 .abs() 4259 .slt(std::abs(NewF.BaseOffset)) && 4260 (C->getAPInt() + NewF.BaseOffset).countTrailingZeros() >= 4261 countTrailingZeros<uint64_t>(NewF.BaseOffset)) 4262 goto skip_formula; 4263 4264 // Ok, looks good. 4265 NewF.canonicalize(*this->L); 4266 (void)InsertFormula(LU, LUIdx, NewF); 4267 break; 4268 skip_formula:; 4269 } 4270 } 4271 } 4272 } 4273 } 4274 4275 /// Generate formulae for each use. 4276 void 4277 LSRInstance::GenerateAllReuseFormulae() { 4278 // This is split into multiple loops so that hasRegsUsedByUsesOtherThan 4279 // queries are more precise. 4280 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4281 LSRUse &LU = Uses[LUIdx]; 4282 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4283 GenerateReassociations(LU, LUIdx, LU.Formulae[i]); 4284 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4285 GenerateCombinations(LU, LUIdx, LU.Formulae[i]); 4286 } 4287 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4288 LSRUse &LU = Uses[LUIdx]; 4289 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4290 GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]); 4291 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4292 GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]); 4293 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4294 GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]); 4295 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4296 GenerateScales(LU, LUIdx, LU.Formulae[i]); 4297 } 4298 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4299 LSRUse &LU = Uses[LUIdx]; 4300 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4301 GenerateTruncates(LU, LUIdx, LU.Formulae[i]); 4302 } 4303 4304 GenerateCrossUseConstantOffsets(); 4305 4306 LLVM_DEBUG(dbgs() << "\n" 4307 "After generating reuse formulae:\n"; 4308 print_uses(dbgs())); 4309 } 4310 4311 /// If there are multiple formulae with the same set of registers used 4312 /// by other uses, pick the best one and delete the others. 4313 void LSRInstance::FilterOutUndesirableDedicatedRegisters() { 4314 DenseSet<const SCEV *> VisitedRegs; 4315 SmallPtrSet<const SCEV *, 16> Regs; 4316 SmallPtrSet<const SCEV *, 16> LoserRegs; 4317 #ifndef NDEBUG 4318 bool ChangedFormulae = false; 4319 #endif 4320 4321 // Collect the best formula for each unique set of shared registers. This 4322 // is reset for each use. 
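// The key of the map below is the sorted list of a formula's registers that
// are also used by other uses; formulae that agree on this key put the same
// pressure on shared registers, so only the cheapest of them needs to be kept.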
4323 using BestFormulaeTy = 4324 DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>; 4325 4326 BestFormulaeTy BestFormulae; 4327 4328 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4329 LSRUse &LU = Uses[LUIdx]; 4330 LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); 4331 dbgs() << '\n'); 4332 4333 bool Any = false; 4334 for (size_t FIdx = 0, NumForms = LU.Formulae.size(); 4335 FIdx != NumForms; ++FIdx) { 4336 Formula &F = LU.Formulae[FIdx]; 4337 4338 // Some formulas are instant losers. For example, they may depend on 4339 // nonexistent AddRecs from other loops. These need to be filtered 4340 // immediately, otherwise heuristics could choose them over others leading 4341 // to an unsatisfactory solution. Passing LoserRegs into RateFormula here 4342 // avoids the need to recompute this information across formulae using the 4343 // same bad AddRec. Passing LoserRegs is also essential unless we remove 4344 // the corresponding bad register from the Regs set. 4345 Cost CostF(L, SE, TTI); 4346 Regs.clear(); 4347 CostF.RateFormula(F, Regs, VisitedRegs, LU, &LoserRegs); 4348 if (CostF.isLoser()) { 4349 // During initial formula generation, undesirable formulae are generated 4350 // by uses within other loops that have some non-trivial address mode or 4351 // use the postinc form of the IV. LSR needs to provide these formulae 4352 // as the basis of rediscovering the desired formula that uses an AddRec 4353 // corresponding to the existing phi. Once all formulae have been 4354 // generated, these initial losers may be pruned. 4355 LLVM_DEBUG(dbgs() << " Filtering loser "; F.print(dbgs()); 4356 dbgs() << "\n"); 4357 } 4358 else { 4359 SmallVector<const SCEV *, 4> Key; 4360 for (const SCEV *Reg : F.BaseRegs) { 4361 if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) 4362 Key.push_back(Reg); 4363 } 4364 if (F.ScaledReg && 4365 RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) 4366 Key.push_back(F.ScaledReg); 4367 // Unstable sort by host order ok, because this is only used for 4368 // uniquifying. 4369 llvm::sort(Key); 4370 4371 std::pair<BestFormulaeTy::const_iterator, bool> P = 4372 BestFormulae.insert(std::make_pair(Key, FIdx)); 4373 if (P.second) 4374 continue; 4375 4376 Formula &Best = LU.Formulae[P.first->second]; 4377 4378 Cost CostBest(L, SE, TTI); 4379 Regs.clear(); 4380 CostBest.RateFormula(Best, Regs, VisitedRegs, LU); 4381 if (CostF.isLess(CostBest)) 4382 std::swap(F, Best); 4383 LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); 4384 dbgs() << "\n" 4385 " in favor of formula "; 4386 Best.print(dbgs()); dbgs() << '\n'); 4387 } 4388 #ifndef NDEBUG 4389 ChangedFormulae = true; 4390 #endif 4391 LU.DeleteFormula(F); 4392 --FIdx; 4393 --NumForms; 4394 Any = true; 4395 } 4396 4397 // Now that we've filtered out some formulae, recompute the Regs set. 4398 if (Any) 4399 LU.RecomputeRegs(LUIdx, RegUses); 4400 4401 // Reset this to prepare for the next use. 4402 BestFormulae.clear(); 4403 } 4404 4405 LLVM_DEBUG(if (ChangedFormulae) { 4406 dbgs() << "\n" 4407 "After filtering out undesirable candidates:\n"; 4408 print_uses(dbgs()); 4409 }); 4410 } 4411 4412 /// Estimate the worst-case number of solutions the solver might have to 4413 /// consider. It almost never considers this many solutions because it prune the 4414 /// search space, but the pruning isn't always sufficient. 
4415 size_t LSRInstance::EstimateSearchSpaceComplexity() const { 4416 size_t Power = 1; 4417 for (const LSRUse &LU : Uses) { 4418 size_t FSize = LU.Formulae.size(); 4419 if (FSize >= ComplexityLimit) { 4420 Power = ComplexityLimit; 4421 break; 4422 } 4423 Power *= FSize; 4424 if (Power >= ComplexityLimit) 4425 break; 4426 } 4427 return Power; 4428 } 4429 4430 /// When one formula uses a superset of the registers of another formula, it 4431 /// won't help reduce register pressure (though it may not necessarily hurt 4432 /// register pressure); remove it to simplify the system. 4433 void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { 4434 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 4435 LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); 4436 4437 LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " 4438 "which use a superset of registers used by other " 4439 "formulae.\n"); 4440 4441 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4442 LSRUse &LU = Uses[LUIdx]; 4443 bool Any = false; 4444 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 4445 Formula &F = LU.Formulae[i]; 4446 // Look for a formula with a constant or GV in a register. If the use 4447 // also has a formula with that same value in an immediate field, 4448 // delete the one that uses a register. 4449 for (SmallVectorImpl<const SCEV *>::const_iterator 4450 I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { 4451 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) { 4452 Formula NewF = F; 4453 //FIXME: Formulas should store bitwidth to do wrapping properly. 4454 // See PR41034. 4455 NewF.BaseOffset += (uint64_t)C->getValue()->getSExtValue(); 4456 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 4457 (I - F.BaseRegs.begin())); 4458 if (LU.HasFormulaWithSameRegs(NewF)) { 4459 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 4460 dbgs() << '\n'); 4461 LU.DeleteFormula(F); 4462 --i; 4463 --e; 4464 Any = true; 4465 break; 4466 } 4467 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) { 4468 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) 4469 if (!F.BaseGV) { 4470 Formula NewF = F; 4471 NewF.BaseGV = GV; 4472 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 4473 (I - F.BaseRegs.begin())); 4474 if (LU.HasFormulaWithSameRegs(NewF)) { 4475 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 4476 dbgs() << '\n'); 4477 LU.DeleteFormula(F); 4478 --i; 4479 --e; 4480 Any = true; 4481 break; 4482 } 4483 } 4484 } 4485 } 4486 } 4487 if (Any) 4488 LU.RecomputeRegs(LUIdx, RegUses); 4489 } 4490 4491 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); 4492 } 4493 } 4494 4495 /// When there are many registers for expressions like A, A+1, A+2, etc., 4496 /// allocate a single register for them. 4497 void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { 4498 if (EstimateSearchSpaceComplexity() < ComplexityLimit) 4499 return; 4500 4501 LLVM_DEBUG( 4502 dbgs() << "The search space is too complex.\n" 4503 "Narrowing the search space by assuming that uses separated " 4504 "by a constant offset will use the same registers.\n"); 4505 4506 // This is especially useful for unrolled loops. 
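// For example (a sketch): after unrolling by four, the accesses A[i], A[i+1],
// A[i+2] and A[i+3] differ only by constant offsets, so their uses can be
// collapsed onto a single use that keeps one register for the base address,
// with each access's constant carried in its fixup offset instead (subject to
// the reconcileNewOffset legality check below).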
4507
4508 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4509 LSRUse &LU = Uses[LUIdx];
4510 for (const Formula &F : LU.Formulae) {
4511 if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
4512 continue;
4513
4514 LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
4515 if (!LUThatHas)
4516 continue;
4517
4518 if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false,
4519 LU.Kind, LU.AccessTy))
4520 continue;
4521
4522 LLVM_DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n');
4523
4524 LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
4525
4526 // Transfer the fixups of LU to LUThatHas.
4527 for (LSRFixup &Fixup : LU.Fixups) {
4528 Fixup.Offset += F.BaseOffset;
4529 LUThatHas->pushFixup(Fixup);
4530 LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
4531 }
4532
4533 // Delete formulae from the new use which are no longer legal.
4534 bool Any = false;
4535 for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
4536 Formula &F = LUThatHas->Formulae[i];
4537 if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
4538 LUThatHas->Kind, LUThatHas->AccessTy, F)) {
4539 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
4540 LUThatHas->DeleteFormula(F);
4541 --i;
4542 --e;
4543 Any = true;
4544 }
4545 }
4546
4547 if (Any)
4548 LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
4549
4550 // Delete the old use.
4551 DeleteUse(LU, LUIdx);
4552 --LUIdx;
4553 --NumUses;
4554 break;
4555 }
4556 }
4557
4558 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4559 }
4560
4561 /// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
4562 /// we've done more filtering, as it may be able to find more formulae to
4563 /// eliminate.
4564 void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
4565 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4566 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4567
4568 LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
4569 "undesirable dedicated registers.\n");
4570
4571 FilterOutUndesirableDedicatedRegisters();
4572
4573 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4574 }
4575 }
4576
4577 /// If an LSRUse has multiple formulae with the same ScaledReg and Scale,
4578 /// pick the best one and delete the others.
4579 /// This narrowing heuristic keeps as many formulae with different
4580 /// Scale and ScaledReg pairs as possible while narrowing the search space.
4581 /// The benefit is that it is more likely to find a better solution
4582 /// from a formulae set with more Scale and ScaledReg variations than from
4583 /// a formulae set with the same Scale and ScaledReg. The winner-picking
4584 /// reg heuristic will often keep the formulae with the same Scale and
4585 /// ScaledReg and filter out the others, and we want to avoid that if possible.
4586 void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
4587 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4588 return;
4589
4590 LLVM_DEBUG(
4591 dbgs() << "The search space is too complex.\n"
4592 "Narrowing the search space by choosing the best Formula "
4593 "from the Formulae with the same Scale and ScaledReg.\n");
4594
4595 // Map the "Scale * ScaledReg" pair to the best formula of the current LSRUse.
4596 using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>; 4597 4598 BestFormulaeTy BestFormulae; 4599 #ifndef NDEBUG 4600 bool ChangedFormulae = false; 4601 #endif 4602 DenseSet<const SCEV *> VisitedRegs; 4603 SmallPtrSet<const SCEV *, 16> Regs; 4604 4605 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4606 LSRUse &LU = Uses[LUIdx]; 4607 LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); 4608 dbgs() << '\n'); 4609 4610 // Return true if Formula FA is better than Formula FB. 4611 auto IsBetterThan = [&](Formula &FA, Formula &FB) { 4612 // First we will try to choose the Formula with fewer new registers. 4613 // For a register used by current Formula, the more the register is 4614 // shared among LSRUses, the less we increase the register number 4615 // counter of the formula. 4616 size_t FARegNum = 0; 4617 for (const SCEV *Reg : FA.BaseRegs) { 4618 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg); 4619 FARegNum += (NumUses - UsedByIndices.count() + 1); 4620 } 4621 size_t FBRegNum = 0; 4622 for (const SCEV *Reg : FB.BaseRegs) { 4623 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg); 4624 FBRegNum += (NumUses - UsedByIndices.count() + 1); 4625 } 4626 if (FARegNum != FBRegNum) 4627 return FARegNum < FBRegNum; 4628 4629 // If the new register numbers are the same, choose the Formula with 4630 // less Cost. 4631 Cost CostFA(L, SE, TTI); 4632 Cost CostFB(L, SE, TTI); 4633 Regs.clear(); 4634 CostFA.RateFormula(FA, Regs, VisitedRegs, LU); 4635 Regs.clear(); 4636 CostFB.RateFormula(FB, Regs, VisitedRegs, LU); 4637 return CostFA.isLess(CostFB); 4638 }; 4639 4640 bool Any = false; 4641 for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms; 4642 ++FIdx) { 4643 Formula &F = LU.Formulae[FIdx]; 4644 if (!F.ScaledReg) 4645 continue; 4646 auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx}); 4647 if (P.second) 4648 continue; 4649 4650 Formula &Best = LU.Formulae[P.first->second]; 4651 if (IsBetterThan(F, Best)) 4652 std::swap(F, Best); 4653 LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); 4654 dbgs() << "\n" 4655 " in favor of formula "; 4656 Best.print(dbgs()); dbgs() << '\n'); 4657 #ifndef NDEBUG 4658 ChangedFormulae = true; 4659 #endif 4660 LU.DeleteFormula(F); 4661 --FIdx; 4662 --NumForms; 4663 Any = true; 4664 } 4665 if (Any) 4666 LU.RecomputeRegs(LUIdx, RegUses); 4667 4668 // Reset this to prepare for the next use. 4669 BestFormulae.clear(); 4670 } 4671 4672 LLVM_DEBUG(if (ChangedFormulae) { 4673 dbgs() << "\n" 4674 "After filtering out undesirable candidates:\n"; 4675 print_uses(dbgs()); 4676 }); 4677 } 4678 4679 /// If we are over the complexity limit, filter out any post-inc prefering 4680 /// variables to only post-inc values. 
4681 void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
4682   if (!TTI.shouldFavorPostInc())
4683     return;
4684   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4685     return;
4686
4687   LLVM_DEBUG(dbgs() << "The search space is too complex.\n"
4688                        "Narrowing the search space by choosing the lowest "
4689                        "register Formula for PostInc Uses.\n");
4690
4691   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4692     LSRUse &LU = Uses[LUIdx];
4693
4694     if (LU.Kind != LSRUse::Address)
4695       continue;
4696     if (!TTI.isIndexedLoadLegal(TTI.MIM_PostInc, LU.AccessTy.getType()) &&
4697         !TTI.isIndexedStoreLegal(TTI.MIM_PostInc, LU.AccessTy.getType()))
4698       continue;
4699
4700     size_t MinRegs = std::numeric_limits<size_t>::max();
4701     for (const Formula &F : LU.Formulae)
4702       MinRegs = std::min(F.getNumRegs(), MinRegs);
4703
4704     bool Any = false;
4705     for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
4706          ++FIdx) {
4707       Formula &F = LU.Formulae[FIdx];
4708       if (F.getNumRegs() > MinRegs) {
4709         LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
4710                    dbgs() << "\n");
4711         LU.DeleteFormula(F);
4712         --FIdx;
4713         --NumForms;
4714         Any = true;
4715       }
4716     }
4717     if (Any)
4718       LU.RecomputeRegs(LUIdx, RegUses);
4719
4720     if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4721       break;
4722   }
4723
4724   LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4725 }
4726
4727 /// This function deletes formulae whose expected register count is high.
4728 /// Assuming we cannot tell the formulae apart by value (all clearly
4729 /// inefficient ones have already been deleted), compute for each register
4730 /// the probability of it not being selected.
4731 /// For example,
4732 /// Use1:
4733 ///  reg(a) + reg({0,+,1})
4734 ///  reg(a) + reg({-1,+,1}) + 1
4735 ///  reg({a,+,1})
4736 /// Use2:
4737 ///  reg(b) + reg({0,+,1})
4738 ///  reg(b) + reg({-1,+,1}) + 1
4739 ///  reg({b,+,1})
4740 /// Use3:
4741 ///  reg(c) + reg(b) + reg({0,+,1})
4742 ///  reg(c) + reg({b,+,1})
4743 ///
4744 /// Probability of not selecting
4745 ///                 Use1    Use2    Use3
4746 /// reg(a)         (1/3) *   1   *   1
4747 /// reg(b)           1   * (1/3) * (1/2)
4748 /// reg({0,+,1})   (2/3) * (2/3) * (1/2)
4749 /// reg({-1,+,1})  (2/3) * (2/3) *   1
4750 /// reg({a,+,1})   (2/3) *   1   *   1
4751 /// reg({b,+,1})     1   * (2/3) * (2/3)
4752 /// reg(c)           1   *   1   *   0
4753 ///
4754 /// Now compute the mathematical expectation of the register count for each
4755 /// formula. Note that for each use we exclude the probability of not
4756 /// selecting for that use itself. For example, for Use1 the probability for
4757 /// reg(a) would be just 1 * 1 (excluding the probability 1/3 of not
4758 /// selecting for Use1).
4759 /// Use1:
4760 ///  reg(a) + reg({0,+,1})          1 + 1/3       -- to be deleted
4761 ///  reg(a) + reg({-1,+,1}) + 1     1 + 4/9       -- to be deleted
4762 ///  reg({a,+,1})                   1
4763 /// Use2:
4764 ///  reg(b) + reg({0,+,1})          1/2 + 1/3     -- to be deleted
4765 ///  reg(b) + reg({-1,+,1}) + 1     1/2 + 2/3     -- to be deleted
4766 ///  reg({b,+,1})                   2/3
4767 /// Use3:
4768 ///  reg(c) + reg(b) + reg({0,+,1}) 1 + 1/3 + 4/9 -- to be deleted
4769 ///  reg(c) + reg({b,+,1})          1 + 2/3
4770 void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
4771   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4772     return;
4773   // Ok, we have too many formulae on our hands to conveniently handle.
4774   // Use a rough heuristic to thin out the list.
4775
4776   // Set of Regs which will certainly be used in the final solution.
4777   // Used in each formula of a solution (in the example above this is reg(c)).
4777   // We can skip them in calculations.
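  // The per-use factor used below is LSRUse::getNotSelectedProbability(Reg),
  // i.e. the fraction of that use's formulae which do not reference Reg. In
  // the example above, Use1 has 3 formulae and 2 of them contain reg(a), so
  // reg(a)'s probability of not being selected for Use1 is 1/3.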
4778 SmallPtrSet<const SCEV *, 4> UniqRegs; 4779 LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); 4780 4781 // Map each register to probability of not selecting 4782 DenseMap <const SCEV *, float> RegNumMap; 4783 for (const SCEV *Reg : RegUses) { 4784 if (UniqRegs.count(Reg)) 4785 continue; 4786 float PNotSel = 1; 4787 for (const LSRUse &LU : Uses) { 4788 if (!LU.Regs.count(Reg)) 4789 continue; 4790 float P = LU.getNotSelectedProbability(Reg); 4791 if (P != 0.0) 4792 PNotSel *= P; 4793 else 4794 UniqRegs.insert(Reg); 4795 } 4796 RegNumMap.insert(std::make_pair(Reg, PNotSel)); 4797 } 4798 4799 LLVM_DEBUG( 4800 dbgs() << "Narrowing the search space by deleting costly formulas\n"); 4801 4802 // Delete formulas where registers number expectation is high. 4803 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4804 LSRUse &LU = Uses[LUIdx]; 4805 // If nothing to delete - continue. 4806 if (LU.Formulae.size() < 2) 4807 continue; 4808 // This is temporary solution to test performance. Float should be 4809 // replaced with round independent type (based on integers) to avoid 4810 // different results for different target builds. 4811 float FMinRegNum = LU.Formulae[0].getNumRegs(); 4812 float FMinARegNum = LU.Formulae[0].getNumRegs(); 4813 size_t MinIdx = 0; 4814 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 4815 Formula &F = LU.Formulae[i]; 4816 float FRegNum = 0; 4817 float FARegNum = 0; 4818 for (const SCEV *BaseReg : F.BaseRegs) { 4819 if (UniqRegs.count(BaseReg)) 4820 continue; 4821 FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg); 4822 if (isa<SCEVAddRecExpr>(BaseReg)) 4823 FARegNum += 4824 RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg); 4825 } 4826 if (const SCEV *ScaledReg = F.ScaledReg) { 4827 if (!UniqRegs.count(ScaledReg)) { 4828 FRegNum += 4829 RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg); 4830 if (isa<SCEVAddRecExpr>(ScaledReg)) 4831 FARegNum += 4832 RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg); 4833 } 4834 } 4835 if (FMinRegNum > FRegNum || 4836 (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) { 4837 FMinRegNum = FRegNum; 4838 FMinARegNum = FARegNum; 4839 MinIdx = i; 4840 } 4841 } 4842 LLVM_DEBUG(dbgs() << " The formula "; LU.Formulae[MinIdx].print(dbgs()); 4843 dbgs() << " with min reg num " << FMinRegNum << '\n'); 4844 if (MinIdx != 0) 4845 std::swap(LU.Formulae[MinIdx], LU.Formulae[0]); 4846 while (LU.Formulae.size() != 1) { 4847 LLVM_DEBUG(dbgs() << " Deleting "; LU.Formulae.back().print(dbgs()); 4848 dbgs() << '\n'); 4849 LU.Formulae.pop_back(); 4850 } 4851 LU.RecomputeRegs(LUIdx, RegUses); 4852 assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula"); 4853 Formula &F = LU.Formulae[0]; 4854 LLVM_DEBUG(dbgs() << " Leaving only "; F.print(dbgs()); dbgs() << '\n'); 4855 // When we choose the formula, the regs become unique. 4856 UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); 4857 if (F.ScaledReg) 4858 UniqRegs.insert(F.ScaledReg); 4859 } 4860 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); 4861 } 4862 4863 /// Pick a register which seems likely to be profitable, and then in any use 4864 /// which has any reference to that register, delete all formulae which do not 4865 /// reference that register. 4866 void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { 4867 // With all other options exhausted, loop until the system is simple 4868 // enough to handle. 
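  // Illustrative sketch (hypothetical counts): if reg({0,+,1}) is referenced
  // by 5 of 6 uses and no other register by more, it is picked first; every
  // use whose register set contains it then drops all formulae that do not
  // reference it, and the loop repeats with the next most-shared register
  // until the complexity estimate falls below the limit.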
4869 SmallPtrSet<const SCEV *, 4> Taken; 4870 while (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 4871 // Ok, we have too many of formulae on our hands to conveniently handle. 4872 // Use a rough heuristic to thin out the list. 4873 LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); 4874 4875 // Pick the register which is used by the most LSRUses, which is likely 4876 // to be a good reuse register candidate. 4877 const SCEV *Best = nullptr; 4878 unsigned BestNum = 0; 4879 for (const SCEV *Reg : RegUses) { 4880 if (Taken.count(Reg)) 4881 continue; 4882 if (!Best) { 4883 Best = Reg; 4884 BestNum = RegUses.getUsedByIndices(Reg).count(); 4885 } else { 4886 unsigned Count = RegUses.getUsedByIndices(Reg).count(); 4887 if (Count > BestNum) { 4888 Best = Reg; 4889 BestNum = Count; 4890 } 4891 } 4892 } 4893 assert(Best && "Failed to find best LSRUse candidate"); 4894 4895 LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best 4896 << " will yield profitable reuse.\n"); 4897 Taken.insert(Best); 4898 4899 // In any use with formulae which references this register, delete formulae 4900 // which don't reference it. 4901 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4902 LSRUse &LU = Uses[LUIdx]; 4903 if (!LU.Regs.count(Best)) continue; 4904 4905 bool Any = false; 4906 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 4907 Formula &F = LU.Formulae[i]; 4908 if (!F.referencesReg(Best)) { 4909 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); 4910 LU.DeleteFormula(F); 4911 --e; 4912 --i; 4913 Any = true; 4914 assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?"); 4915 continue; 4916 } 4917 } 4918 4919 if (Any) 4920 LU.RecomputeRegs(LUIdx, RegUses); 4921 } 4922 4923 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); 4924 } 4925 } 4926 4927 /// If there are an extraordinary number of formulae to choose from, use some 4928 /// rough heuristics to prune down the number of formulae. This keeps the main 4929 /// solver from taking an extraordinary amount of time in some worst-case 4930 /// scenarios. 4931 void LSRInstance::NarrowSearchSpaceUsingHeuristics() { 4932 NarrowSearchSpaceByDetectingSupersets(); 4933 NarrowSearchSpaceByCollapsingUnrolledCode(); 4934 NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); 4935 if (FilterSameScaledReg) 4936 NarrowSearchSpaceByFilterFormulaWithSameScaledReg(); 4937 NarrowSearchSpaceByFilterPostInc(); 4938 if (LSRExpNarrow) 4939 NarrowSearchSpaceByDeletingCostlyFormulas(); 4940 else 4941 NarrowSearchSpaceByPickingWinnerRegs(); 4942 } 4943 4944 /// This is the recursive solver. 4945 void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution, 4946 Cost &SolutionCost, 4947 SmallVectorImpl<const Formula *> &Workspace, 4948 const Cost &CurCost, 4949 const SmallPtrSet<const SCEV *, 16> &CurRegs, 4950 DenseSet<const SCEV *> &VisitedRegs) const { 4951 // Some ideas: 4952 // - prune more: 4953 // - use more aggressive filtering 4954 // - sort the formula so that the most profitable solutions are found first 4955 // - sort the uses too 4956 // - search faster: 4957 // - don't compute a cost, and then compare. compare while computing a cost 4958 // and bail early. 
4959 // - track register sets with SmallBitVector 4960 4961 const LSRUse &LU = Uses[Workspace.size()]; 4962 4963 // If this use references any register that's already a part of the 4964 // in-progress solution, consider it a requirement that a formula must 4965 // reference that register in order to be considered. This prunes out 4966 // unprofitable searching. 4967 SmallSetVector<const SCEV *, 4> ReqRegs; 4968 for (const SCEV *S : CurRegs) 4969 if (LU.Regs.count(S)) 4970 ReqRegs.insert(S); 4971 4972 SmallPtrSet<const SCEV *, 16> NewRegs; 4973 Cost NewCost(L, SE, TTI); 4974 for (const Formula &F : LU.Formulae) { 4975 // Ignore formulae which may not be ideal in terms of register reuse of 4976 // ReqRegs. The formula should use all required registers before 4977 // introducing new ones. 4978 // This can sometimes (notably when trying to favour postinc) lead to 4979 // sub-optimial decisions. There it is best left to the cost modelling to 4980 // get correct. 4981 if (!TTI.shouldFavorPostInc() || LU.Kind != LSRUse::Address) { 4982 int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size()); 4983 for (const SCEV *Reg : ReqRegs) { 4984 if ((F.ScaledReg && F.ScaledReg == Reg) || 4985 is_contained(F.BaseRegs, Reg)) { 4986 --NumReqRegsToFind; 4987 if (NumReqRegsToFind == 0) 4988 break; 4989 } 4990 } 4991 if (NumReqRegsToFind != 0) { 4992 // If none of the formulae satisfied the required registers, then we could 4993 // clear ReqRegs and try again. Currently, we simply give up in this case. 4994 continue; 4995 } 4996 } 4997 4998 // Evaluate the cost of the current formula. If it's already worse than 4999 // the current best, prune the search at that point. 5000 NewCost = CurCost; 5001 NewRegs = CurRegs; 5002 NewCost.RateFormula(F, NewRegs, VisitedRegs, LU); 5003 if (NewCost.isLess(SolutionCost)) { 5004 Workspace.push_back(&F); 5005 if (Workspace.size() != Uses.size()) { 5006 SolveRecurse(Solution, SolutionCost, Workspace, NewCost, 5007 NewRegs, VisitedRegs); 5008 if (F.getNumRegs() == 1 && Workspace.size() == 1) 5009 VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]); 5010 } else { 5011 LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); 5012 dbgs() << ".\nRegs:\n"; 5013 for (const SCEV *S : NewRegs) dbgs() 5014 << "- " << *S << "\n"; 5015 dbgs() << '\n'); 5016 5017 SolutionCost = NewCost; 5018 Solution = Workspace; 5019 } 5020 Workspace.pop_back(); 5021 } 5022 } 5023 } 5024 5025 /// Choose one formula from each use. Return the results in the given Solution 5026 /// vector. 5027 void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const { 5028 SmallVector<const Formula *, 8> Workspace; 5029 Cost SolutionCost(L, SE, TTI); 5030 SolutionCost.Lose(); 5031 Cost CurCost(L, SE, TTI); 5032 SmallPtrSet<const SCEV *, 16> CurRegs; 5033 DenseSet<const SCEV *> VisitedRegs; 5034 Workspace.reserve(Uses.size()); 5035 5036 // SolveRecurse does all the work. 5037 SolveRecurse(Solution, SolutionCost, Workspace, CurCost, 5038 CurRegs, VisitedRegs); 5039 if (Solution.empty()) { 5040 LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n"); 5041 return; 5042 } 5043 5044 // Ok, we've now made all our decisions. 
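  // Workspace was filled with one formula per use, in Uses order, so
  // Solution[i] is the formula chosen for Uses[i]; the assert after the dump
  // verifies this correspondence.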
5045   LLVM_DEBUG(dbgs() << "\n"
5046                        "The chosen solution requires ";
5047              SolutionCost.print(dbgs()); dbgs() << ":\n";
5048              for (size_t i = 0, e = Uses.size(); i != e; ++i) {
5049                dbgs() << "  ";
5050                Uses[i].print(dbgs());
5051                dbgs() << "\n"
5052                          "    ";
5053                Solution[i]->print(dbgs());
5054                dbgs() << '\n';
5055              });
5056
5057   assert(Solution.size() == Uses.size() && "Malformed solution!");
5058 }
5059
5060 /// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as
5061 /// far as we can go while still being dominated by the input positions. This
5062 /// helps canonicalize the insert position, which encourages sharing.
5063 BasicBlock::iterator
5064 LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
5065                                  const SmallVectorImpl<Instruction *> &Inputs)
5066     const {
5067   Instruction *Tentative = &*IP;
5068   while (true) {
5069     bool AllDominate = true;
5070     Instruction *BetterPos = nullptr;
5071     // Don't bother attempting to insert before a catchswitch; its basic block
5072     // cannot have other non-PHI instructions.
5073     if (isa<CatchSwitchInst>(Tentative))
5074       return IP;
5075
5076     for (Instruction *Inst : Inputs) {
5077       if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
5078         AllDominate = false;
5079         break;
5080       }
5081       // Attempt to find an insert position in the middle of the block,
5082       // instead of at the end, so that it can be used for other expansions.
5083       if (Tentative->getParent() == Inst->getParent() &&
5084           (!BetterPos || !DT.dominates(Inst, BetterPos)))
5085         BetterPos = &*std::next(BasicBlock::iterator(Inst));
5086     }
5087     if (!AllDominate)
5088       break;
5089     if (BetterPos)
5090       IP = BetterPos->getIterator();
5091     else
5092       IP = Tentative->getIterator();
5093
5094     const Loop *IPLoop = LI.getLoopFor(IP->getParent());
5095     unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;
5096
5097     BasicBlock *IDom;
5098     for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
5099       if (!Rung) return IP;
5100       Rung = Rung->getIDom();
5101       if (!Rung) return IP;
5102       IDom = Rung->getBlock();
5103
5104       // Don't climb into a loop though.
5105       const Loop *IDomLoop = LI.getLoopFor(IDom);
5106       unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
5107       if (IDomDepth <= IPLoopDepth &&
5108           (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
5109         break;
5110     }
5111
5112     Tentative = IDom->getTerminator();
5113   }
5114
5115   return IP;
5116 }
5117
5118 /// Determine an insert position which will be dominated by the operands and
5119 /// which will dominate the result.
5120 BasicBlock::iterator
5121 LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
5122                                            const LSRFixup &LF,
5123                                            const LSRUse &LU,
5124                                            SCEVExpander &Rewriter) const {
5125   // Collect some instructions which must be dominated by the
5126   // expanding replacement. These must be dominated by any operands that
5127   // will be required in the expansion.
5128   SmallVector<Instruction *, 4> Inputs;
5129   if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
5130     Inputs.push_back(I);
5131   if (LU.Kind == LSRUse::ICmpZero)
5132     if (Instruction *I =
5133             dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
5134       Inputs.push_back(I);
5135   if (LF.PostIncLoops.count(L)) {
5136     if (LF.isUseFullyOutsideLoop(L))
5137       Inputs.push_back(L->getLoopLatch()->getTerminator());
5138     else
5139       Inputs.push_back(IVIncInsertPos);
5140   }
5141   // The expansion must also be dominated by the increment positions of any
5142   // loops for which it is using post-inc mode.
5143 for (const Loop *PIL : LF.PostIncLoops) { 5144 if (PIL == L) continue; 5145 5146 // Be dominated by the loop exit. 5147 SmallVector<BasicBlock *, 4> ExitingBlocks; 5148 PIL->getExitingBlocks(ExitingBlocks); 5149 if (!ExitingBlocks.empty()) { 5150 BasicBlock *BB = ExitingBlocks[0]; 5151 for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i) 5152 BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]); 5153 Inputs.push_back(BB->getTerminator()); 5154 } 5155 } 5156 5157 assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad() 5158 && !isa<DbgInfoIntrinsic>(LowestIP) && 5159 "Insertion point must be a normal instruction"); 5160 5161 // Then, climb up the immediate dominator tree as far as we can go while 5162 // still being dominated by the input positions. 5163 BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs); 5164 5165 // Don't insert instructions before PHI nodes. 5166 while (isa<PHINode>(IP)) ++IP; 5167 5168 // Ignore landingpad instructions. 5169 while (IP->isEHPad()) ++IP; 5170 5171 // Ignore debug intrinsics. 5172 while (isa<DbgInfoIntrinsic>(IP)) ++IP; 5173 5174 // Set IP below instructions recently inserted by SCEVExpander. This keeps the 5175 // IP consistent across expansions and allows the previously inserted 5176 // instructions to be reused by subsequent expansion. 5177 while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP) 5178 ++IP; 5179 5180 return IP; 5181 } 5182 5183 /// Emit instructions for the leading candidate expression for this LSRUse (this 5184 /// is called "expanding"). 5185 Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF, 5186 const Formula &F, BasicBlock::iterator IP, 5187 SCEVExpander &Rewriter, 5188 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const { 5189 if (LU.RigidFormula) 5190 return LF.OperandValToReplace; 5191 5192 // Determine an input position which will be dominated by the operands and 5193 // which will dominate the result. 5194 IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter); 5195 Rewriter.setInsertPoint(&*IP); 5196 5197 // Inform the Rewriter if we have a post-increment use, so that it can 5198 // perform an advantageous expansion. 5199 Rewriter.setPostInc(LF.PostIncLoops); 5200 5201 // This is the type that the user actually needs. 5202 Type *OpTy = LF.OperandValToReplace->getType(); 5203 // This will be the type that we'll initially expand to. 5204 Type *Ty = F.getType(); 5205 if (!Ty) 5206 // No type known; just expand directly to the ultimate type. 5207 Ty = OpTy; 5208 else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy)) 5209 // Expand directly to the ultimate type if it's the right size. 5210 Ty = OpTy; 5211 // This is the type to do integer arithmetic in. 5212 Type *IntTy = SE.getEffectiveSCEVType(Ty); 5213 5214 // Build up a list of operands to add together to form the full base. 5215 SmallVector<const SCEV *, 8> Ops; 5216 5217 // Expand the BaseRegs portion. 5218 for (const SCEV *Reg : F.BaseRegs) { 5219 assert(!Reg->isZero() && "Zero allocated in a base register!"); 5220 5221 // If we're expanding for a post-inc user, make the post-inc adjustment. 5222 Reg = denormalizeForPostIncUse(Reg, LF.PostIncLoops, SE); 5223 Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr))); 5224 } 5225 5226 // Expand the ScaledReg portion. 5227 Value *ICmpScaledV = nullptr; 5228 if (F.Scale != 0) { 5229 const SCEV *ScaledS = F.ScaledReg; 5230 5231 // If we're expanding for a post-inc user, make the post-inc adjustment. 
5232 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops); 5233 ScaledS = denormalizeForPostIncUse(ScaledS, Loops, SE); 5234 5235 if (LU.Kind == LSRUse::ICmpZero) { 5236 // Expand ScaleReg as if it was part of the base regs. 5237 if (F.Scale == 1) 5238 Ops.push_back( 5239 SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr))); 5240 else { 5241 // An interesting way of "folding" with an icmp is to use a negated 5242 // scale, which we'll implement by inserting it into the other operand 5243 // of the icmp. 5244 assert(F.Scale == -1 && 5245 "The only scale supported by ICmpZero uses is -1!"); 5246 ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr); 5247 } 5248 } else { 5249 // Otherwise just expand the scaled register and an explicit scale, 5250 // which is expected to be matched as part of the address. 5251 5252 // Flush the operand list to suppress SCEVExpander hoisting address modes. 5253 // Unless the addressing mode will not be folded. 5254 if (!Ops.empty() && LU.Kind == LSRUse::Address && 5255 isAMCompletelyFolded(TTI, LU, F)) { 5256 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr); 5257 Ops.clear(); 5258 Ops.push_back(SE.getUnknown(FullV)); 5259 } 5260 ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr)); 5261 if (F.Scale != 1) 5262 ScaledS = 5263 SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale)); 5264 Ops.push_back(ScaledS); 5265 } 5266 } 5267 5268 // Expand the GV portion. 5269 if (F.BaseGV) { 5270 // Flush the operand list to suppress SCEVExpander hoisting. 5271 if (!Ops.empty()) { 5272 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty); 5273 Ops.clear(); 5274 Ops.push_back(SE.getUnknown(FullV)); 5275 } 5276 Ops.push_back(SE.getUnknown(F.BaseGV)); 5277 } 5278 5279 // Flush the operand list to suppress SCEVExpander hoisting of both folded and 5280 // unfolded offsets. LSR assumes they both live next to their uses. 5281 if (!Ops.empty()) { 5282 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty); 5283 Ops.clear(); 5284 Ops.push_back(SE.getUnknown(FullV)); 5285 } 5286 5287 // Expand the immediate portion. 5288 int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset; 5289 if (Offset != 0) { 5290 if (LU.Kind == LSRUse::ICmpZero) { 5291 // The other interesting way of "folding" with an ICmpZero is to use a 5292 // negated immediate. 5293 if (!ICmpScaledV) 5294 ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset); 5295 else { 5296 Ops.push_back(SE.getUnknown(ICmpScaledV)); 5297 ICmpScaledV = ConstantInt::get(IntTy, Offset); 5298 } 5299 } else { 5300 // Just add the immediate values. These again are expected to be matched 5301 // as part of the address. 5302 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset))); 5303 } 5304 } 5305 5306 // Expand the unfolded offset portion. 5307 int64_t UnfoldedOffset = F.UnfoldedOffset; 5308 if (UnfoldedOffset != 0) { 5309 // Just add the immediate values. 5310 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, 5311 UnfoldedOffset))); 5312 } 5313 5314 // Emit instructions summing all the operands. 5315 const SCEV *FullS = Ops.empty() ? 5316 SE.getConstant(IntTy, 0) : 5317 SE.getAddExpr(Ops); 5318 Value *FullV = Rewriter.expandCodeFor(FullS, Ty); 5319 5320 // We're done expanding now, so reset the rewriter. 5321 Rewriter.clearPostInc(); 5322 5323 // An ICmpZero Formula represents an ICmp which we're handling as a 5324 // comparison against zero. Now that we've expanded an expression for that 5325 // form, update the ICmp's other operand. 
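  // Illustrative example (hypothetical IR): a latch test such as
  //   %c = icmp eq i64 %i.next, %n
  // is modelled as ICmpZero over "%i.next - %n". With a Scale of -1 the
  // negated part is not materialized as a subtraction; instead the expansion
  // of the scaled register (here %n) is placed back into the icmp's second
  // operand below.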
5326 if (LU.Kind == LSRUse::ICmpZero) { 5327 ICmpInst *CI = cast<ICmpInst>(LF.UserInst); 5328 if (auto *OperandIsInstr = dyn_cast<Instruction>(CI->getOperand(1))) 5329 DeadInsts.emplace_back(OperandIsInstr); 5330 assert(!F.BaseGV && "ICmp does not support folding a global value and " 5331 "a scale at the same time!"); 5332 if (F.Scale == -1) { 5333 if (ICmpScaledV->getType() != OpTy) { 5334 Instruction *Cast = 5335 CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false, 5336 OpTy, false), 5337 ICmpScaledV, OpTy, "tmp", CI); 5338 ICmpScaledV = Cast; 5339 } 5340 CI->setOperand(1, ICmpScaledV); 5341 } else { 5342 // A scale of 1 means that the scale has been expanded as part of the 5343 // base regs. 5344 assert((F.Scale == 0 || F.Scale == 1) && 5345 "ICmp does not support folding a global value and " 5346 "a scale at the same time!"); 5347 Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy), 5348 -(uint64_t)Offset); 5349 if (C->getType() != OpTy) 5350 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 5351 OpTy, false), 5352 C, OpTy); 5353 5354 CI->setOperand(1, C); 5355 } 5356 } 5357 5358 return FullV; 5359 } 5360 5361 /// Helper for Rewrite. PHI nodes are special because the use of their operands 5362 /// effectively happens in their predecessor blocks, so the expression may need 5363 /// to be expanded in multiple places. 5364 void LSRInstance::RewriteForPHI( 5365 PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F, 5366 SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const { 5367 DenseMap<BasicBlock *, Value *> Inserted; 5368 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5369 if (PN->getIncomingValue(i) == LF.OperandValToReplace) { 5370 bool needUpdateFixups = false; 5371 BasicBlock *BB = PN->getIncomingBlock(i); 5372 5373 // If this is a critical edge, split the edge so that we do not insert 5374 // the code on all predecessor/successor paths. We do this unless this 5375 // is the canonical backedge for this loop, which complicates post-inc 5376 // users. 5377 if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 && 5378 !isa<IndirectBrInst>(BB->getTerminator()) && 5379 !isa<CatchSwitchInst>(BB->getTerminator())) { 5380 BasicBlock *Parent = PN->getParent(); 5381 Loop *PNLoop = LI.getLoopFor(Parent); 5382 if (!PNLoop || Parent != PNLoop->getHeader()) { 5383 // Split the critical edge. 5384 BasicBlock *NewBB = nullptr; 5385 if (!Parent->isLandingPad()) { 5386 NewBB = 5387 SplitCriticalEdge(BB, Parent, 5388 CriticalEdgeSplittingOptions(&DT, &LI, MSSAU) 5389 .setMergeIdenticalEdges() 5390 .setKeepOneInputPHIs()); 5391 } else { 5392 SmallVector<BasicBlock*, 2> NewBBs; 5393 SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DT, &LI); 5394 NewBB = NewBBs[0]; 5395 } 5396 // If NewBB==NULL, then SplitCriticalEdge refused to split because all 5397 // phi predecessors are identical. The simple thing to do is skip 5398 // splitting in this case rather than complicate the API. 5399 if (NewBB) { 5400 // If PN is outside of the loop and BB is in the loop, we want to 5401 // move the block to be immediately before the PHI block, not 5402 // immediately after BB. 5403 if (L->contains(BB) && !L->contains(PN)) 5404 NewBB->moveBefore(PN->getParent()); 5405 5406 // Splitting the edge can reduce the number of PHI entries we have. 
5407 e = PN->getNumIncomingValues(); 5408 BB = NewBB; 5409 i = PN->getBasicBlockIndex(BB); 5410 5411 needUpdateFixups = true; 5412 } 5413 } 5414 } 5415 5416 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair = 5417 Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr))); 5418 if (!Pair.second) 5419 PN->setIncomingValue(i, Pair.first->second); 5420 else { 5421 Value *FullV = Expand(LU, LF, F, BB->getTerminator()->getIterator(), 5422 Rewriter, DeadInsts); 5423 5424 // If this is reuse-by-noop-cast, insert the noop cast. 5425 Type *OpTy = LF.OperandValToReplace->getType(); 5426 if (FullV->getType() != OpTy) 5427 FullV = 5428 CastInst::Create(CastInst::getCastOpcode(FullV, false, 5429 OpTy, false), 5430 FullV, LF.OperandValToReplace->getType(), 5431 "tmp", BB->getTerminator()); 5432 5433 PN->setIncomingValue(i, FullV); 5434 Pair.first->second = FullV; 5435 } 5436 5437 // If LSR splits critical edge and phi node has other pending 5438 // fixup operands, we need to update those pending fixups. Otherwise 5439 // formulae will not be implemented completely and some instructions 5440 // will not be eliminated. 5441 if (needUpdateFixups) { 5442 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) 5443 for (LSRFixup &Fixup : Uses[LUIdx].Fixups) 5444 // If fixup is supposed to rewrite some operand in the phi 5445 // that was just updated, it may be already moved to 5446 // another phi node. Such fixup requires update. 5447 if (Fixup.UserInst == PN) { 5448 // Check if the operand we try to replace still exists in the 5449 // original phi. 5450 bool foundInOriginalPHI = false; 5451 for (const auto &val : PN->incoming_values()) 5452 if (val == Fixup.OperandValToReplace) { 5453 foundInOriginalPHI = true; 5454 break; 5455 } 5456 5457 // If fixup operand found in original PHI - nothing to do. 5458 if (foundInOriginalPHI) 5459 continue; 5460 5461 // Otherwise it might be moved to another PHI and requires update. 5462 // If fixup operand not found in any of the incoming blocks that 5463 // means we have already rewritten it - nothing to do. 5464 for (const auto &Block : PN->blocks()) 5465 for (BasicBlock::iterator I = Block->begin(); isa<PHINode>(I); 5466 ++I) { 5467 PHINode *NewPN = cast<PHINode>(I); 5468 for (const auto &val : NewPN->incoming_values()) 5469 if (val == Fixup.OperandValToReplace) 5470 Fixup.UserInst = NewPN; 5471 } 5472 } 5473 } 5474 } 5475 } 5476 5477 /// Emit instructions for the leading candidate expression for this LSRUse (this 5478 /// is called "expanding"), and update the UserInst to reference the newly 5479 /// expanded value. 5480 void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF, 5481 const Formula &F, SCEVExpander &Rewriter, 5482 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const { 5483 // First, find an insertion point that dominates UserInst. For PHI nodes, 5484 // find the nearest block which dominates all the relevant uses. 5485 if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) { 5486 RewriteForPHI(PN, LU, LF, F, Rewriter, DeadInsts); 5487 } else { 5488 Value *FullV = 5489 Expand(LU, LF, F, LF.UserInst->getIterator(), Rewriter, DeadInsts); 5490 5491 // If this is reuse-by-noop-cast, insert the noop cast. 5492 Type *OpTy = LF.OperandValToReplace->getType(); 5493 if (FullV->getType() != OpTy) { 5494 Instruction *Cast = 5495 CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false), 5496 FullV, OpTy, "tmp", LF.UserInst); 5497 FullV = Cast; 5498 } 5499 5500 // Update the user. 
ICmpZero is handled specially here (for now) because 5501 // Expand may have updated one of the operands of the icmp already, and 5502 // its new value may happen to be equal to LF.OperandValToReplace, in 5503 // which case doing replaceUsesOfWith leads to replacing both operands 5504 // with the same value. TODO: Reorganize this. 5505 if (LU.Kind == LSRUse::ICmpZero) 5506 LF.UserInst->setOperand(0, FullV); 5507 else 5508 LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV); 5509 } 5510 5511 if (auto *OperandIsInstr = dyn_cast<Instruction>(LF.OperandValToReplace)) 5512 DeadInsts.emplace_back(OperandIsInstr); 5513 } 5514 5515 /// Rewrite all the fixup locations with new values, following the chosen 5516 /// solution. 5517 void LSRInstance::ImplementSolution( 5518 const SmallVectorImpl<const Formula *> &Solution) { 5519 // Keep track of instructions we may have made dead, so that 5520 // we can remove them after we are done working. 5521 SmallVector<WeakTrackingVH, 16> DeadInsts; 5522 5523 SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(), "lsr", 5524 false); 5525 #ifndef NDEBUG 5526 Rewriter.setDebugType(DEBUG_TYPE); 5527 #endif 5528 Rewriter.disableCanonicalMode(); 5529 Rewriter.enableLSRMode(); 5530 Rewriter.setIVIncInsertPos(L, IVIncInsertPos); 5531 5532 // Mark phi nodes that terminate chains so the expander tries to reuse them. 5533 for (const IVChain &Chain : IVChainVec) { 5534 if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst())) 5535 Rewriter.setChainedPhi(PN); 5536 } 5537 5538 // Expand the new value definitions and update the users. 5539 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) 5540 for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) { 5541 Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], Rewriter, DeadInsts); 5542 Changed = true; 5543 } 5544 5545 for (const IVChain &Chain : IVChainVec) { 5546 GenerateIVChain(Chain, Rewriter, DeadInsts); 5547 Changed = true; 5548 } 5549 // Clean up after ourselves. This must be done before deleting any 5550 // instructions. 5551 Rewriter.clear(); 5552 5553 Changed |= RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, 5554 &TLI, MSSAU); 5555 } 5556 5557 LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, 5558 DominatorTree &DT, LoopInfo &LI, 5559 const TargetTransformInfo &TTI, AssumptionCache &AC, 5560 TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU) 5561 : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L), 5562 MSSAU(MSSAU), FavorBackedgeIndex(EnableBackedgeIndexing && 5563 TTI.shouldFavorBackedgeIndex(L)) { 5564 // If LoopSimplify form is not available, stay out of trouble. 5565 if (!L->isLoopSimplifyForm()) 5566 return; 5567 5568 // If there's no interesting work to be done, bail early. 5569 if (IU.empty()) return; 5570 5571 // If there's too much analysis to be done, bail early. We won't be able to 5572 // model the problem anyway. 5573 unsigned NumUsers = 0; 5574 for (const IVStrideUse &U : IU) { 5575 if (++NumUsers > MaxIVUsers) { 5576 (void)U; 5577 LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U 5578 << "\n"); 5579 return; 5580 } 5581 // Bail out if we have a PHI on an EHPad that gets a value from a 5582 // CatchSwitchInst. Because the CatchSwitchInst cannot be split, there is 5583 // no good place to stick any instructions. 
5584 if (auto *PN = dyn_cast<PHINode>(U.getUser())) { 5585 auto *FirstNonPHI = PN->getParent()->getFirstNonPHI(); 5586 if (isa<FuncletPadInst>(FirstNonPHI) || 5587 isa<CatchSwitchInst>(FirstNonPHI)) 5588 for (BasicBlock *PredBB : PN->blocks()) 5589 if (isa<CatchSwitchInst>(PredBB->getFirstNonPHI())) 5590 return; 5591 } 5592 } 5593 5594 #ifndef NDEBUG 5595 // All dominating loops must have preheaders, or SCEVExpander may not be able 5596 // to materialize an AddRecExpr whose Start is an outer AddRecExpr. 5597 // 5598 // IVUsers analysis should only create users that are dominated by simple loop 5599 // headers. Since this loop should dominate all of its users, its user list 5600 // should be empty if this loop itself is not within a simple loop nest. 5601 for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader()); 5602 Rung; Rung = Rung->getIDom()) { 5603 BasicBlock *BB = Rung->getBlock(); 5604 const Loop *DomLoop = LI.getLoopFor(BB); 5605 if (DomLoop && DomLoop->getHeader() == BB) { 5606 assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest"); 5607 } 5608 } 5609 #endif // DEBUG 5610 5611 LLVM_DEBUG(dbgs() << "\nLSR on loop "; 5612 L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false); 5613 dbgs() << ":\n"); 5614 5615 // First, perform some low-level loop optimizations. 5616 OptimizeShadowIV(); 5617 OptimizeLoopTermCond(); 5618 5619 // If loop preparation eliminates all interesting IV users, bail. 5620 if (IU.empty()) return; 5621 5622 // Skip nested loops until we can model them better with formulae. 5623 if (!L->isInnermost()) { 5624 LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n"); 5625 return; 5626 } 5627 5628 // Start collecting data and preparing for the solver. 5629 // If number of registers is not the major cost, we cannot benefit from the 5630 // current profitable chain optimization which is based on number of 5631 // registers. 5632 // FIXME: add profitable chain optimization for other kinds major cost, for 5633 // example number of instructions. 5634 if (TTI.isNumRegsMajorCostOfLSR() || StressIVChain) 5635 CollectChains(); 5636 CollectInterestingTypesAndFactors(); 5637 CollectFixupsAndInitialFormulae(); 5638 CollectLoopInvariantFixupsAndFormulae(); 5639 5640 if (Uses.empty()) 5641 return; 5642 5643 LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n"; 5644 print_uses(dbgs())); 5645 5646 // Now use the reuse data to generate a bunch of interesting ways 5647 // to formulate the values needed for the uses. 5648 GenerateAllReuseFormulae(); 5649 5650 FilterOutUndesirableDedicatedRegisters(); 5651 NarrowSearchSpaceUsingHeuristics(); 5652 5653 SmallVector<const Formula *, 8> Solution; 5654 Solve(Solution); 5655 5656 // Release memory that is no longer needed. 5657 Factors.clear(); 5658 Types.clear(); 5659 RegUses.clear(); 5660 5661 if (Solution.empty()) 5662 return; 5663 5664 #ifndef NDEBUG 5665 // Formulae should be legal. 5666 for (const LSRUse &LU : Uses) { 5667 for (const Formula &F : LU.Formulae) 5668 assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, 5669 F) && "Illegal formula generated!"); 5670 }; 5671 #endif 5672 5673 // Now that we've decided what we want, make it so. 
5674   ImplementSolution(Solution);
5675 }
5676
5677 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
5678 void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
5679   if (Factors.empty() && Types.empty()) return;
5680
5681   OS << "LSR has identified the following interesting factors and types: ";
5682   bool First = true;
5683
5684   for (int64_t Factor : Factors) {
5685     if (!First) OS << ", ";
5686     First = false;
5687     OS << '*' << Factor;
5688   }
5689
5690   for (Type *Ty : Types) {
5691     if (!First) OS << ", ";
5692     First = false;
5693     OS << '(' << *Ty << ')';
5694   }
5695   OS << '\n';
5696 }
5697
5698 void LSRInstance::print_fixups(raw_ostream &OS) const {
5699   OS << "LSR is examining the following fixup sites:\n";
5700   for (const LSRUse &LU : Uses)
5701     for (const LSRFixup &LF : LU.Fixups) {
5702       OS << "  ";
5703       LF.print(OS);
5704       OS << '\n';
5705     }
5706 }
5707
5708 void LSRInstance::print_uses(raw_ostream &OS) const {
5709   OS << "LSR is examining the following uses:\n";
5710   for (const LSRUse &LU : Uses) {
5711     OS << "  ";
5712     LU.print(OS);
5713     OS << '\n';
5714     for (const Formula &F : LU.Formulae) {
5715       OS << "    ";
5716       F.print(OS);
5717       OS << '\n';
5718     }
5719   }
5720 }
5721
5722 void LSRInstance::print(raw_ostream &OS) const {
5723   print_factors_and_types(OS);
5724   print_fixups(OS);
5725   print_uses(OS);
5726 }
5727
5728 LLVM_DUMP_METHOD void LSRInstance::dump() const {
5729   print(errs()); errs() << '\n';
5730 }
5731 #endif
5732
5733 namespace {
5734
5735 class LoopStrengthReduce : public LoopPass {
5736 public:
5737   static char ID; // Pass ID, replacement for typeid
5738
5739   LoopStrengthReduce();
5740
5741 private:
5742   bool runOnLoop(Loop *L, LPPassManager &LPM) override;
5743   void getAnalysisUsage(AnalysisUsage &AU) const override;
5744 };
5745
5746 } // end anonymous namespace
5747
5748 LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
5749   initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
5750 }
5751
5752 void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
5753   // We split critical edges, so we change the CFG. However, we do update
5754   // many analyses if they are around.
5755   AU.addPreservedID(LoopSimplifyID);
5756
5757   AU.addRequired<LoopInfoWrapperPass>();
5758   AU.addPreserved<LoopInfoWrapperPass>();
5759   AU.addRequiredID(LoopSimplifyID);
5760   AU.addRequired<DominatorTreeWrapperPass>();
5761   AU.addPreserved<DominatorTreeWrapperPass>();
5762   AU.addRequired<ScalarEvolutionWrapperPass>();
5763   AU.addPreserved<ScalarEvolutionWrapperPass>();
5764   AU.addRequired<AssumptionCacheTracker>();
5765   AU.addRequired<TargetLibraryInfoWrapperPass>();
5766   // Requiring LoopSimplify a second time here prevents IVUsers from running
5767   // twice, since LoopSimplify was invalidated by running ScalarEvolution.
5768 AU.addRequiredID(LoopSimplifyID); 5769 AU.addRequired<IVUsersWrapperPass>(); 5770 AU.addPreserved<IVUsersWrapperPass>(); 5771 AU.addRequired<TargetTransformInfoWrapperPass>(); 5772 AU.addPreserved<MemorySSAWrapperPass>(); 5773 } 5774 5775 using EqualValues = SmallVector<std::tuple<WeakVH, int64_t, DIExpression *>, 4>; 5776 using EqualValuesMap = DenseMap<DbgValueInst *, EqualValues>; 5777 5778 static void DbgGatherEqualValues(Loop *L, ScalarEvolution &SE, 5779 EqualValuesMap &DbgValueToEqualSet) { 5780 for (auto &B : L->getBlocks()) { 5781 for (auto &I : *B) { 5782 auto DVI = dyn_cast<DbgValueInst>(&I); 5783 if (!DVI) 5784 continue; 5785 auto V = DVI->getVariableLocation(); 5786 if (!V || !SE.isSCEVable(V->getType())) 5787 continue; 5788 auto DbgValueSCEV = SE.getSCEV(V); 5789 EqualValues EqSet; 5790 for (PHINode &Phi : L->getHeader()->phis()) { 5791 if (V->getType() != Phi.getType()) 5792 continue; 5793 if (!SE.isSCEVable(Phi.getType())) 5794 continue; 5795 auto PhiSCEV = SE.getSCEV(&Phi); 5796 Optional<APInt> Offset = 5797 SE.computeConstantDifference(DbgValueSCEV, PhiSCEV); 5798 if (Offset && Offset->getMinSignedBits() <= 64) 5799 EqSet.emplace_back(std::make_tuple( 5800 &Phi, Offset.getValue().getSExtValue(), DVI->getExpression())); 5801 } 5802 DbgValueToEqualSet[DVI] = std::move(EqSet); 5803 } 5804 } 5805 } 5806 5807 static void DbgApplyEqualValues(EqualValuesMap &DbgValueToEqualSet) { 5808 for (auto A : DbgValueToEqualSet) { 5809 auto DVI = A.first; 5810 // Only update those that are now undef. 5811 if (!isa_and_nonnull<UndefValue>(DVI->getVariableLocation())) 5812 continue; 5813 for (auto EV : A.second) { 5814 auto V = std::get<WeakVH>(EV); 5815 if (!V) 5816 continue; 5817 auto DbgDIExpr = std::get<DIExpression *>(EV); 5818 auto Offset = std::get<int64_t>(EV); 5819 auto &Ctx = DVI->getContext(); 5820 DVI->setOperand(0, MetadataAsValue::get(Ctx, ValueAsMetadata::get(V))); 5821 if (Offset) { 5822 SmallVector<uint64_t, 8> Ops; 5823 DIExpression::appendOffset(Ops, Offset); 5824 DbgDIExpr = DIExpression::prependOpcodes(DbgDIExpr, Ops, true); 5825 } 5826 DVI->setOperand(2, MetadataAsValue::get(Ctx, DbgDIExpr)); 5827 break; 5828 } 5829 } 5830 } 5831 5832 static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE, 5833 DominatorTree &DT, LoopInfo &LI, 5834 const TargetTransformInfo &TTI, 5835 AssumptionCache &AC, TargetLibraryInfo &TLI, 5836 MemorySSA *MSSA) { 5837 5838 bool Changed = false; 5839 std::unique_ptr<MemorySSAUpdater> MSSAU; 5840 if (MSSA) 5841 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA); 5842 5843 // Run the main LSR transformation. 5844 Changed |= 5845 LSRInstance(L, IU, SE, DT, LI, TTI, AC, TLI, MSSAU.get()).getChanged(); 5846 5847 // Debug preservation - before we start removing anything create equivalence 5848 // sets for the llvm.dbg.value intrinsics. 5849 EqualValuesMap DbgValueToEqualSet; 5850 DbgGatherEqualValues(L, SE, DbgValueToEqualSet); 5851 5852 // Remove any extra phis created by processing inner loops. 
5853 Changed |= DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get()); 5854 if (EnablePhiElim && L->isLoopSimplifyForm()) { 5855 SmallVector<WeakTrackingVH, 16> DeadInsts; 5856 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 5857 SCEVExpander Rewriter(SE, DL, "lsr", false); 5858 #ifndef NDEBUG 5859 Rewriter.setDebugType(DEBUG_TYPE); 5860 #endif 5861 unsigned numFolded = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, &TTI); 5862 if (numFolded) { 5863 Changed = true; 5864 RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, &TLI, 5865 MSSAU.get()); 5866 DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get()); 5867 } 5868 } 5869 5870 DbgApplyEqualValues(DbgValueToEqualSet); 5871 5872 return Changed; 5873 } 5874 5875 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) { 5876 if (skipLoop(L)) 5877 return false; 5878 5879 auto &IU = getAnalysis<IVUsersWrapperPass>().getIU(); 5880 auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 5881 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 5882 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 5883 const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI( 5884 *L->getHeader()->getParent()); 5885 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache( 5886 *L->getHeader()->getParent()); 5887 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI( 5888 *L->getHeader()->getParent()); 5889 auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>(); 5890 MemorySSA *MSSA = nullptr; 5891 if (MSSAAnalysis) 5892 MSSA = &MSSAAnalysis->getMSSA(); 5893 return ReduceLoopStrength(L, IU, SE, DT, LI, TTI, AC, TLI, MSSA); 5894 } 5895 5896 PreservedAnalyses LoopStrengthReducePass::run(Loop &L, LoopAnalysisManager &AM, 5897 LoopStandardAnalysisResults &AR, 5898 LPMUpdater &) { 5899 if (!ReduceLoopStrength(&L, AM.getResult<IVUsersAnalysis>(L, AR), AR.SE, 5900 AR.DT, AR.LI, AR.TTI, AR.AC, AR.TLI, AR.MSSA)) 5901 return PreservedAnalyses::all(); 5902 5903 auto PA = getLoopPassPreservedAnalyses(); 5904 if (AR.MSSA) 5905 PA.preserve<MemorySSAAnalysis>(); 5906 return PA; 5907 } 5908 5909 char LoopStrengthReduce::ID = 0; 5910 5911 INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce", 5912 "Loop Strength Reduction", false, false) 5913 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 5914 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 5915 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 5916 INITIALIZE_PASS_DEPENDENCY(IVUsersWrapperPass) 5917 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 5918 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 5919 INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce", 5920 "Loop Strength Reduction", false, false) 5921 5922 Pass *llvm::createLoopStrengthReducePass() { return new LoopStrengthReduce(); } 5923
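// Usage note (not part of the pass itself): the transformation registered
// above can be exercised with the legacy pass manager as, e.g.,
//   opt -loop-reduce -S input.ll
// and through LoopStrengthReducePass::run with the new pass manager, e.g.,
//   opt -passes='loop(loop-reduce)' -S input.ll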