1 //===- StraightLineStrengthReduce.cpp - -----------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements straight-line strength reduction (SLSR). Unlike loop 10 // strength reduction, this algorithm is designed to reduce arithmetic 11 // redundancy in straight-line code instead of loops. It has proven to be 12 // effective in simplifying arithmetic statements derived from an unrolled loop. 13 // It can also simplify the logic of SeparateConstOffsetFromGEP. 14 // 15 // There are many optimizations we can perform in the domain of SLSR. This file 16 // for now contains only an initial step. Specifically, we look for strength 17 // reduction candidates in the following forms: 18 // 19 // Form 1: B + i * S 20 // Form 2: (B + i) * S 21 // Form 3: &B[i * S] 22 // 23 // where S is an integer variable, and i is a constant integer. If we found two 24 // candidates S1 and S2 in the same form and S1 dominates S2, we may rewrite S2 25 // in a simpler way with respect to S1. For example, 26 // 27 // S1: X = B + i * S 28 // S2: Y = B + i' * S => X + (i' - i) * S 29 // 30 // S1: X = (B + i) * S 31 // S2: Y = (B + i') * S => X + (i' - i) * S 32 // 33 // S1: X = &B[i * S] 34 // S2: Y = &B[i' * S] => &X[(i' - i) * S] 35 // 36 // Note: (i' - i) * S is folded to the extent possible. 37 // 38 // This rewriting is in general a good idea. The code patterns we focus on 39 // usually come from loop unrolling, so (i' - i) * S is likely the same 40 // across iterations and can be reused. When that happens, the optimized form 41 // takes only one add starting from the second iteration. 42 // 43 // When such rewriting is possible, we call S1 a "basis" of S2. 
// When S2 has multiple bases, we choose to rewrite S2 with respect to its
// "immediate" basis, the basis that is the closest ancestor in the dominator
// tree.
//
// TODO:
//
// - Floating point arithmetics when fast math is enabled.
//
// - SLSR may decrease ILP at the architecture level. Targets that are very
//   sensitive to ILP may want to disable it. Having SLSR to consider ILP is
//   left as future work.
//
// - When (i' - i) is constant but i and i' are not, we could still perform
//   SLSR.

#include "llvm/Transforms/Scalar/StraightLineStrengthReduce.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>
#include <limits>
#include <list>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

// Sentinel address space passed to isLegalAddressingMode when the candidate's
// actual address space is not known.
static const unsigned UnknownAddressSpace =
    std::numeric_limits<unsigned>::max();

namespace {

// Legacy pass manager wrapper. The actual work is done by the
// StraightLineStrengthReduce class below, shared with the new pass manager.
class StraightLineStrengthReduceLegacyPass : public FunctionPass {
  const DataLayout *DL = nullptr;

public:
  static char ID;

  StraightLineStrengthReduceLegacyPass() : FunctionPass(ID) {
    initializeStraightLineStrengthReduceLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    // We do not modify the shape of the CFG.
    AU.setPreservesCFG();
  }

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false;
  }

  bool runOnFunction(Function &F) override;
};

// The strength-reduction engine: collects candidates in dominator-tree
// depth-first order, then rewrites each candidate in terms of its immediate
// basis.
class StraightLineStrengthReduce {
public:
  StraightLineStrengthReduce(const DataLayout *DL, DominatorTree *DT,
                             ScalarEvolution *SE, TargetTransformInfo *TTI)
      : DL(DL), DT(DT), SE(SE), TTI(TTI) {}

  // SLSR candidate. Such a candidate must be in one of the forms described in
  // the header comments.
  struct Candidate {
    enum Kind {
      Invalid, // reserved for the default constructor
      Add,     // B + i * S
      Mul,     // (B + i) * S
      GEP,     // &B[..][i * S][..]
    };

    Candidate() = default;
    Candidate(Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
              Instruction *I)
        : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I) {}

    Kind CandidateKind = Invalid;

    const SCEV *Base = nullptr;

    // Note that Index and Stride of a GEP candidate do not necessarily have the
    // same integer type. In that case, during rewriting, Stride will be
    // sign-extended or truncated to Index's type.
    ConstantInt *Index = nullptr;

    Value *Stride = nullptr;

    // The instruction this candidate corresponds to. It helps us to rewrite a
    // candidate with respect to its immediate basis. Note that one instruction
    // can correspond to multiple candidates depending on how you associate the
    // expression. For instance,
    //
    //   (a + 1) * (b + 2)
    //
    // can be treated as
    //
    //   <Base: a, Index: 1, Stride: b + 2>
    //
    // or
    //
    //   <Base: b, Index: 2, Stride: a + 1>
    Instruction *Ins = nullptr;

    // Points to the immediate basis of this candidate, or nullptr if we cannot
    // find any basis for this candidate.
    Candidate *Basis = nullptr;
  };

  bool runOnFunction(Function &F);

private:
  // Returns true if Basis is a basis for C, i.e., Basis dominates C and they
  // share the same base and stride.
  bool isBasisFor(const Candidate &Basis, const Candidate &C);

  // Returns whether the candidate can be folded into an addressing mode.
  bool isFoldable(const Candidate &C, TargetTransformInfo *TTI,
                  const DataLayout *DL);

  // Returns true if C is already in a simplest form and not worth being
  // rewritten.
  bool isSimplestForm(const Candidate &C);

  // Checks whether I is in a candidate form. If so, adds all the matching forms
  // to Candidates, and tries to find the immediate basis for each of them.
  void allocateCandidatesAndFindBasis(Instruction *I);

  // Allocate candidates and find bases for Add instructions.
  void allocateCandidatesAndFindBasisForAdd(Instruction *I);

  // Given I = LHS + RHS, factors RHS into i * S and makes (LHS + i * S) a
  // candidate.
  void allocateCandidatesAndFindBasisForAdd(Value *LHS, Value *RHS,
                                            Instruction *I);

  // Allocate candidates and find bases for Mul instructions.
  void allocateCandidatesAndFindBasisForMul(Instruction *I);

  // Splits LHS into Base + Index and, if succeeds, calls
  // allocateCandidatesAndFindBasis.
  void allocateCandidatesAndFindBasisForMul(Value *LHS, Value *RHS,
                                            Instruction *I);

  // Allocate candidates and find bases for GetElementPtr instructions.
  void allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP);

  // A helper function that scales Idx with ElementSize before invoking
  // allocateCandidatesAndFindBasis.
  void allocateCandidatesAndFindBasisForGEP(const SCEV *B, ConstantInt *Idx,
                                            Value *S, uint64_t ElementSize,
                                            Instruction *I);

  // Adds the given form <CT, B, Idx, S> to Candidates, and finds its immediate
  // basis.
  void allocateCandidatesAndFindBasis(Candidate::Kind CT, const SCEV *B,
                                      ConstantInt *Idx, Value *S,
                                      Instruction *I);

  // Rewrites candidate C with respect to Basis.
  void rewriteCandidateWithBasis(const Candidate &C, const Candidate &Basis);

  // A helper function that factors ArrayIdx to a product of a stride and a
  // constant index, and invokes allocateCandidatesAndFindBasis with the
  // factorings.
  void factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize,
                        GetElementPtrInst *GEP);

  // Emit code that computes the "bump" from Basis to C.
  static Value *emitBump(const Candidate &Basis, const Candidate &C,
                         IRBuilder<> &Builder, const DataLayout *DL);

  const DataLayout *DL = nullptr;
  DominatorTree *DT = nullptr;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI = nullptr;
  std::list<Candidate> Candidates;

  // Temporarily holds all instructions that are unlinked (but not deleted) by
  // rewriteCandidateWithBasis. These instructions will be actually removed
  // after all rewriting finishes.
  std::vector<Instruction *> UnlinkedInstructions;
};

} // end anonymous namespace

char StraightLineStrengthReduceLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(StraightLineStrengthReduceLegacyPass, "slsr",
                      "Straight line strength reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(StraightLineStrengthReduceLegacyPass, "slsr",
                    "Straight line strength reduction", false, false)

FunctionPass *llvm::createStraightLineStrengthReducePass() {
  return new StraightLineStrengthReduceLegacyPass();
}

bool StraightLineStrengthReduce::isBasisFor(const Candidate &Basis,
                                            const Candidate &C) {
  return (Basis.Ins != C.Ins && // skip the same instruction
          // They must have the same type too. Basis.Base == C.Base doesn't
          // guarantee their types are the same (PR23975).
          Basis.Ins->getType() == C.Ins->getType() &&
          // Basis must dominate C in order to rewrite C with respect to Basis.
          DT->dominates(Basis.Ins->getParent(), C.Ins->getParent()) &&
          // They share the same base, stride, and candidate kind.
          Basis.Base == C.Base && Basis.Stride == C.Stride &&
          Basis.CandidateKind == C.CandidateKind);
}

// Returns whether computing GEP is free according to the target's cost model,
// i.e., it can be folded into the addressing mode of the enclosing access.
static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value *, 4> Indices(GEP->indices());
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}

// Returns whether (Base + Index * Stride) can be folded to an addressing mode.
static bool isAddFoldable(const SCEV *Base, ConstantInt *Index, Value *Stride,
                          TargetTransformInfo *TTI) {
  // Index->getSExtValue() may crash if Index is wider than 64-bit.
  return Index->getBitWidth() <= 64 &&
         TTI->isLegalAddressingMode(Base->getType(), nullptr, 0, true,
                                    Index->getSExtValue(), UnknownAddressSpace);
}

bool StraightLineStrengthReduce::isFoldable(const Candidate &C,
                                            TargetTransformInfo *TTI,
                                            const DataLayout *DL) {
  if (C.CandidateKind == Candidate::Add)
    return isAddFoldable(C.Base, C.Index, C.Stride, TTI);
  if (C.CandidateKind == Candidate::GEP)
    return isGEPFoldable(cast<GetElementPtrInst>(C.Ins), TTI);
  return false;
}

// Returns true if GEP has zero or one non-zero index.
static bool hasOnlyOneNonZeroIndex(GetElementPtrInst *GEP) {
  unsigned NumNonZeroIndices = 0;
  for (Use &Idx : GEP->indices()) {
    ConstantInt *ConstIdx = dyn_cast<ConstantInt>(Idx);
    if (ConstIdx == nullptr || !ConstIdx->isZero())
      ++NumNonZeroIndices;
  }
  return NumNonZeroIndices <= 1;
}

bool StraightLineStrengthReduce::isSimplestForm(const Candidate &C) {
  if (C.CandidateKind == Candidate::Add) {
    // B + 1 * S or B + (-1) * S
    return C.Index->isOne() || C.Index->isMinusOne();
  }
  if (C.CandidateKind == Candidate::Mul) {
    // (B + 0) * S
    return C.Index->isZero();
  }
  if (C.CandidateKind == Candidate::GEP) {
    // (char*)B + S or (char*)B - S
    return ((C.Index->isOne() || C.Index->isMinusOne()) &&
            hasOnlyOneNonZeroIndex(cast<GetElementPtrInst>(C.Ins)));
  }
  return false;
}

// TODO: We currently implement an algorithm whose time complexity is linear in
// the number of existing candidates. However, we could do better by using
// ScopedHashTable. Specifically, while traversing the dominator tree, we could
// maintain all the candidates that dominate the basic block being traversed in
// a ScopedHashTable. This hash table is indexed by the base and the stride of
// a candidate. Therefore, finding the immediate basis of a candidate boils down
// to one hash-table look up.
void StraightLineStrengthReduce::allocateCandidatesAndFindBasis(
    Candidate::Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
    Instruction *I) {
  Candidate C(CT, B, Idx, S, I);
  // SLSR can complicate an instruction in two cases:
  //
  // 1. If we can fold I into an addressing mode, computing I is likely free or
  //    takes only one instruction.
  //
  // 2. I is already in a simplest form. For example, when
  //      X = B + 8 * S
  //      Y = B + S,
  //    rewriting Y to X - 7 * S is probably a bad idea.
  //
  // In the above cases, we still add I to the candidate list so that I can be
  // the basis of other candidates, but we leave I's basis blank so that I
  // won't be rewritten.
  if (!isFoldable(C, TTI, DL) && !isSimplestForm(C)) {
    // Try to compute the immediate basis of C.
    unsigned NumIterations = 0;
    // Limit the scan radius to avoid running in quadratic time.
    static const unsigned MaxNumIterations = 50;
    for (auto Basis = Candidates.rbegin();
         Basis != Candidates.rend() && NumIterations < MaxNumIterations;
         ++Basis, ++NumIterations) {
      if (isBasisFor(*Basis, C)) {
        C.Basis = &(*Basis);
        break;
      }
    }
  }
  // Regardless of whether we find a basis for C, we need to push C to the
  // candidate list so that it can be the basis of other candidates.
  Candidates.push_back(C);
}

void StraightLineStrengthReduce::allocateCandidatesAndFindBasis(
    Instruction *I) {
  // Dispatch on opcode; instructions of any other opcode are not candidates.
  switch (I->getOpcode()) {
  case Instruction::Add:
    allocateCandidatesAndFindBasisForAdd(I);
    break;
  case Instruction::Mul:
    allocateCandidatesAndFindBasisForMul(I);
    break;
  case Instruction::GetElementPtr:
    allocateCandidatesAndFindBasisForGEP(cast<GetElementPtrInst>(I));
    break;
  }
}

void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd(
    Instruction *I) {
  // Try matching B + i * S.
  if (!isa<IntegerType>(I->getType()))
    return;

  assert(I->getNumOperands() == 2 && "isn't I an add?");
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  // Addition is commutative, so try factoring each operand as i * S.
  allocateCandidatesAndFindBasisForAdd(LHS, RHS, I);
  if (LHS != RHS)
    allocateCandidatesAndFindBasisForAdd(RHS, LHS, I);
}

void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd(
    Value *LHS, Value *RHS, Instruction *I) {
  Value *S = nullptr;
  ConstantInt *Idx = nullptr;
  if (match(RHS, m_Mul(m_Value(S), m_ConstantInt(Idx)))) {
    // I = LHS + RHS = LHS + Idx * S
    allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I);
  } else if (match(RHS, m_Shl(m_Value(S), m_ConstantInt(Idx)))) {
    // I = LHS + RHS = LHS + (S << Idx) = LHS + S * (1 << Idx)
    APInt One(Idx->getBitWidth(), 1);
    Idx = ConstantInt::get(Idx->getContext(), One << Idx->getValue());
    allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I);
  } else {
    // At least, I = LHS + 1 * RHS
    ConstantInt *One = ConstantInt::get(cast<IntegerType>(I->getType()), 1);
    allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), One, RHS,
                                   I);
  }
}

// Returns true if A matches B + C where C is constant.
static bool matchesAdd(Value *A, Value *&B, ConstantInt *&C) {
  return match(A, m_c_Add(m_Value(B), m_ConstantInt(C)));
}

// Returns true if A matches B | C where C is constant.
static bool matchesOr(Value *A, Value *&B, ConstantInt *&C) {
  return match(A, m_c_Or(m_Value(B), m_ConstantInt(C)));
}

void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul(
    Value *LHS, Value *RHS, Instruction *I) {
  Value *B = nullptr;
  ConstantInt *Idx = nullptr;
  if (matchesAdd(LHS, B, Idx)) {
    // If LHS is in the form of "Base + Index", then I is in the form of
    // "(Base + Index) * RHS".
    allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I);
  } else if (matchesOr(LHS, B, Idx) && haveNoCommonBitsSet(B, Idx, *DL)) {
    // If LHS is in the form of "Base | Index" and Base and Index have no common
    // bits set, then
    //   Base | Index = Base + Index
    // and I is thus in the form of "(Base + Index) * RHS".
    allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I);
  } else {
    // Otherwise, at least try the form (LHS + 0) * RHS.
    ConstantInt *Zero = ConstantInt::get(cast<IntegerType>(I->getType()), 0);
    allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(LHS), Zero, RHS,
                                   I);
  }
}

void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul(
    Instruction *I) {
  // Try matching (B + i) * S.
  // TODO: we could extend SLSR to float and vector types.
  if (!isa<IntegerType>(I->getType()))
    return;

  assert(I->getNumOperands() == 2 && "isn't I a mul?");
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  allocateCandidatesAndFindBasisForMul(LHS, RHS, I);
  if (LHS != RHS) {
    // Symmetrically, try to split RHS to Base + Index.
    allocateCandidatesAndFindBasisForMul(RHS, LHS, I);
  }
}

void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
    const SCEV *B, ConstantInt *Idx, Value *S, uint64_t ElementSize,
    Instruction *I) {
  // I = B + sext(Idx *nsw S) * ElementSize
  //   = B + (sext(Idx) * sext(S)) * ElementSize
  //   = B + (sext(Idx) * ElementSize) * sext(S)
  // Casting to IntegerType is safe because we skipped vector GEPs.
  IntegerType *PtrIdxTy = cast<IntegerType>(DL->getIndexType(I->getType()));
  ConstantInt *ScaledIdx = ConstantInt::get(
      PtrIdxTy, Idx->getSExtValue() * (int64_t)ElementSize, true);
  allocateCandidatesAndFindBasis(Candidate::GEP, B, ScaledIdx, S, I);
}

void StraightLineStrengthReduce::factorArrayIndex(Value *ArrayIdx,
                                                  const SCEV *Base,
                                                  uint64_t ElementSize,
                                                  GetElementPtrInst *GEP) {
  // At least, ArrayIdx = ArrayIdx *nsw 1.
  allocateCandidatesAndFindBasisForGEP(
      Base, ConstantInt::get(cast<IntegerType>(ArrayIdx->getType()), 1),
      ArrayIdx, ElementSize, GEP);
  Value *LHS = nullptr;
  ConstantInt *RHS = nullptr;
  // One alternative is matching the SCEV of ArrayIdx instead of ArrayIdx
  // itself. This would allow us to handle the shl case for free. However,
  // matching SCEVs has two issues:
  //
  // 1. this would complicate rewriting because the rewriting procedure
  // would have to translate SCEVs back to IR instructions. This translation
  // is difficult when LHS is further evaluated to a composite SCEV.
  //
  // 2. ScalarEvolution is designed to be control-flow oblivious. It tends
  // to strip nsw/nuw flags which are critical for SLSR to trace into
  // sext'ed multiplication.
  if (match(ArrayIdx, m_NSWMul(m_Value(LHS), m_ConstantInt(RHS)))) {
    // SLSR is currently unsafe if i * S may overflow.
    // GEP = Base + sext(LHS *nsw RHS) * ElementSize
    allocateCandidatesAndFindBasisForGEP(Base, RHS, LHS, ElementSize, GEP);
  } else if (match(ArrayIdx, m_NSWShl(m_Value(LHS), m_ConstantInt(RHS)))) {
    // GEP = Base + sext(LHS <<nsw RHS) * ElementSize
    //     = Base + sext(LHS *nsw (1 << RHS)) * ElementSize
    APInt One(RHS->getBitWidth(), 1);
    ConstantInt *PowerOf2 =
        ConstantInt::get(RHS->getContext(), One << RHS->getValue());
    allocateCandidatesAndFindBasisForGEP(Base, PowerOf2, LHS, ElementSize, GEP);
  }
}

void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
    GetElementPtrInst *GEP) {
  // TODO: handle vector GEPs
  if (GEP->getType()->isVectorTy())
    return;

  SmallVector<const SCEV *, 4> IndexExprs;
  for (Use &Idx : GEP->indices())
    IndexExprs.push_back(SE->getSCEV(Idx));

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    // Struct field offsets are fixed; only sequential (array) indices can be
    // strength-reduced.
    if (GTI.isStruct())
      continue;

    // Temporarily zero out this index so getGEPExpr below yields the GEP's
    // base plus the offsets of all the other indices; restored at loop end.
    const SCEV *OrigIndexExpr = IndexExprs[I - 1];
    IndexExprs[I - 1] = SE->getZero(OrigIndexExpr->getType());

    // The base of this candidate is GEP's base plus the offsets of all
    // indices except this current one.
    const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
    Value *ArrayIdx = GEP->getOperand(I);
    uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
    if (ArrayIdx->getType()->getIntegerBitWidth() <=
        DL->getIndexSizeInBits(GEP->getAddressSpace())) {
      // Skip factoring if ArrayIdx is wider than the index size, because
      // ArrayIdx is implicitly truncated to the index size.
      factorArrayIndex(ArrayIdx, BaseExpr, ElementSize, GEP);
    }
    // When ArrayIdx is the sext of a value, we try to factor that value as
    // well. Handling this case is important because array indices are
    // typically sign-extended to the pointer index size.
    Value *TruncatedArrayIdx = nullptr;
    if (match(ArrayIdx, m_SExt(m_Value(TruncatedArrayIdx))) &&
        TruncatedArrayIdx->getType()->getIntegerBitWidth() <=
            DL->getIndexSizeInBits(GEP->getAddressSpace())) {
      // Skip factoring if TruncatedArrayIdx is wider than the index size,
      // because TruncatedArrayIdx is implicitly truncated to the index size.
      factorArrayIndex(TruncatedArrayIdx, BaseExpr, ElementSize, GEP);
    }

    IndexExprs[I - 1] = OrigIndexExpr;
  }
}

// A helper function that unifies the bitwidth of A and B.
static void unifyBitWidth(APInt &A, APInt &B) {
  if (A.getBitWidth() < B.getBitWidth())
    A = A.sext(B.getBitWidth());
  else if (A.getBitWidth() > B.getBitWidth())
    B = B.sext(A.getBitWidth());
}

// Emits instructions (before C.Ins, via Builder) that compute the "bump"
// (i' - i) * S needed to derive C from Basis, using shifts or negation when
// the index delta allows.
Value *StraightLineStrengthReduce::emitBump(const Candidate &Basis,
                                            const Candidate &C,
                                            IRBuilder<> &Builder,
                                            const DataLayout *DL) {
  APInt Idx = C.Index->getValue(), BasisIdx = Basis.Index->getValue();
  unifyBitWidth(Idx, BasisIdx);
  APInt IndexOffset = Idx - BasisIdx;

  // Compute Bump = C - Basis = (i' - i) * S.
  // Common case 1: if (i' - i) is 1, Bump = S.
  if (IndexOffset == 1)
    return C.Stride;
  // Common case 2: if (i' - i) is -1, Bump = -S.
  if (IndexOffset.isAllOnes())
    return Builder.CreateNeg(C.Stride);

  // Otherwise, Bump = (i' - i) * sext/trunc(S). Note that (i' - i) and S may
  // have different bit widths.
  IntegerType *DeltaType =
      IntegerType::get(Basis.Ins->getContext(), IndexOffset.getBitWidth());
  Value *ExtendedStride = Builder.CreateSExtOrTrunc(C.Stride, DeltaType);
  if (IndexOffset.isPowerOf2()) {
    // If (i' - i) is a power of 2, Bump = sext/trunc(S) << log(i' - i).
    ConstantInt *Exponent = ConstantInt::get(DeltaType, IndexOffset.logBase2());
    return Builder.CreateShl(ExtendedStride, Exponent);
  }
  if (IndexOffset.isNegatedPowerOf2()) {
    // If (i - i') is a power of 2, Bump = -(sext/trunc(S) << log(i - i')).
    ConstantInt *Exponent =
        ConstantInt::get(DeltaType, (-IndexOffset).logBase2());
    return Builder.CreateNeg(Builder.CreateShl(ExtendedStride, Exponent));
  }
  Constant *Delta = ConstantInt::get(DeltaType, IndexOffset);
  return Builder.CreateMul(ExtendedStride, Delta);
}

void StraightLineStrengthReduce::rewriteCandidateWithBasis(
    const Candidate &C, const Candidate &Basis) {
  assert(C.CandidateKind == Basis.CandidateKind && C.Base == Basis.Base &&
         C.Stride == Basis.Stride);
  // We run rewriteCandidateWithBasis on all candidates in a post-order, so the
  // basis of a candidate cannot be unlinked before the candidate.
  assert(Basis.Ins->getParent() != nullptr && "the basis is unlinked");

  // An instruction can correspond to multiple candidates. Therefore, instead of
  // simply deleting an instruction when we rewrite it, we mark its parent as
  // nullptr (i.e. unlink it) so that we can skip the candidates whose
  // instruction is already rewritten.
  if (!C.Ins->getParent())
    return;

  IRBuilder<> Builder(C.Ins);
  Value *Bump = emitBump(Basis, C, Builder, DL);
  Value *Reduced = nullptr; // equivalent to but weaker than C.Ins
  switch (C.CandidateKind) {
  case Candidate::Add:
  case Candidate::Mul: {
    // C = Basis + Bump
    Value *NegBump;
    if (match(Bump, m_Neg(m_Value(NegBump)))) {
      // If Bump is a neg instruction, emit C = Basis - (-Bump).
      Reduced = Builder.CreateSub(Basis.Ins, NegBump);
      // We only use the negative argument of Bump, and Bump itself may be
      // trivially dead.
      RecursivelyDeleteTriviallyDeadInstructions(Bump);
    } else {
      // It's tempting to preserve nsw on Bump and/or Reduced. However, it's
      // usually unsound, e.g.,
      //
      //   X = (-2 +nsw 1) *nsw INT_MAX
      //   Y = (-2 +nsw 3) *nsw INT_MAX
      //   =>
      //   Y = X + 2 * INT_MAX
      //
      // Neither + and * in the resultant expression are nsw.
      Reduced = Builder.CreateAdd(Basis.Ins, Bump);
    }
    break;
  }
  case Candidate::GEP: {
    bool InBounds = cast<GetElementPtrInst>(C.Ins)->isInBounds();
    // C = (char *)Basis + Bump
    Reduced = Builder.CreatePtrAdd(Basis.Ins, Bump, "", InBounds);
    break;
  }
  default:
    llvm_unreachable("C.CandidateKind is invalid");
  };
  Reduced->takeName(C.Ins);
  C.Ins->replaceAllUsesWith(Reduced);
  // Unlink C.Ins so that we can skip other candidates also corresponding to
  // C.Ins. The actual deletion is postponed to the end of runOnFunction.
  C.Ins->removeFromParent();
  UnlinkedInstructions.push_back(C.Ins);
}

bool StraightLineStrengthReduceLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  return StraightLineStrengthReduce(DL, DT, SE, TTI).runOnFunction(F);
}

bool StraightLineStrengthReduce::runOnFunction(Function &F) {
  // Traverse the dominator tree in the depth-first order. This order makes sure
  // all bases of a candidate are in Candidates when we process it.
  for (const auto Node : depth_first(DT))
    for (auto &I : *(Node->getBlock()))
      allocateCandidatesAndFindBasis(&I);

  // Rewrite candidates in the reverse depth-first order. This order makes sure
  // a candidate being rewritten is not a basis for any other candidate.
  while (!Candidates.empty()) {
    const Candidate &C = Candidates.back();
    if (C.Basis != nullptr) {
      rewriteCandidateWithBasis(C, *C.Basis);
    }
    Candidates.pop_back();
  }

  // Delete all unlinked instructions.
  for (auto *UnlinkedInst : UnlinkedInstructions) {
    // Drop each operand first so operands that became trivially dead can be
    // recursively deleted before the instruction itself.
    for (unsigned I = 0, E = UnlinkedInst->getNumOperands(); I != E; ++I) {
      Value *Op = UnlinkedInst->getOperand(I);
      UnlinkedInst->setOperand(I, nullptr);
      RecursivelyDeleteTriviallyDeadInstructions(Op);
    }
    UnlinkedInst->deleteValue();
  }
  // The function changed iff we unlinked (and deleted) any instruction.
  bool Ret = !UnlinkedInstructions.empty();
  UnlinkedInstructions.clear();
  return Ret;
}

namespace llvm {

PreservedAnalyses
StraightLineStrengthReducePass::run(Function &F, FunctionAnalysisManager &AM) {
  const DataLayout *DL = &F.getDataLayout();
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  if (!StraightLineStrengthReduce(DL, DT, SE, TTI).runOnFunction(F))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<TargetIRAnalysis>();
  return PA;
}

} // namespace llvm