//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the above code to
//
//   int t = a + b;
//   bar(t);
//   bar(t + 2);
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes (a + 2) + b is the best form according
// to its rank system.
//
// To address this limitation, NaryReassociate reassociates an expression into
// a form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2, because it detects
// that (a + b) is computed before. (An IR-level sketch of this rewrite appears
// at the end of this comment.)
//
// NaryReassociate works as follows. For every instruction in the form of
// (a + b) + c, it checks whether a + c or b + c is already computed by a
// dominating instruction. If so, it reassociates (a + b) + c into (a + c) + b
// or (b + c) + a and removes the redundancy accordingly. To efficiently look
// up whether an expression has been computed before, we store each instruction
// seen and its SCEV in an SCEV-to-instruction map.
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking the function in
// depth-first order. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to run for multiple
// iterations before emitting optimal code. One source of this need is that we
// only split an operand when it is used only once. The algorithm can eliminate
// an instruction and thereby decrease the use count of its operands. As a
// result, an instruction that previously had multiple uses may become a
// single-use instruction and thus eligible for splitting. For example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac + b because ab is
// used twice. However, we can reassociate ab2c to abc + b in the first
// iteration. As a result, ab2 becomes dead and ab is used only once in the
// second iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds, muls, and GEPs for now. This should be
//    extended and generalized.
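//
// As a concrete illustration, the motivating example at the top of this
// comment corresponds roughly to the following LLVM IR rewrite (a sketch;
// value names and the signature of @bar are made up for illustration):
//
//   %t  = add i32 %a, %b
//   call void @bar(i32 %t)
//   %a2 = add i32 %a, 2
//   %s  = add i32 %a2, %b
//   call void @bar(i32 %s)
//
// becomes
//
//   %t  = add i32 %a, %b
//   call void @bar(i32 %t)
//   %s  = add i32 %t, 2
//   call void @bar(i32 %s)
//
// because the dominating instruction %t already computes a + b; the old %s is
// then deleted and %a2 becomes trivially dead.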
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "nary-reassociate"

namespace {

class NaryReassociateLegacyPass : public FunctionPass {
public:
  static char ID;

  NaryReassociateLegacyPass() : FunctionPass(ID) {
    initializeNaryReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  NaryReassociatePass Impl;
};

} // end anonymous namespace

char NaryReassociateLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(NaryReassociateLegacyPass, "nary-reassociate",
                      "Nary reassociation", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociateLegacyPass, "nary-reassociate",
                    "Nary reassociation", false, false)

FunctionPass *llvm::createNaryReassociatePass() {
  return new NaryReassociateLegacyPass();
}

bool NaryReassociateLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *TTI =
      &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  return Impl.runImpl(F, AC, DT, SE, TLI, TTI);
}

PreservedAnalyses NaryReassociatePass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  if (!runImpl(F, AC, DT, SE, TLI, TTI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<ScalarEvolutionAnalysis>();
  return PA;
}

bool NaryReassociatePass::runImpl(Function &F, AssumptionCache *AC_,
                                  DominatorTree *DT_, ScalarEvolution *SE_,
                                  TargetLibraryInfo *TLI_,
                                  TargetTransformInfo *TTI_) {
  AC = AC_;
  DT = DT_;
  SE = SE_;
  TLI = TLI_;
  TTI = TTI_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = doOneIteration(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

// Whitelist the instruction types NaryReassociate handles for now.
static bool isPotentiallyNaryReassociable(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::GetElementPtr:
  case Instruction::Mul:
    return true;
  default:
    return false;
  }
}

bool NaryReassociatePass::doOneIteration(Function &F) {
  bool Changed = false;
  SeenExprs.clear();
  // Process the basic blocks in a depth-first traversal of the dominator
  // tree. This order ensures that all bases of a candidate are in Candidates
  // when we process it.
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ++I) {
      if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(&*I)) {
        const SCEV *OldSCEV = SE->getSCEV(&*I);
        if (Instruction *NewI = tryReassociate(&*I)) {
          Changed = true;
          SE->forgetValue(&*I);
          I->replaceAllUsesWith(NewI);
          WeakVH NewIExist = NewI;
          // If SeenExprs/NewIExist contains I's WeakTrackingVH/WeakVH, that
          // entry will be replaced with nullptr if deleted.
          RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
          if (!NewIExist) {
            // Rare occasion where the new instruction (NewI) has also been
            // removed, probably because parts of the input code were dead from
            // the beginning; reset the iterator and start over from the
            // beginning of the block.
            I = BB->begin();
            continue;
          }
          I = NewI->getIterator();
        }
        // Add the rewritten instruction to SeenExprs; the original instruction
        // is deleted.
        const SCEV *NewSCEV = SE->getSCEV(&*I);
        SeenExprs[NewSCEV].push_back(WeakTrackingVH(&*I));
        // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
        // is equivalent to I. However, ScalarEvolution::getSCEV may
        // weaken nsw, causing NewSCEV not to equal OldSCEV. For example,
        // suppose we reassociate
        //   I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4
        // to
        //   NewI = &a[sext(i)] + sext(j).
        //
        // ScalarEvolution computes
        //   getSCEV(I)    = a + 4 * sext(i + j)
        //   getSCEV(NewI) = a + 4 * sext(i) + 4 * sext(j)
        // which are different SCEVs.
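        // (Once the nsw flag is lost, sext(i + j) is no longer known to equal
        // sext(i) + sext(j), so ScalarEvolution keeps the two forms distinct.)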
        //
        // To alleviate this issue of ScalarEvolution not always capturing
        // equivalence, we add I to SeenExprs[OldSCEV] as well so that we can
        // map both SCEV before and after tryReassociate(I) to I.
        //
        // This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll.
        if (NewSCEV != OldSCEV)
          SeenExprs[OldSCEV].push_back(WeakTrackingVH(&*I));
      }
    }
  }
  return Changed;
}

Instruction *NaryReassociatePass::tryReassociate(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
    return tryReassociateBinaryOp(cast<BinaryOperator>(I));
  case Instruction::GetElementPtr:
    return tryReassociateGEP(cast<GetElementPtrInst>(I));
  default:
    llvm_unreachable("should be filtered out by isPotentiallyNaryReassociable");
  }
}

static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value *, 4> Indices;
  for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
    Indices.push_back(*I);
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}

Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
  // Not worth reassociating GEP if it is foldable.
  if (isGEPFoldable(GEP, TTI))
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }
  }
  return nullptr;
}

bool NaryReassociatePass::requiresSignExtension(Value *Index,
                                                GetElementPtrInst *GEP) {
  unsigned PointerSizeInBits =
      DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace());
  return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType) {
  Value *IndexToSplit = GEP->getOperand(I + 1);
  if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
    IndexToSplit = SExt->getOperand(0);
  } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
    // zext can be treated as sext if the source is non-negative.
    if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
      IndexToSplit = ZExt->getOperand(0);
  }

  if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
    // If the I-th index needs sext and the underlying add is not equipped with
    // nsw, we cannot split the add because
    //   sext(LHS + RHS) != sext(LHS) + sext(RHS).
    if (requiresSignExtension(IndexToSplit, GEP) &&
        computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
            OverflowResult::NeverOverflows)
      return nullptr;

    Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
    // IndexToSplit = LHS + RHS.
    if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
      return NewGEP;
    // Symmetrically, try IndexToSplit = RHS + LHS.
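    // (If LHS == RHS, the swapped form is identical to the one just tried, so
    // only retry with the operands swapped when they differ.)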
    if (LHS != RHS) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
        return NewGEP;
    }
  }
  return nullptr;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(SE->getSCEV(*Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
      DL->getTypeSizeInBits(LHS->getType()) <
          DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
    // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
    // zext if the source operand is proved non-negative. We should do that
    // consistently so that CandidateExpr is more likely to have appeared
    // before. See @reassociate_gep_assume for an example of this
    // canonicalization.
    IndexExprs[I] =
        SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
  }
  const SCEV *CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP),
                                             IndexExprs);

  Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  IRBuilder<> Builder(GEP);
  // Candidate does not necessarily have the same pointer type as GEP. Use
  // bitcast or pointer cast to make sure they have the same type, so that the
  // later RAUW doesn't complain.
  Candidate = Builder.CreateBitOrPointerCast(Candidate, GEP->getType());
  assert(Candidate->getType() == GEP->getType());

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = GEP->getResultElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // Note that because I is not necessarily the last index of the GEP, the size
  // of the type at the I-th index (IndexedSize) is not necessarily divisible
  // by ElementSize. For example,
  //
  //   #pragma pack(1)
  //   struct S {
  //     int a[3];
  //     int64 b[8];
  //   };
  //   #pragma pack()
  //
  // sizeof(S) = 76 is indivisible by sizeof(int64) = 8.
  //
  // We simply bail out on this case for now. TODO: handle it by emitting an
  // uglygep.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))]
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(
      Builder.CreateGEP(GEP->getResultElementType(), Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(BinaryOperator *I) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  // There is no need to reassociate 0.
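  // (An expression that is known to fold to zero exposes no redundancy worth
  // reusing, so bail out early.)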
  if (SE->getSCEV(I)->isZero())
    return nullptr;
  if (auto *NewI = tryReassociateBinaryOp(LHS, RHS, I))
    return NewI;
  if (auto *NewI = tryReassociateBinaryOp(RHS, LHS, I))
    return NewI;
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                                         BinaryOperator *I) {
  Value *A = nullptr, *B = nullptr;
  // To be conservative, we reassociate I only when it is the only user of
  // (A op B).
  if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
    // I = (A op B) op RHS
    //   = (A op RHS) op B or (B op RHS) op A
    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
    const SCEV *RHSExpr = SE->getSCEV(RHS);
    if (BExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
        return NewI;
    }
    if (AExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, BExpr, RHSExpr), A, I))
        return NewI;
    }
  }
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
                                                          Value *RHS,
                                                          BinaryOperator *I) {
  // Look for the closest dominator LHS of I that computes LHSExpr, and replace
  // I with LHS op RHS.
  auto *LHS = findClosestMatchingDominator(LHSExpr, I);
  if (LHS == nullptr)
    return nullptr;

  Instruction *NewI = nullptr;
  switch (I->getOpcode()) {
  case Instruction::Add:
    NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
    break;
  case Instruction::Mul:
    NewI = BinaryOperator::CreateMul(LHS, RHS, "", I);
    break;
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  NewI->takeName(I);
  return NewI;
}

bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
                                         Value *&Op1, Value *&Op2) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return match(V, m_Add(m_Value(Op1), m_Value(Op2)));
  case Instruction::Mul:
    return match(V, m_Mul(m_Value(Op1), m_Value(Op2)));
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return false;
}

const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return nullptr;
}

Instruction *
NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
                                                  Instruction *Dominatee) {
  auto Pos = SeenExprs.find(CandidateExpr);
  if (Pos == SeenExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
    // Candidates stores WeakTrackingVHs, so a candidate can be nullptr if it
    // was removed during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
      if (DT->dominates(CandidateInstruction, Dominatee))
        return CandidateInstruction;
    }
    Candidates.pop_back();
  }
  return nullptr;
}