//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the above code to
//
//   int t = a + b;
//   bar(t);
//   bar(t + 2);
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes (a + 2) + b is the best form according
// to its rank system.
//
// To address this limitation, NaryReassociate reassociates an expression in a
// form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
// (a + b) is computed before.
//
// NaryReassociate works as follows. For every instruction in the form of
// (a + b) + c, it checks whether a + c or b + c is already computed by a
// dominating instruction. If so, it then reassociates (a + b) + c into
// (a + c) + b or (b + c) + a and removes the redundancy accordingly. To
// efficiently look up whether an expression is computed before, we store each
// instruction seen and its SCEV into an SCEV-to-instruction map.
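//
// For example (illustrative notation), after visiting
//
//   t0 = a + b
//
// the map contains SeenExprs[SCEV(a + b)] = {t0}; when (a + 2) + b is later
// reassociated, its candidate form (a + b) + 2 is found by looking up
// SCEV(a + b) and reusing t0.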
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking through the
// function in depth-first order. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to be run multiple
// iterations before emitting optimal code. One source of this need is that we
// only split an operand when it is used only once. The above algorithm can
// eliminate an instruction and decrease the usage count of its operands. As a
// result, an instruction that previously had multiple uses may become a
// single-use instruction and thus eligible for split consideration. For
// example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac + b because ab is
// used twice. However, we can reassociate ab2c to abc + b in the first
// iteration. As a result, ab2 becomes dead and ab will be used only once in
// the second iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds, muls, GEPs and min/max for now. This should
//    be extended and generalized.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
#include <cstdint>
#include <type_traits>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "nary-reassociate"

namespace {

class NaryReassociateLegacyPass : public FunctionPass {
public:
  static char ID;

  NaryReassociateLegacyPass() : FunctionPass(ID) {
    initializeNaryReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override { return false; }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  NaryReassociatePass Impl;
};

} // end anonymous namespace

char NaryReassociateLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(NaryReassociateLegacyPass, "nary-reassociate",
                      "Nary reassociation", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociateLegacyPass, "nary-reassociate",
                    "Nary reassociation", false, false)

FunctionPass *llvm::createNaryReassociatePass() {
  return new NaryReassociateLegacyPass();
}

bool NaryReassociateLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  return Impl.runImpl(F, AC, DT, SE, TLI, TTI);
}

PreservedAnalyses NaryReassociatePass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  if (!runImpl(F, AC, DT, SE, TLI, TTI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<ScalarEvolutionAnalysis>();
  return PA;
}

bool NaryReassociatePass::runImpl(Function &F, AssumptionCache *AC_,
                                  DominatorTree *DT_, ScalarEvolution *SE_,
                                  TargetLibraryInfo *TLI_,
                                  TargetTransformInfo *TTI_) {
  AC = AC_;
  DT = DT_;
  SE = SE_;
  TLI = TLI_;
  TTI = TTI_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = doOneIteration(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

bool NaryReassociatePass::doOneIteration(Function &F) {
  bool Changed = false;
  SeenExprs.clear();
  // Process the basic blocks in a depth-first traversal of the dominator
  // tree. This order ensures that all bases of a candidate are already in
  // SeenExprs when we process the candidate.
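  // For example (illustrative): if BB1 dominates BB2, every instruction of BB1
  // is visited and recorded in SeenExprs before any instruction of BB2, so a
  // dominating a + c is guaranteed to be in the map by the time we try to
  // reassociate (a + b) + c in BB2.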
  SmallVector<WeakTrackingVH, 16> DeadInsts;
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (Instruction &OrigI : *BB) {
      const SCEV *OrigSCEV = nullptr;
      if (Instruction *NewI = tryReassociate(&OrigI, OrigSCEV)) {
        Changed = true;
        OrigI.replaceAllUsesWith(NewI);

        // Add 'OrigI' to the list of dead instructions.
        DeadInsts.push_back(WeakTrackingVH(&OrigI));
        // Add the rewritten instruction to SeenExprs; the original instruction
        // is about to be deleted.
        const SCEV *NewSCEV = SE->getSCEV(NewI);
        SeenExprs[NewSCEV].push_back(WeakTrackingVH(NewI));

        // Ideally, NewSCEV should equal OrigSCEV because tryReassociate(I)
        // is equivalent to I. However, ScalarEvolution::getSCEV may
        // weaken nsw, causing NewSCEV not to equal OrigSCEV. For example,
        // suppose we reassociate
        //   I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4
        // to
        //   NewI = &a[sext(i)] + sext(j).
        //
        // ScalarEvolution computes
        //   getSCEV(I)    = a + 4 * sext(i + j)
        //   getSCEV(NewI) = a + 4 * sext(i) + 4 * sext(j)
        // which are different SCEVs.
        //
        // To alleviate this issue of ScalarEvolution not always capturing
        // equivalence, we also add NewI to SeenExprs[OrigSCEV], so that both
        // the SCEV before and the SCEV after tryReassociate(I) map to NewI.
        //
        // This improvement is exercised in @reassociate_gep_nsw in
        // nary-gep.ll.
        if (NewSCEV != OrigSCEV)
          SeenExprs[OrigSCEV].push_back(WeakTrackingVH(NewI));
      } else if (OrigSCEV)
        SeenExprs[OrigSCEV].push_back(WeakTrackingVH(&OrigI));
    }
  }
  // Delete all dead instructions from 'DeadInsts'. Note that ScalarEvolution
  // is kept up to date along the way via the forgetValue callback.
  RecursivelyDeleteTriviallyDeadInstructionsPermissive(
      DeadInsts, TLI, nullptr, [this](Value *V) { SE->forgetValue(V); });

  return Changed;
}

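// MaxMin_match recognizes the canonical compare+select min/max idiom. For
// example (illustrative IR), with PredT = smax_pred_ty it matches
//
//   %cmp = icmp sgt i32 %a, %b
//   %max = select i1 %cmp, i32 %a, i32 %b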
template <typename PredT>
Instruction *
NaryReassociatePass::matchAndReassociateMinOrMax(Instruction *I,
                                                 const SCEV *&OrigSCEV) {
  Value *LHS = nullptr;
  Value *RHS = nullptr;

  auto MinMaxMatcher =
      MaxMin_match<ICmpInst, bind_ty<Value>, bind_ty<Value>, PredT>(
          m_Value(LHS), m_Value(RHS));
  if (match(I, MinMaxMatcher)) {
    OrigSCEV = SE->getSCEV(I);
    if (auto *NewMinMax = dyn_cast_or_null<Instruction>(
            tryReassociateMinOrMax(I, MinMaxMatcher, LHS, RHS)))
      return NewMinMax;
    if (auto *NewMinMax = dyn_cast_or_null<Instruction>(
            tryReassociateMinOrMax(I, MinMaxMatcher, RHS, LHS)))
      return NewMinMax;
  }
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociate(Instruction *I,
                                                 const SCEV *&OrigSCEV) {
  if (!SE->isSCEVable(I->getType()))
    return nullptr;

  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
    OrigSCEV = SE->getSCEV(I);
    return tryReassociateBinaryOp(cast<BinaryOperator>(I));
  case Instruction::GetElementPtr:
    OrigSCEV = SE->getSCEV(I);
    return tryReassociateGEP(cast<GetElementPtrInst>(I));
  default:
    break;
  }

  // Try to match signed/unsigned Min/Max.
  Instruction *ResI = nullptr;
  // TODO: Currently min/max reassociation is restricted to integer types,
  // because SCEVExpander may introduce incompatible forms of min/max for
  // pointer types.
  if (I->getType()->isIntegerTy())
    if ((ResI = matchAndReassociateMinOrMax<umin_pred_ty>(I, OrigSCEV)) ||
        (ResI = matchAndReassociateMinOrMax<smin_pred_ty>(I, OrigSCEV)) ||
        (ResI = matchAndReassociateMinOrMax<umax_pred_ty>(I, OrigSCEV)) ||
        (ResI = matchAndReassociateMinOrMax<smax_pred_ty>(I, OrigSCEV)))
      return ResI;

  return nullptr;
}

static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value *, 4> Indices(GEP->indices());
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}

Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
  // Not worth reassociating GEP if it is foldable.
  if (isGEPFoldable(GEP, TTI))
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I - 1, GTI.getIndexedType()))
        return NewGEP;
    }
  }
  return nullptr;
}

bool NaryReassociatePass::requiresSignExtension(Value *Index,
                                                GetElementPtrInst *GEP) {
  unsigned IndexSizeInBits =
      DL->getIndexSizeInBits(GEP->getType()->getPointerAddressSpace());
  return cast<IntegerType>(Index->getType())->getBitWidth() < IndexSizeInBits;
}

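// Tries to split the I-th index of GEP into a sum, looking through sext (and
// zext of a provably non-negative value). For example (using the notation of
// the comment in doOneIteration),
//
//   GEP = &a[sext(i +nsw j)]
//
// can reuse a dominating &a[sext(i)] and add sext(j) * sizeof(a[0]) to it.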
GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType) {
  Value *IndexToSplit = GEP->getOperand(I + 1);
  if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
    IndexToSplit = SExt->getOperand(0);
  } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
    // zext can be treated as sext if the source is non-negative.
    if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
      IndexToSplit = ZExt->getOperand(0);
  }

  if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
    // If the I-th index needs sext and the underlying add is not equipped with
    // nsw, we cannot split the add because
    //   sext(LHS + RHS) != sext(LHS) + sext(RHS).
    if (requiresSignExtension(IndexToSplit, GEP) &&
        computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
            OverflowResult::NeverOverflows)
      return nullptr;

    Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
    // IndexToSplit = LHS + RHS.
    if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
      return NewGEP;
    // Symmetrically, try IndexToSplit = RHS + LHS.
    if (LHS != RHS) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
        return NewGEP;
    }
  }
  return nullptr;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
  SmallVector<const SCEV *, 4> IndexExprs;
  for (Use &Index : GEP->indices())
    IndexExprs.push_back(SE->getSCEV(Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
      DL->getTypeSizeInBits(LHS->getType()).getFixedValue() <
          DL->getTypeSizeInBits(GEP->getOperand(I)->getType())
              .getFixedValue()) {
    // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
    // zext if the source operand is proved non-negative. We should do the same
    // here, so that CandidateExpr is more likely to have been seen before. See
    // @reassociate_gep_assume for an example of this canonicalization.
    IndexExprs[I] =
        SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
  }
  const SCEV *CandidateExpr =
      SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);

  Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  IRBuilder<> Builder(GEP);
  // Candidate does not necessarily have the same pointer type as GEP. Use
  // bitcast or pointer cast to make sure they have the same type, so that the
  // later RAUW doesn't complain.
  Candidate = Builder.CreateBitOrPointerCast(Candidate, GEP->getType());
  assert(Candidate->getType() == GEP->getType());

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType).
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = GEP->getResultElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // Note that, because I is not necessarily the last index of the GEP, the
  // size of the type at the I-th index (IndexedSize) is not necessarily
  // divisible by ElementSize. For example,
  //
  //   #pragma pack(1)
  //   struct S {
  //     int a[3];
  //     int64 b[8];
  //   };
  //   #pragma pack()
  //
  //   sizeof(S) = 76 is indivisible by sizeof(int64) = 8.
  //
  // We bail out on this case for now; TODO: emit an uglygep instead.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))].
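  // For example (illustrative): if IndexedType is [10 x i32] (IndexedSize =
  // 40) and the result element type is i32 (ElementSize = 4), RHS is scaled
  // below by 40 / 4 = 10 elements.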
  Type *PtrIdxTy = DL->getIndexType(GEP->getType());
  if (RHS->getType() != PtrIdxTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, PtrIdxTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(PtrIdxTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(
      Builder.CreateGEP(GEP->getResultElementType(), Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(BinaryOperator *I) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  // There is no need to reassociate 0.
  if (SE->getSCEV(I)->isZero())
    return nullptr;
  if (auto *NewI = tryReassociateBinaryOp(LHS, RHS, I))
    return NewI;
  if (auto *NewI = tryReassociateBinaryOp(RHS, LHS, I))
    return NewI;
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                                         BinaryOperator *I) {
  Value *A = nullptr, *B = nullptr;
  // To be conservative, we reassociate I only when it is the only user of
  // (A op B).
  if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
    // I = (A op B) op RHS
    //   = (A op RHS) op B or (B op RHS) op A
    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
    const SCEV *RHSExpr = SE->getSCEV(RHS);
    if (BExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
        return NewI;
    }
    if (AExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, BExpr, RHSExpr), A, I))
        return NewI;
    }
  }
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
                                                          Value *RHS,
                                                          BinaryOperator *I) {
  // Look for the closest dominator LHS of I that computes LHSExpr, and replace
  // I with LHS op RHS.
  auto *LHS = findClosestMatchingDominator(LHSExpr, I);
  if (LHS == nullptr)
    return nullptr;

  Instruction *NewI = nullptr;
  switch (I->getOpcode()) {
  case Instruction::Add:
    NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
    break;
  case Instruction::Mul:
    NewI = BinaryOperator::CreateMul(LHS, RHS, "", I);
    break;
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  NewI->takeName(I);
  return NewI;
}

bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
                                         Value *&Op1, Value *&Op2) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return match(V, m_Add(m_Value(Op1), m_Value(Op2)));
  case Instruction::Mul:
    return match(V, m_Mul(m_Value(Op1), m_Value(Op2)));
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return false;
}

const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return nullptr;
}

Instruction *
NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
                                                  Instruction *Dominatee) {
  auto Pos = SeenExprs.find(CandidateExpr);
  if (Pos == SeenExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
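  // For example (illustrative): if Candidates holds [c1 (in the entry block),
  // c2 (in one arm of a branch)] and c2 does not dominate Dominatee, c2 cannot
  // dominate any later-visited instruction either, so it is popped for good;
  // every candidate is thus pushed and popped at most once over the whole walk.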
  while (!Candidates.empty()) {
    // Candidates stores WeakTrackingVHs, so a candidate can be nullptr if it
    // was removed during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
      if (DT->dominates(CandidateInstruction, Dominatee))
        return CandidateInstruction;
    }
    Candidates.pop_back();
  }
  return nullptr;
}

template <typename MaxMinT> static SCEVTypes convertToSCEVType(MaxMinT &MM) {
  if (std::is_same_v<smax_pred_ty, typename MaxMinT::PredType>)
    return scSMaxExpr;
  else if (std::is_same_v<umax_pred_ty, typename MaxMinT::PredType>)
    return scUMaxExpr;
  else if (std::is_same_v<smin_pred_ty, typename MaxMinT::PredType>)
    return scSMinExpr;
  else if (std::is_same_v<umin_pred_ty, typename MaxMinT::PredType>)
    return scUMinExpr;

  llvm_unreachable("Can't convert MinMax pattern to SCEV type");
  return scUnknown;
}

// Parameters:
//  I           - instruction matched by MaxMinMatch matcher
//  MaxMinMatch - min/max idiom matcher
//  LHS         - first operand of I
//  RHS         - second operand of I
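//
// For example (illustrative): given a dominating %t that computes smax(%a, %c)
// and is recorded in SeenExprs,
//
//   I = smax(smax(%a, %b), %c)
//
// is rewritten to smax(%t, %b); the inner smax(%a, %b) then becomes trivially
// dead, provided I was its only (transitive) user.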
template <typename MaxMinT>
Value *NaryReassociatePass::tryReassociateMinOrMax(Instruction *I,
                                                   MaxMinT MaxMinMatch,
                                                   Value *LHS, Value *RHS) {
  Value *A = nullptr, *B = nullptr;
  MaxMinT m_MaxMin(m_Value(A), m_Value(B));

  if (LHS->hasNUsesOrMore(3) ||
      // The optimization is profitable only if LHS can be removed in the end.
      // In other words, LHS should be used (directly or indirectly) by I only.
      llvm::any_of(LHS->users(),
                   [&](auto *U) {
                     return U != I &&
                            !(U->hasOneUser() && *U->users().begin() == I);
                   }) ||
      !match(LHS, m_MaxMin))
    return nullptr;

  auto tryCombination = [&](Value *A, const SCEV *AExpr, Value *B,
                            const SCEV *BExpr, Value *C,
                            const SCEV *CExpr) -> Value * {
    SmallVector<const SCEV *, 2> Ops1{BExpr, AExpr};
    const SCEVTypes SCEVType = convertToSCEVType(m_MaxMin);
    const SCEV *R1Expr = SE->getMinMaxExpr(SCEVType, Ops1);

    Instruction *R1MinMax = findClosestMatchingDominator(R1Expr, I);

    if (!R1MinMax)
      return nullptr;

    LLVM_DEBUG(dbgs() << "NARY: Found common sub-expr: " << *R1MinMax << "\n");

    SmallVector<const SCEV *, 2> Ops2{SE->getUnknown(C),
                                      SE->getUnknown(R1MinMax)};
    const SCEV *R2Expr = SE->getMinMaxExpr(SCEVType, Ops2);

    SCEVExpander Expander(*SE, *DL, "nary-reassociate");
    Value *NewMinMax = Expander.expandCodeFor(R2Expr, I->getType(), I);
    NewMinMax->setName(Twine(I->getName()).concat(".nary"));

    LLVM_DEBUG(dbgs() << "NARY: Deleting:  " << *I << "\n"
                      << "NARY: Inserting: " << *NewMinMax << "\n");
    return NewMinMax;
  };

  const SCEV *AExpr = SE->getSCEV(A);
  const SCEV *BExpr = SE->getSCEV(B);
  const SCEV *RHSExpr = SE->getSCEV(RHS);

  if (BExpr != RHSExpr) {
    // Try (A op RHS) op B.
    if (auto *NewMinMax = tryCombination(A, AExpr, RHS, RHSExpr, B, BExpr))
      return NewMinMax;
  }

  if (AExpr != RHSExpr) {
    // Try (RHS op B) op A.
    if (auto *NewMinMax = tryCombination(RHS, RHSExpr, B, BExpr, A, AExpr))
      return NewMinMax;
  }

  return nullptr;
}