1 //===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass implements an idiom recognizer that transforms simple loops into a 10 // non-loop form. In cases that this kicks in, it can be a significant 11 // performance win. 12 // 13 // If compiling for code size we avoid idiom recognition if the resulting 14 // code could be larger than the code for the original loop. One way this could 15 // happen is if the loop is not removable after idiom recognition due to the 16 // presence of non-idiom instructions. The initial implementation of the 17 // heuristics applies to idioms in multi-block loops. 18 // 19 //===----------------------------------------------------------------------===// 20 // 21 // TODO List: 22 // 23 // Future loop memory idioms to recognize: 24 // memcmp, strlen, etc. 25 // Future floating point idioms to recognize in -ffast-math mode: 26 // fpowi 27 // Future integer operation idioms to recognize: 28 // ctpop 29 // 30 // Beware that isel's default lowering for ctpop is highly inefficient for 31 // i64 and larger types when i64 is legal and the value has few bits set. It 32 // would be good to enhance isel to emit a loop for ctpop in this case. 33 // 34 // This could recognize common matrix multiplies and dot product idioms and 35 // replace them with calls to BLAS (if linked in??). 36 // 37 //===----------------------------------------------------------------------===// 38 39 #include "llvm/Transforms/Scalar/LoopIdiomRecognize.h" 40 #include "llvm/ADT/APInt.h" 41 #include "llvm/ADT/ArrayRef.h" 42 #include "llvm/ADT/DenseMap.h" 43 #include "llvm/ADT/MapVector.h" 44 #include "llvm/ADT/SetVector.h" 45 #include "llvm/ADT/SmallPtrSet.h" 46 #include "llvm/ADT/SmallVector.h" 47 #include "llvm/ADT/Statistic.h" 48 #include "llvm/ADT/StringRef.h" 49 #include "llvm/Analysis/AliasAnalysis.h" 50 #include "llvm/Analysis/CmpInstAnalysis.h" 51 #include "llvm/Analysis/LoopAccessAnalysis.h" 52 #include "llvm/Analysis/LoopInfo.h" 53 #include "llvm/Analysis/LoopPass.h" 54 #include "llvm/Analysis/MemoryLocation.h" 55 #include "llvm/Analysis/MemorySSA.h" 56 #include "llvm/Analysis/MemorySSAUpdater.h" 57 #include "llvm/Analysis/MustExecute.h" 58 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 59 #include "llvm/Analysis/ScalarEvolution.h" 60 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 61 #include "llvm/Analysis/TargetLibraryInfo.h" 62 #include "llvm/Analysis/TargetTransformInfo.h" 63 #include "llvm/Analysis/ValueTracking.h" 64 #include "llvm/IR/Attributes.h" 65 #include "llvm/IR/BasicBlock.h" 66 #include "llvm/IR/Constant.h" 67 #include "llvm/IR/Constants.h" 68 #include "llvm/IR/DataLayout.h" 69 #include "llvm/IR/DebugLoc.h" 70 #include "llvm/IR/DerivedTypes.h" 71 #include "llvm/IR/Dominators.h" 72 #include "llvm/IR/GlobalValue.h" 73 #include "llvm/IR/GlobalVariable.h" 74 #include "llvm/IR/IRBuilder.h" 75 #include "llvm/IR/InstrTypes.h" 76 #include "llvm/IR/Instruction.h" 77 #include "llvm/IR/Instructions.h" 78 #include "llvm/IR/IntrinsicInst.h" 79 #include "llvm/IR/Intrinsics.h" 80 #include "llvm/IR/LLVMContext.h" 81 #include "llvm/IR/Module.h" 82 #include "llvm/IR/PassManager.h" 83 #include "llvm/IR/PatternMatch.h" 84 #include "llvm/IR/Type.h" 85 #include "llvm/IR/User.h" 86 #include "llvm/IR/Value.h" 87 #include "llvm/IR/ValueHandle.h" 88 #include "llvm/InitializePasses.h" 89 #include "llvm/Pass.h" 90 #include "llvm/Support/Casting.h" 91 #include "llvm/Support/CommandLine.h" 92 #include "llvm/Support/Debug.h" 93 #include "llvm/Support/InstructionCost.h" 94 #include "llvm/Support/raw_ostream.h" 95 #include "llvm/Transforms/Scalar.h" 96 #include "llvm/Transforms/Utils/BuildLibCalls.h" 97 #include "llvm/Transforms/Utils/Local.h" 98 #include "llvm/Transforms/Utils/LoopUtils.h" 99 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 100 #include <algorithm> 101 #include <cassert> 102 #include <cstdint> 103 #include <utility> 104 #include <vector> 105 106 using namespace llvm; 107 108 #define DEBUG_TYPE "loop-idiom" 109 110 STATISTIC(NumMemSet, "Number of memset's formed from loop stores"); 111 STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores"); 112 STATISTIC(NumMemMove, "Number of memmove's formed from loop load+stores"); 113 STATISTIC( 114 NumShiftUntilBitTest, 115 "Number of uncountable loops recognized as 'shift until bitttest' idiom"); 116 STATISTIC(NumShiftUntilZero, 117 "Number of uncountable loops recognized as 'shift until zero' idiom"); 118 119 bool DisableLIRP::All; 120 static cl::opt<bool, true> 121 DisableLIRPAll("disable-" DEBUG_TYPE "-all", 122 cl::desc("Options to disable Loop Idiom Recognize Pass."), 123 cl::location(DisableLIRP::All), cl::init(false), 124 cl::ReallyHidden); 125 126 bool DisableLIRP::Memset; 127 static cl::opt<bool, true> 128 DisableLIRPMemset("disable-" DEBUG_TYPE "-memset", 129 cl::desc("Proceed with loop idiom recognize pass, but do " 130 "not convert loop(s) to memset."), 131 cl::location(DisableLIRP::Memset), cl::init(false), 132 cl::ReallyHidden); 133 134 bool DisableLIRP::Memcpy; 135 static cl::opt<bool, true> 136 DisableLIRPMemcpy("disable-" DEBUG_TYPE "-memcpy", 137 cl::desc("Proceed with loop idiom recognize pass, but do " 138 "not convert loop(s) to memcpy."), 139 cl::location(DisableLIRP::Memcpy), cl::init(false), 140 cl::ReallyHidden); 141 142 static cl::opt<bool> UseLIRCodeSizeHeurs( 143 "use-lir-code-size-heurs", 144 cl::desc("Use loop idiom recognition code size heuristics when compiling" 145 "with -Os/-Oz"), 146 cl::init(true), cl::Hidden); 147 148 namespace { 149 150 class LoopIdiomRecognize { 151 Loop *CurLoop = nullptr; 152 AliasAnalysis *AA; 153 DominatorTree *DT; 154 LoopInfo *LI; 155 ScalarEvolution *SE; 156 TargetLibraryInfo *TLI; 157 const TargetTransformInfo *TTI; 158 const DataLayout *DL; 159 OptimizationRemarkEmitter &ORE; 160 bool ApplyCodeSizeHeuristics; 161 std::unique_ptr<MemorySSAUpdater> MSSAU; 162 163 public: 164 explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT, 165 LoopInfo *LI, ScalarEvolution *SE, 166 TargetLibraryInfo *TLI, 167 const TargetTransformInfo *TTI, MemorySSA *MSSA, 168 const DataLayout *DL, 169 OptimizationRemarkEmitter &ORE) 170 : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) { 171 if (MSSA) 172 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA); 173 } 174 175 bool runOnLoop(Loop *L); 176 177 private: 178 using StoreList = SmallVector<StoreInst *, 8>; 179 using StoreListMap = MapVector<Value *, StoreList>; 180 181 StoreListMap StoreRefsForMemset; 182 StoreListMap StoreRefsForMemsetPattern; 183 StoreList StoreRefsForMemcpy; 184 bool HasMemset; 185 bool HasMemsetPattern; 186 bool HasMemcpy; 187 188 /// Return code for isLegalStore() 189 enum LegalStoreKind { 190 None = 0, 191 Memset, 192 MemsetPattern, 193 Memcpy, 194 UnorderedAtomicMemcpy, 195 DontUse // Dummy retval never to be used. Allows catching errors in retval 196 // handling. 197 }; 198 199 /// \name Countable Loop Idiom Handling 200 /// @{ 201 202 bool runOnCountableLoop(); 203 bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount, 204 SmallVectorImpl<BasicBlock *> &ExitBlocks); 205 206 void collectStores(BasicBlock *BB); 207 LegalStoreKind isLegalStore(StoreInst *SI); 208 enum class ForMemset { No, Yes }; 209 bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount, 210 ForMemset For); 211 212 template <typename MemInst> 213 bool processLoopMemIntrinsic( 214 BasicBlock *BB, 215 bool (LoopIdiomRecognize::*Processor)(MemInst *, const SCEV *), 216 const SCEV *BECount); 217 bool processLoopMemCpy(MemCpyInst *MCI, const SCEV *BECount); 218 bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount); 219 220 bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize, 221 MaybeAlign StoreAlignment, Value *StoredVal, 222 Instruction *TheStore, 223 SmallPtrSetImpl<Instruction *> &Stores, 224 const SCEVAddRecExpr *Ev, const SCEV *BECount, 225 bool NegStride, bool IsLoopMemset = false); 226 bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount); 227 bool processLoopStoreOfLoopLoad(Value *DestPtr, Value *SourcePtr, 228 unsigned StoreSize, MaybeAlign StoreAlign, 229 MaybeAlign LoadAlign, Instruction *TheStore, 230 Instruction *TheLoad, 231 const SCEVAddRecExpr *StoreEv, 232 const SCEVAddRecExpr *LoadEv, 233 const SCEV *BECount); 234 bool avoidLIRForMultiBlockLoop(bool IsMemset = false, 235 bool IsLoopMemset = false); 236 237 /// @} 238 /// \name Noncountable Loop Idiom Handling 239 /// @{ 240 241 bool runOnNoncountableLoop(); 242 243 bool recognizePopcount(); 244 void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst, 245 PHINode *CntPhi, Value *Var); 246 bool recognizeAndInsertFFS(); /// Find First Set: ctlz or cttz 247 void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB, 248 Instruction *CntInst, PHINode *CntPhi, 249 Value *Var, Instruction *DefX, 250 const DebugLoc &DL, bool ZeroCheck, 251 bool IsCntPhiUsedOutsideLoop); 252 253 bool recognizeShiftUntilBitTest(); 254 bool recognizeShiftUntilZero(); 255 256 /// @} 257 }; 258 259 class LoopIdiomRecognizeLegacyPass : public LoopPass { 260 public: 261 static char ID; 262 263 explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) { 264 initializeLoopIdiomRecognizeLegacyPassPass( 265 *PassRegistry::getPassRegistry()); 266 } 267 268 bool runOnLoop(Loop *L, LPPassManager &LPM) override { 269 if (DisableLIRP::All) 270 return false; 271 272 if (skipLoop(L)) 273 return false; 274 275 AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 276 DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 277 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 278 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 279 TargetLibraryInfo *TLI = 280 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI( 281 *L->getHeader()->getParent()); 282 const TargetTransformInfo *TTI = 283 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI( 284 *L->getHeader()->getParent()); 285 const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout(); 286 auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>(); 287 MemorySSA *MSSA = nullptr; 288 if (MSSAAnalysis) 289 MSSA = &MSSAAnalysis->getMSSA(); 290 291 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis 292 // pass. Function analyses need to be preserved across loop transformations 293 // but ORE cannot be preserved (see comment before the pass definition). 294 OptimizationRemarkEmitter ORE(L->getHeader()->getParent()); 295 296 LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, MSSA, DL, ORE); 297 return LIR.runOnLoop(L); 298 } 299 300 /// This transformation requires natural loop information & requires that 301 /// loop preheaders be inserted into the CFG. 302 void getAnalysisUsage(AnalysisUsage &AU) const override { 303 AU.addRequired<TargetLibraryInfoWrapperPass>(); 304 AU.addRequired<TargetTransformInfoWrapperPass>(); 305 AU.addPreserved<MemorySSAWrapperPass>(); 306 getLoopAnalysisUsage(AU); 307 } 308 }; 309 310 } // end anonymous namespace 311 312 char LoopIdiomRecognizeLegacyPass::ID = 0; 313 314 PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM, 315 LoopStandardAnalysisResults &AR, 316 LPMUpdater &) { 317 if (DisableLIRP::All) 318 return PreservedAnalyses::all(); 319 320 const auto *DL = &L.getHeader()->getModule()->getDataLayout(); 321 322 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis 323 // pass. Function analyses need to be preserved across loop transformations 324 // but ORE cannot be preserved (see comment before the pass definition). 325 OptimizationRemarkEmitter ORE(L.getHeader()->getParent()); 326 327 LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, 328 AR.MSSA, DL, ORE); 329 if (!LIR.runOnLoop(&L)) 330 return PreservedAnalyses::all(); 331 332 auto PA = getLoopPassPreservedAnalyses(); 333 if (AR.MSSA) 334 PA.preserve<MemorySSAAnalysis>(); 335 return PA; 336 } 337 338 INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom", 339 "Recognize loop idioms", false, false) 340 INITIALIZE_PASS_DEPENDENCY(LoopPass) 341 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 342 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 343 INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom", 344 "Recognize loop idioms", false, false) 345 346 Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); } 347 348 static void deleteDeadInstruction(Instruction *I) { 349 I->replaceAllUsesWith(UndefValue::get(I->getType())); 350 I->eraseFromParent(); 351 } 352 353 //===----------------------------------------------------------------------===// 354 // 355 // Implementation of LoopIdiomRecognize 356 // 357 //===----------------------------------------------------------------------===// 358 359 bool LoopIdiomRecognize::runOnLoop(Loop *L) { 360 CurLoop = L; 361 // If the loop could not be converted to canonical form, it must have an 362 // indirectbr in it, just give up. 363 if (!L->getLoopPreheader()) 364 return false; 365 366 // Disable loop idiom recognition if the function's name is a common idiom. 367 StringRef Name = L->getHeader()->getParent()->getName(); 368 if (Name == "memset" || Name == "memcpy") 369 return false; 370 371 // Determine if code size heuristics need to be applied. 372 ApplyCodeSizeHeuristics = 373 L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs; 374 375 HasMemset = TLI->has(LibFunc_memset); 376 HasMemsetPattern = TLI->has(LibFunc_memset_pattern16); 377 HasMemcpy = TLI->has(LibFunc_memcpy); 378 379 if (HasMemset || HasMemsetPattern || HasMemcpy) 380 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 381 return runOnCountableLoop(); 382 383 return runOnNoncountableLoop(); 384 } 385 386 bool LoopIdiomRecognize::runOnCountableLoop() { 387 const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop); 388 assert(!isa<SCEVCouldNotCompute>(BECount) && 389 "runOnCountableLoop() called on a loop without a predictable" 390 "backedge-taken count"); 391 392 // If this loop executes exactly one time, then it should be peeled, not 393 // optimized by this pass. 394 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount)) 395 if (BECst->getAPInt() == 0) 396 return false; 397 398 SmallVector<BasicBlock *, 8> ExitBlocks; 399 CurLoop->getUniqueExitBlocks(ExitBlocks); 400 401 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F[" 402 << CurLoop->getHeader()->getParent()->getName() 403 << "] Countable Loop %" << CurLoop->getHeader()->getName() 404 << "\n"); 405 406 // The following transforms hoist stores/memsets into the loop pre-header. 407 // Give up if the loop has instructions that may throw. 408 SimpleLoopSafetyInfo SafetyInfo; 409 SafetyInfo.computeLoopSafetyInfo(CurLoop); 410 if (SafetyInfo.anyBlockMayThrow()) 411 return false; 412 413 bool MadeChange = false; 414 415 // Scan all the blocks in the loop that are not in subloops. 416 for (auto *BB : CurLoop->getBlocks()) { 417 // Ignore blocks in subloops. 418 if (LI->getLoopFor(BB) != CurLoop) 419 continue; 420 421 MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks); 422 } 423 return MadeChange; 424 } 425 426 static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) { 427 const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1)); 428 return ConstStride->getAPInt(); 429 } 430 431 /// getMemSetPatternValue - If a strided store of the specified value is safe to 432 /// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should 433 /// be passed in. Otherwise, return null. 434 /// 435 /// Note that we don't ever attempt to use memset_pattern8 or 4, because these 436 /// just replicate their input array and then pass on to memset_pattern16. 437 static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) { 438 // FIXME: This could check for UndefValue because it can be merged into any 439 // other valid pattern. 440 441 // If the value isn't a constant, we can't promote it to being in a constant 442 // array. We could theoretically do a store to an alloca or something, but 443 // that doesn't seem worthwhile. 444 Constant *C = dyn_cast<Constant>(V); 445 if (!C) 446 return nullptr; 447 448 // Only handle simple values that are a power of two bytes in size. 449 uint64_t Size = DL->getTypeSizeInBits(V->getType()); 450 if (Size == 0 || (Size & 7) || (Size & (Size - 1))) 451 return nullptr; 452 453 // Don't care enough about darwin/ppc to implement this. 454 if (DL->isBigEndian()) 455 return nullptr; 456 457 // Convert to size in bytes. 458 Size /= 8; 459 460 // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see 461 // if the top and bottom are the same (e.g. for vectors and large integers). 462 if (Size > 16) 463 return nullptr; 464 465 // If the constant is exactly 16 bytes, just use it. 466 if (Size == 16) 467 return C; 468 469 // Otherwise, we'll use an array of the constants. 470 unsigned ArraySize = 16 / Size; 471 ArrayType *AT = ArrayType::get(V->getType(), ArraySize); 472 return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C)); 473 } 474 475 LoopIdiomRecognize::LegalStoreKind 476 LoopIdiomRecognize::isLegalStore(StoreInst *SI) { 477 // Don't touch volatile stores. 478 if (SI->isVolatile()) 479 return LegalStoreKind::None; 480 // We only want simple or unordered-atomic stores. 481 if (!SI->isUnordered()) 482 return LegalStoreKind::None; 483 484 // Avoid merging nontemporal stores. 485 if (SI->getMetadata(LLVMContext::MD_nontemporal)) 486 return LegalStoreKind::None; 487 488 Value *StoredVal = SI->getValueOperand(); 489 Value *StorePtr = SI->getPointerOperand(); 490 491 // Don't convert stores of non-integral pointer types to memsets (which stores 492 // integers). 493 if (DL->isNonIntegralPointerType(StoredVal->getType()->getScalarType())) 494 return LegalStoreKind::None; 495 496 // Reject stores that are so large that they overflow an unsigned. 497 // When storing out scalable vectors we bail out for now, since the code 498 // below currently only works for constant strides. 499 TypeSize SizeInBits = DL->getTypeSizeInBits(StoredVal->getType()); 500 if (SizeInBits.isScalable() || (SizeInBits.getFixedSize() & 7) || 501 (SizeInBits.getFixedSize() >> 32) != 0) 502 return LegalStoreKind::None; 503 504 // See if the pointer expression is an AddRec like {base,+,1} on the current 505 // loop, which indicates a strided store. If we have something else, it's a 506 // random store we can't handle. 507 const SCEVAddRecExpr *StoreEv = 508 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr)); 509 if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine()) 510 return LegalStoreKind::None; 511 512 // Check to see if we have a constant stride. 513 if (!isa<SCEVConstant>(StoreEv->getOperand(1))) 514 return LegalStoreKind::None; 515 516 // See if the store can be turned into a memset. 517 518 // If the stored value is a byte-wise value (like i32 -1), then it may be 519 // turned into a memset of i8 -1, assuming that all the consecutive bytes 520 // are stored. A store of i32 0x01020304 can never be turned into a memset, 521 // but it can be turned into memset_pattern if the target supports it. 522 Value *SplatValue = isBytewiseValue(StoredVal, *DL); 523 524 // Note: memset and memset_pattern on unordered-atomic is yet not supported 525 bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple(); 526 527 // If we're allowed to form a memset, and the stored value would be 528 // acceptable for memset, use it. 529 if (!UnorderedAtomic && HasMemset && SplatValue && !DisableLIRP::Memset && 530 // Verify that the stored value is loop invariant. If not, we can't 531 // promote the memset. 532 CurLoop->isLoopInvariant(SplatValue)) { 533 // It looks like we can use SplatValue. 534 return LegalStoreKind::Memset; 535 } 536 if (!UnorderedAtomic && HasMemsetPattern && !DisableLIRP::Memset && 537 // Don't create memset_pattern16s with address spaces. 538 StorePtr->getType()->getPointerAddressSpace() == 0 && 539 getMemSetPatternValue(StoredVal, DL)) { 540 // It looks like we can use PatternValue! 541 return LegalStoreKind::MemsetPattern; 542 } 543 544 // Otherwise, see if the store can be turned into a memcpy. 545 if (HasMemcpy && !DisableLIRP::Memcpy) { 546 // Check to see if the stride matches the size of the store. If so, then we 547 // know that every byte is touched in the loop. 548 APInt Stride = getStoreStride(StoreEv); 549 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType()); 550 if (StoreSize != Stride && StoreSize != -Stride) 551 return LegalStoreKind::None; 552 553 // The store must be feeding a non-volatile load. 554 LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand()); 555 556 // Only allow non-volatile loads 557 if (!LI || LI->isVolatile()) 558 return LegalStoreKind::None; 559 // Only allow simple or unordered-atomic loads 560 if (!LI->isUnordered()) 561 return LegalStoreKind::None; 562 563 // See if the pointer expression is an AddRec like {base,+,1} on the current 564 // loop, which indicates a strided load. If we have something else, it's a 565 // random load we can't handle. 566 const SCEVAddRecExpr *LoadEv = 567 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand())); 568 if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine()) 569 return LegalStoreKind::None; 570 571 // The store and load must share the same stride. 572 if (StoreEv->getOperand(1) != LoadEv->getOperand(1)) 573 return LegalStoreKind::None; 574 575 // Success. This store can be converted into a memcpy. 576 UnorderedAtomic = UnorderedAtomic || LI->isAtomic(); 577 return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy 578 : LegalStoreKind::Memcpy; 579 } 580 // This store can't be transformed into a memset/memcpy. 581 return LegalStoreKind::None; 582 } 583 584 void LoopIdiomRecognize::collectStores(BasicBlock *BB) { 585 StoreRefsForMemset.clear(); 586 StoreRefsForMemsetPattern.clear(); 587 StoreRefsForMemcpy.clear(); 588 for (Instruction &I : *BB) { 589 StoreInst *SI = dyn_cast<StoreInst>(&I); 590 if (!SI) 591 continue; 592 593 // Make sure this is a strided store with a constant stride. 594 switch (isLegalStore(SI)) { 595 case LegalStoreKind::None: 596 // Nothing to do 597 break; 598 case LegalStoreKind::Memset: { 599 // Find the base pointer. 600 Value *Ptr = getUnderlyingObject(SI->getPointerOperand()); 601 StoreRefsForMemset[Ptr].push_back(SI); 602 } break; 603 case LegalStoreKind::MemsetPattern: { 604 // Find the base pointer. 605 Value *Ptr = getUnderlyingObject(SI->getPointerOperand()); 606 StoreRefsForMemsetPattern[Ptr].push_back(SI); 607 } break; 608 case LegalStoreKind::Memcpy: 609 case LegalStoreKind::UnorderedAtomicMemcpy: 610 StoreRefsForMemcpy.push_back(SI); 611 break; 612 default: 613 assert(false && "unhandled return value"); 614 break; 615 } 616 } 617 } 618 619 /// runOnLoopBlock - Process the specified block, which lives in a counted loop 620 /// with the specified backedge count. This block is known to be in the current 621 /// loop and not in any subloops. 622 bool LoopIdiomRecognize::runOnLoopBlock( 623 BasicBlock *BB, const SCEV *BECount, 624 SmallVectorImpl<BasicBlock *> &ExitBlocks) { 625 // We can only promote stores in this block if they are unconditionally 626 // executed in the loop. For a block to be unconditionally executed, it has 627 // to dominate all the exit blocks of the loop. Verify this now. 628 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) 629 if (!DT->dominates(BB, ExitBlocks[i])) 630 return false; 631 632 bool MadeChange = false; 633 // Look for store instructions, which may be optimized to memset/memcpy. 634 collectStores(BB); 635 636 // Look for a single store or sets of stores with a common base, which can be 637 // optimized into a memset (memset_pattern). The latter most commonly happens 638 // with structs and handunrolled loops. 639 for (auto &SL : StoreRefsForMemset) 640 MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes); 641 642 for (auto &SL : StoreRefsForMemsetPattern) 643 MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No); 644 645 // Optimize the store into a memcpy, if it feeds an similarly strided load. 646 for (auto &SI : StoreRefsForMemcpy) 647 MadeChange |= processLoopStoreOfLoopLoad(SI, BECount); 648 649 MadeChange |= processLoopMemIntrinsic<MemCpyInst>( 650 BB, &LoopIdiomRecognize::processLoopMemCpy, BECount); 651 MadeChange |= processLoopMemIntrinsic<MemSetInst>( 652 BB, &LoopIdiomRecognize::processLoopMemSet, BECount); 653 654 return MadeChange; 655 } 656 657 /// See if this store(s) can be promoted to a memset. 658 bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL, 659 const SCEV *BECount, ForMemset For) { 660 // Try to find consecutive stores that can be transformed into memsets. 661 SetVector<StoreInst *> Heads, Tails; 662 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain; 663 664 // Do a quadratic search on all of the given stores and find 665 // all of the pairs of stores that follow each other. 666 SmallVector<unsigned, 16> IndexQueue; 667 for (unsigned i = 0, e = SL.size(); i < e; ++i) { 668 assert(SL[i]->isSimple() && "Expected only non-volatile stores."); 669 670 Value *FirstStoredVal = SL[i]->getValueOperand(); 671 Value *FirstStorePtr = SL[i]->getPointerOperand(); 672 const SCEVAddRecExpr *FirstStoreEv = 673 cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr)); 674 APInt FirstStride = getStoreStride(FirstStoreEv); 675 unsigned FirstStoreSize = DL->getTypeStoreSize(SL[i]->getValueOperand()->getType()); 676 677 // See if we can optimize just this store in isolation. 678 if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) { 679 Heads.insert(SL[i]); 680 continue; 681 } 682 683 Value *FirstSplatValue = nullptr; 684 Constant *FirstPatternValue = nullptr; 685 686 if (For == ForMemset::Yes) 687 FirstSplatValue = isBytewiseValue(FirstStoredVal, *DL); 688 else 689 FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL); 690 691 assert((FirstSplatValue || FirstPatternValue) && 692 "Expected either splat value or pattern value."); 693 694 IndexQueue.clear(); 695 // If a store has multiple consecutive store candidates, search Stores 696 // array according to the sequence: from i+1 to e, then from i-1 to 0. 697 // This is because usually pairing with immediate succeeding or preceding 698 // candidate create the best chance to find memset opportunity. 699 unsigned j = 0; 700 for (j = i + 1; j < e; ++j) 701 IndexQueue.push_back(j); 702 for (j = i; j > 0; --j) 703 IndexQueue.push_back(j - 1); 704 705 for (auto &k : IndexQueue) { 706 assert(SL[k]->isSimple() && "Expected only non-volatile stores."); 707 Value *SecondStorePtr = SL[k]->getPointerOperand(); 708 const SCEVAddRecExpr *SecondStoreEv = 709 cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr)); 710 APInt SecondStride = getStoreStride(SecondStoreEv); 711 712 if (FirstStride != SecondStride) 713 continue; 714 715 Value *SecondStoredVal = SL[k]->getValueOperand(); 716 Value *SecondSplatValue = nullptr; 717 Constant *SecondPatternValue = nullptr; 718 719 if (For == ForMemset::Yes) 720 SecondSplatValue = isBytewiseValue(SecondStoredVal, *DL); 721 else 722 SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL); 723 724 assert((SecondSplatValue || SecondPatternValue) && 725 "Expected either splat value or pattern value."); 726 727 if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) { 728 if (For == ForMemset::Yes) { 729 if (isa<UndefValue>(FirstSplatValue)) 730 FirstSplatValue = SecondSplatValue; 731 if (FirstSplatValue != SecondSplatValue) 732 continue; 733 } else { 734 if (isa<UndefValue>(FirstPatternValue)) 735 FirstPatternValue = SecondPatternValue; 736 if (FirstPatternValue != SecondPatternValue) 737 continue; 738 } 739 Tails.insert(SL[k]); 740 Heads.insert(SL[i]); 741 ConsecutiveChain[SL[i]] = SL[k]; 742 break; 743 } 744 } 745 } 746 747 // We may run into multiple chains that merge into a single chain. We mark the 748 // stores that we transformed so that we don't visit the same store twice. 749 SmallPtrSet<Value *, 16> TransformedStores; 750 bool Changed = false; 751 752 // For stores that start but don't end a link in the chain: 753 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end(); 754 it != e; ++it) { 755 if (Tails.count(*it)) 756 continue; 757 758 // We found a store instr that starts a chain. Now follow the chain and try 759 // to transform it. 760 SmallPtrSet<Instruction *, 8> AdjacentStores; 761 StoreInst *I = *it; 762 763 StoreInst *HeadStore = I; 764 unsigned StoreSize = 0; 765 766 // Collect the chain into a list. 767 while (Tails.count(I) || Heads.count(I)) { 768 if (TransformedStores.count(I)) 769 break; 770 AdjacentStores.insert(I); 771 772 StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType()); 773 // Move to the next value in the chain. 774 I = ConsecutiveChain[I]; 775 } 776 777 Value *StoredVal = HeadStore->getValueOperand(); 778 Value *StorePtr = HeadStore->getPointerOperand(); 779 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr)); 780 APInt Stride = getStoreStride(StoreEv); 781 782 // Check to see if the stride matches the size of the stores. If so, then 783 // we know that every byte is touched in the loop. 784 if (StoreSize != Stride && StoreSize != -Stride) 785 continue; 786 787 bool NegStride = StoreSize == -Stride; 788 789 if (processLoopStridedStore(StorePtr, StoreSize, 790 MaybeAlign(HeadStore->getAlignment()), 791 StoredVal, HeadStore, AdjacentStores, StoreEv, 792 BECount, NegStride)) { 793 TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end()); 794 Changed = true; 795 } 796 } 797 798 return Changed; 799 } 800 801 /// processLoopMemIntrinsic - Template function for calling different processor 802 /// functions based on mem instrinsic type. 803 template <typename MemInst> 804 bool LoopIdiomRecognize::processLoopMemIntrinsic( 805 BasicBlock *BB, 806 bool (LoopIdiomRecognize::*Processor)(MemInst *, const SCEV *), 807 const SCEV *BECount) { 808 bool MadeChange = false; 809 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 810 Instruction *Inst = &*I++; 811 // Look for memory instructions, which may be optimized to a larger one. 812 if (MemInst *MI = dyn_cast<MemInst>(Inst)) { 813 WeakTrackingVH InstPtr(&*I); 814 if (!(this->*Processor)(MI, BECount)) 815 continue; 816 MadeChange = true; 817 818 // If processing the instruction invalidated our iterator, start over from 819 // the top of the block. 820 if (!InstPtr) 821 I = BB->begin(); 822 } 823 } 824 return MadeChange; 825 } 826 827 /// processLoopMemCpy - See if this memcpy can be promoted to a large memcpy 828 bool LoopIdiomRecognize::processLoopMemCpy(MemCpyInst *MCI, 829 const SCEV *BECount) { 830 // We can only handle non-volatile memcpys with a constant size. 831 if (MCI->isVolatile() || !isa<ConstantInt>(MCI->getLength())) 832 return false; 833 834 // If we're not allowed to hack on memcpy, we fail. 835 if ((!HasMemcpy && !isa<MemCpyInlineInst>(MCI)) || DisableLIRP::Memcpy) 836 return false; 837 838 Value *Dest = MCI->getDest(); 839 Value *Source = MCI->getSource(); 840 if (!Dest || !Source) 841 return false; 842 843 // See if the load and store pointer expressions are AddRec like {base,+,1} on 844 // the current loop, which indicates a strided load and store. If we have 845 // something else, it's a random load or store we can't handle. 846 const SCEVAddRecExpr *StoreEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Dest)); 847 if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine()) 848 return false; 849 const SCEVAddRecExpr *LoadEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Source)); 850 if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine()) 851 return false; 852 853 // Reject memcpys that are so large that they overflow an unsigned. 854 uint64_t SizeInBytes = cast<ConstantInt>(MCI->getLength())->getZExtValue(); 855 if ((SizeInBytes >> 32) != 0) 856 return false; 857 858 // Check if the stride matches the size of the memcpy. If so, then we know 859 // that every byte is touched in the loop. 860 const SCEVConstant *StoreStride = 861 dyn_cast<SCEVConstant>(StoreEv->getOperand(1)); 862 const SCEVConstant *LoadStride = 863 dyn_cast<SCEVConstant>(LoadEv->getOperand(1)); 864 if (!StoreStride || !LoadStride) 865 return false; 866 867 APInt StoreStrideValue = StoreStride->getAPInt(); 868 APInt LoadStrideValue = LoadStride->getAPInt(); 869 // Huge stride value - give up 870 if (StoreStrideValue.getBitWidth() > 64 || LoadStrideValue.getBitWidth() > 64) 871 return false; 872 873 if (SizeInBytes != StoreStrideValue && SizeInBytes != -StoreStrideValue) { 874 ORE.emit([&]() { 875 return OptimizationRemarkMissed(DEBUG_TYPE, "SizeStrideUnequal", MCI) 876 << ore::NV("Inst", "memcpy") << " in " 877 << ore::NV("Function", MCI->getFunction()) 878 << " function will not be hoised: " 879 << ore::NV("Reason", "memcpy size is not equal to stride"); 880 }); 881 return false; 882 } 883 884 int64_t StoreStrideInt = StoreStrideValue.getSExtValue(); 885 int64_t LoadStrideInt = LoadStrideValue.getSExtValue(); 886 // Check if the load stride matches the store stride. 887 if (StoreStrideInt != LoadStrideInt) 888 return false; 889 890 return processLoopStoreOfLoopLoad(Dest, Source, (unsigned)SizeInBytes, 891 MCI->getDestAlign(), MCI->getSourceAlign(), 892 MCI, MCI, StoreEv, LoadEv, BECount); 893 } 894 895 /// processLoopMemSet - See if this memset can be promoted to a large memset. 896 bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI, 897 const SCEV *BECount) { 898 // We can only handle non-volatile memsets with a constant size. 899 if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) 900 return false; 901 902 // If we're not allowed to hack on memset, we fail. 903 if (!HasMemset || DisableLIRP::Memset) 904 return false; 905 906 Value *Pointer = MSI->getDest(); 907 908 // See if the pointer expression is an AddRec like {base,+,1} on the current 909 // loop, which indicates a strided store. If we have something else, it's a 910 // random store we can't handle. 911 const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer)); 912 if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine()) 913 return false; 914 915 // Reject memsets that are so large that they overflow an unsigned. 916 uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue(); 917 if ((SizeInBytes >> 32) != 0) 918 return false; 919 920 // Check to see if the stride matches the size of the memset. If so, then we 921 // know that every byte is touched in the loop. 922 const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1)); 923 if (!ConstStride) 924 return false; 925 926 APInt Stride = ConstStride->getAPInt(); 927 if (SizeInBytes != Stride && SizeInBytes != -Stride) 928 return false; 929 930 // Verify that the memset value is loop invariant. If not, we can't promote 931 // the memset. 932 Value *SplatValue = MSI->getValue(); 933 if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue)) 934 return false; 935 936 SmallPtrSet<Instruction *, 1> MSIs; 937 MSIs.insert(MSI); 938 bool NegStride = SizeInBytes == -Stride; 939 return processLoopStridedStore( 940 Pointer, (unsigned)SizeInBytes, MaybeAlign(MSI->getDestAlignment()), 941 SplatValue, MSI, MSIs, Ev, BECount, NegStride, /*IsLoopMemset=*/true); 942 } 943 944 /// mayLoopAccessLocation - Return true if the specified loop might access the 945 /// specified pointer location, which is a loop-strided access. The 'Access' 946 /// argument specifies what the verboten forms of access are (read or write). 947 static bool 948 mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L, 949 const SCEV *BECount, unsigned StoreSize, 950 AliasAnalysis &AA, 951 SmallPtrSetImpl<Instruction *> &IgnoredStores) { 952 // Get the location that may be stored across the loop. Since the access is 953 // strided positively through memory, we say that the modified location starts 954 // at the pointer and has infinite size. 955 LocationSize AccessSize = LocationSize::afterPointer(); 956 957 // If the loop iterates a fixed number of times, we can refine the access size 958 // to be exactly the size of the memset, which is (BECount+1)*StoreSize 959 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount)) 960 AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) * 961 StoreSize); 962 963 // TODO: For this to be really effective, we have to dive into the pointer 964 // operand in the store. Store to &A[i] of 100 will always return may alias 965 // with store of &A[100], we need to StoreLoc to be "A" with size of 100, 966 // which will then no-alias a store to &A[100]. 967 MemoryLocation StoreLoc(Ptr, AccessSize); 968 969 for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E; 970 ++BI) 971 for (Instruction &I : **BI) 972 if (IgnoredStores.count(&I) == 0 && 973 isModOrRefSet( 974 intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access))) 975 return true; 976 977 return false; 978 } 979 980 // If we have a negative stride, Start refers to the end of the memory location 981 // we're trying to memset. Therefore, we need to recompute the base pointer, 982 // which is just Start - BECount*Size. 983 static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount, 984 Type *IntPtr, unsigned StoreSize, 985 ScalarEvolution *SE) { 986 const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr); 987 if (StoreSize != 1) 988 Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize), 989 SCEV::FlagNUW); 990 return SE->getMinusSCEV(Start, Index); 991 } 992 993 /// Compute the number of bytes as a SCEV from the backedge taken count. 994 /// 995 /// This also maps the SCEV into the provided type and tries to handle the 996 /// computation in a way that will fold cleanly. 997 static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr, 998 unsigned StoreSize, Loop *CurLoop, 999 const DataLayout *DL, ScalarEvolution *SE) { 1000 const SCEV *NumBytesS; 1001 // The # stored bytes is (BECount+1)*Size. Expand the trip count out to 1002 // pointer size if it isn't already. 1003 // 1004 // If we're going to need to zero extend the BE count, check if we can add 1005 // one to it prior to zero extending without overflow. Provided this is safe, 1006 // it allows better simplification of the +1. 1007 if (DL->getTypeSizeInBits(BECount->getType()).getFixedSize() < 1008 DL->getTypeSizeInBits(IntPtr).getFixedSize() && 1009 SE->isLoopEntryGuardedByCond( 1010 CurLoop, ICmpInst::ICMP_NE, BECount, 1011 SE->getNegativeSCEV(SE->getOne(BECount->getType())))) { 1012 NumBytesS = SE->getZeroExtendExpr( 1013 SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW), 1014 IntPtr); 1015 } else { 1016 NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr), 1017 SE->getOne(IntPtr), SCEV::FlagNUW); 1018 } 1019 1020 // And scale it based on the store size. 1021 if (StoreSize != 1) { 1022 NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize), 1023 SCEV::FlagNUW); 1024 } 1025 return NumBytesS; 1026 } 1027 1028 /// processLoopStridedStore - We see a strided store of some value. If we can 1029 /// transform this into a memset or memset_pattern in the loop preheader, do so. 1030 bool LoopIdiomRecognize::processLoopStridedStore( 1031 Value *DestPtr, unsigned StoreSize, MaybeAlign StoreAlignment, 1032 Value *StoredVal, Instruction *TheStore, 1033 SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev, 1034 const SCEV *BECount, bool NegStride, bool IsLoopMemset) { 1035 Value *SplatValue = isBytewiseValue(StoredVal, *DL); 1036 Constant *PatternValue = nullptr; 1037 1038 if (!SplatValue) 1039 PatternValue = getMemSetPatternValue(StoredVal, DL); 1040 1041 assert((SplatValue || PatternValue) && 1042 "Expected either splat value or pattern value."); 1043 1044 // The trip count of the loop and the base pointer of the addrec SCEV is 1045 // guaranteed to be loop invariant, which means that it should dominate the 1046 // header. This allows us to insert code for it in the preheader. 1047 unsigned DestAS = DestPtr->getType()->getPointerAddressSpace(); 1048 BasicBlock *Preheader = CurLoop->getLoopPreheader(); 1049 IRBuilder<> Builder(Preheader->getTerminator()); 1050 SCEVExpander Expander(*SE, *DL, "loop-idiom"); 1051 SCEVExpanderCleaner ExpCleaner(Expander, *DT); 1052 1053 Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS); 1054 Type *IntIdxTy = DL->getIndexType(DestPtr->getType()); 1055 1056 bool Changed = false; 1057 const SCEV *Start = Ev->getStart(); 1058 // Handle negative strided loops. 1059 if (NegStride) 1060 Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE); 1061 1062 // TODO: ideally we should still be able to generate memset if SCEV expander 1063 // is taught to generate the dependencies at the latest point. 1064 if (!isSafeToExpand(Start, *SE)) 1065 return Changed; 1066 1067 // Okay, we have a strided store "p[i]" of a splattable value. We can turn 1068 // this into a memset in the loop preheader now if we want. However, this 1069 // would be unsafe to do if there is anything else in the loop that may read 1070 // or write to the aliased location. Check for any overlap by generating the 1071 // base pointer and checking the region. 1072 Value *BasePtr = 1073 Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator()); 1074 1075 // From here on out, conservatively report to the pass manager that we've 1076 // changed the IR, even if we later clean up these added instructions. There 1077 // may be structural differences e.g. in the order of use lists not accounted 1078 // for in just a textual dump of the IR. This is written as a variable, even 1079 // though statically all the places this dominates could be replaced with 1080 // 'true', with the hope that anyone trying to be clever / "more precise" with 1081 // the return value will read this comment, and leave them alone. 1082 Changed = true; 1083 1084 if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount, 1085 StoreSize, *AA, Stores)) 1086 return Changed; 1087 1088 if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset)) 1089 return Changed; 1090 1091 // Okay, everything looks good, insert the memset. 1092 1093 const SCEV *NumBytesS = 1094 getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE); 1095 1096 // TODO: ideally we should still be able to generate memset if SCEV expander 1097 // is taught to generate the dependencies at the latest point. 1098 if (!isSafeToExpand(NumBytesS, *SE)) 1099 return Changed; 1100 1101 Value *NumBytes = 1102 Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator()); 1103 1104 CallInst *NewCall; 1105 if (SplatValue) { 1106 NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, 1107 MaybeAlign(StoreAlignment)); 1108 } else { 1109 // Everything is emitted in default address space 1110 Type *Int8PtrTy = DestInt8PtrTy; 1111 1112 Module *M = TheStore->getModule(); 1113 StringRef FuncName = "memset_pattern16"; 1114 FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(), 1115 Int8PtrTy, Int8PtrTy, IntIdxTy); 1116 inferLibFuncAttributes(M, FuncName, *TLI); 1117 1118 // Otherwise we should form a memset_pattern16. PatternValue is known to be 1119 // an constant array of 16-bytes. Plop the value into a mergable global. 1120 GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true, 1121 GlobalValue::PrivateLinkage, 1122 PatternValue, ".memset_pattern"); 1123 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these. 1124 GV->setAlignment(Align(16)); 1125 Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy); 1126 NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes}); 1127 } 1128 NewCall->setDebugLoc(TheStore->getDebugLoc()); 1129 1130 if (MSSAU) { 1131 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB( 1132 NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator); 1133 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true); 1134 } 1135 1136 LLVM_DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n" 1137 << " from store to: " << *Ev << " at: " << *TheStore 1138 << "\n"); 1139 1140 ORE.emit([&]() { 1141 return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStridedStore", 1142 NewCall->getDebugLoc(), Preheader) 1143 << "Transformed loop-strided store in " 1144 << ore::NV("Function", TheStore->getFunction()) 1145 << " function into a call to " 1146 << ore::NV("NewFunction", NewCall->getCalledFunction()) 1147 << "() intrinsic"; 1148 }); 1149 1150 // Okay, the memset has been formed. Zap the original store and anything that 1151 // feeds into it. 1152 for (auto *I : Stores) { 1153 if (MSSAU) 1154 MSSAU->removeMemoryAccess(I, true); 1155 deleteDeadInstruction(I); 1156 } 1157 if (MSSAU && VerifyMemorySSA) 1158 MSSAU->getMemorySSA()->verifyMemorySSA(); 1159 ++NumMemSet; 1160 ExpCleaner.markResultUsed(); 1161 return true; 1162 } 1163 1164 /// If the stored value is a strided load in the same loop with the same stride 1165 /// this may be transformable into a memcpy. This kicks in for stuff like 1166 /// for (i) A[i] = B[i]; 1167 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI, 1168 const SCEV *BECount) { 1169 assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores."); 1170 1171 Value *StorePtr = SI->getPointerOperand(); 1172 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr)); 1173 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType()); 1174 1175 // The store must be feeding a non-volatile load. 1176 LoadInst *LI = cast<LoadInst>(SI->getValueOperand()); 1177 assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads."); 1178 1179 // See if the pointer expression is an AddRec like {base,+,1} on the current 1180 // loop, which indicates a strided load. If we have something else, it's a 1181 // random load we can't handle. 1182 Value *LoadPtr = LI->getPointerOperand(); 1183 const SCEVAddRecExpr *LoadEv = cast<SCEVAddRecExpr>(SE->getSCEV(LoadPtr)); 1184 return processLoopStoreOfLoopLoad(StorePtr, LoadPtr, StoreSize, 1185 SI->getAlign(), LI->getAlign(), SI, LI, 1186 StoreEv, LoadEv, BECount); 1187 } 1188 1189 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad( 1190 Value *DestPtr, Value *SourcePtr, unsigned StoreSize, MaybeAlign StoreAlign, 1191 MaybeAlign LoadAlign, Instruction *TheStore, Instruction *TheLoad, 1192 const SCEVAddRecExpr *StoreEv, const SCEVAddRecExpr *LoadEv, 1193 const SCEV *BECount) { 1194 1195 // FIXME: until llvm.memcpy.inline supports dynamic sizes, we need to 1196 // conservatively bail here, since otherwise we may have to transform 1197 // llvm.memcpy.inline into llvm.memcpy which is illegal. 1198 if (isa<MemCpyInlineInst>(TheStore)) 1199 return false; 1200 1201 // The trip count of the loop and the base pointer of the addrec SCEV is 1202 // guaranteed to be loop invariant, which means that it should dominate the 1203 // header. This allows us to insert code for it in the preheader. 1204 BasicBlock *Preheader = CurLoop->getLoopPreheader(); 1205 IRBuilder<> Builder(Preheader->getTerminator()); 1206 SCEVExpander Expander(*SE, *DL, "loop-idiom"); 1207 1208 SCEVExpanderCleaner ExpCleaner(Expander, *DT); 1209 1210 bool Changed = false; 1211 const SCEV *StrStart = StoreEv->getStart(); 1212 unsigned StrAS = DestPtr->getType()->getPointerAddressSpace(); 1213 Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS)); 1214 1215 APInt Stride = getStoreStride(StoreEv); 1216 bool NegStride = StoreSize == -Stride; 1217 1218 // Handle negative strided loops. 1219 if (NegStride) 1220 StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE); 1221 1222 // Okay, we have a strided store "p[i]" of a loaded value. We can turn 1223 // this into a memcpy in the loop preheader now if we want. However, this 1224 // would be unsafe to do if there is anything else in the loop that may read 1225 // or write the memory region we're storing to. This includes the load that 1226 // feeds the stores. Check for an alias by generating the base address and 1227 // checking everything. 1228 Value *StoreBasePtr = Expander.expandCodeFor( 1229 StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator()); 1230 1231 // From here on out, conservatively report to the pass manager that we've 1232 // changed the IR, even if we later clean up these added instructions. There 1233 // may be structural differences e.g. in the order of use lists not accounted 1234 // for in just a textual dump of the IR. This is written as a variable, even 1235 // though statically all the places this dominates could be replaced with 1236 // 'true', with the hope that anyone trying to be clever / "more precise" with 1237 // the return value will read this comment, and leave them alone. 1238 Changed = true; 1239 1240 SmallPtrSet<Instruction *, 2> Stores; 1241 Stores.insert(TheStore); 1242 1243 bool IsMemCpy = isa<MemCpyInst>(TheStore); 1244 const StringRef InstRemark = IsMemCpy ? "memcpy" : "load and store"; 1245 1246 bool UseMemMove = 1247 mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount, 1248 StoreSize, *AA, Stores); 1249 if (UseMemMove) { 1250 // For memmove case it's not enough to guarantee that loop doesn't access 1251 // TheStore and TheLoad. Additionally we need to make sure that TheStore is 1252 // the only user of TheLoad. 1253 if (!TheLoad->hasOneUse()) 1254 return Changed; 1255 Stores.insert(TheLoad); 1256 if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, 1257 BECount, StoreSize, *AA, Stores)) { 1258 ORE.emit([&]() { 1259 return OptimizationRemarkMissed(DEBUG_TYPE, "LoopMayAccessStore", 1260 TheStore) 1261 << ore::NV("Inst", InstRemark) << " in " 1262 << ore::NV("Function", TheStore->getFunction()) 1263 << " function will not be hoisted: " 1264 << ore::NV("Reason", "The loop may access store location"); 1265 }); 1266 return Changed; 1267 } 1268 Stores.erase(TheLoad); 1269 } 1270 1271 const SCEV *LdStart = LoadEv->getStart(); 1272 unsigned LdAS = SourcePtr->getType()->getPointerAddressSpace(); 1273 1274 // Handle negative strided loops. 1275 if (NegStride) 1276 LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE); 1277 1278 // For a memcpy, we have to make sure that the input array is not being 1279 // mutated by the loop. 1280 Value *LoadBasePtr = Expander.expandCodeFor( 1281 LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator()); 1282 1283 // If the store is a memcpy instruction, we must check if it will write to 1284 // the load memory locations. So remove it from the ignored stores. 1285 if (IsMemCpy) 1286 Stores.erase(TheStore); 1287 if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount, 1288 StoreSize, *AA, Stores)) { 1289 ORE.emit([&]() { 1290 return OptimizationRemarkMissed(DEBUG_TYPE, "LoopMayAccessLoad", TheLoad) 1291 << ore::NV("Inst", InstRemark) << " in " 1292 << ore::NV("Function", TheStore->getFunction()) 1293 << " function will not be hoisted: " 1294 << ore::NV("Reason", "The loop may access load location"); 1295 }); 1296 return Changed; 1297 } 1298 if (UseMemMove) { 1299 // Ensure that LoadBasePtr is after StoreBasePtr or before StoreBasePtr for 1300 // negative stride. LoadBasePtr shouldn't overlap with StoreBasePtr. 1301 int64_t LoadOff = 0, StoreOff = 0; 1302 const Value *BP1 = llvm::GetPointerBaseWithConstantOffset( 1303 LoadBasePtr->stripPointerCasts(), LoadOff, *DL); 1304 const Value *BP2 = llvm::GetPointerBaseWithConstantOffset( 1305 StoreBasePtr->stripPointerCasts(), StoreOff, *DL); 1306 int64_t LoadSize = 1307 DL->getTypeSizeInBits(TheLoad->getType()).getFixedSize() / 8; 1308 if (BP1 != BP2 || LoadSize != int64_t(StoreSize)) 1309 return Changed; 1310 if ((!NegStride && LoadOff < StoreOff + int64_t(StoreSize)) || 1311 (NegStride && LoadOff + LoadSize > StoreOff)) 1312 return Changed; 1313 } 1314 1315 if (avoidLIRForMultiBlockLoop()) 1316 return Changed; 1317 1318 // Okay, everything is safe, we can transform this! 1319 1320 const SCEV *NumBytesS = 1321 getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE); 1322 1323 Value *NumBytes = 1324 Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator()); 1325 1326 CallInst *NewCall = nullptr; 1327 // Check whether to generate an unordered atomic memcpy: 1328 // If the load or store are atomic, then they must necessarily be unordered 1329 // by previous checks. 1330 if (!TheStore->isAtomic() && !TheLoad->isAtomic()) { 1331 if (UseMemMove) 1332 NewCall = Builder.CreateMemMove(StoreBasePtr, StoreAlign, LoadBasePtr, 1333 LoadAlign, NumBytes); 1334 else 1335 NewCall = Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr, 1336 LoadAlign, NumBytes); 1337 } else { 1338 // For now don't support unordered atomic memmove. 1339 if (UseMemMove) 1340 return Changed; 1341 // We cannot allow unaligned ops for unordered load/store, so reject 1342 // anything where the alignment isn't at least the element size. 1343 assert((StoreAlign.hasValue() && LoadAlign.hasValue()) && 1344 "Expect unordered load/store to have align."); 1345 if (StoreAlign.getValue() < StoreSize || LoadAlign.getValue() < StoreSize) 1346 return Changed; 1347 1348 // If the element.atomic memcpy is not lowered into explicit 1349 // loads/stores later, then it will be lowered into an element-size 1350 // specific lib call. If the lib call doesn't exist for our store size, then 1351 // we shouldn't generate the memcpy. 1352 if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize()) 1353 return Changed; 1354 1355 // Create the call. 1356 // Note that unordered atomic loads/stores are *required* by the spec to 1357 // have an alignment but non-atomic loads/stores may not. 1358 NewCall = Builder.CreateElementUnorderedAtomicMemCpy( 1359 StoreBasePtr, StoreAlign.getValue(), LoadBasePtr, LoadAlign.getValue(), 1360 NumBytes, StoreSize); 1361 } 1362 NewCall->setDebugLoc(TheStore->getDebugLoc()); 1363 1364 if (MSSAU) { 1365 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB( 1366 NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator); 1367 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true); 1368 } 1369 1370 LLVM_DEBUG(dbgs() << " Formed new call: " << *NewCall << "\n" 1371 << " from load ptr=" << *LoadEv << " at: " << *TheLoad 1372 << "\n" 1373 << " from store ptr=" << *StoreEv << " at: " << *TheStore 1374 << "\n"); 1375 1376 ORE.emit([&]() { 1377 return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad", 1378 NewCall->getDebugLoc(), Preheader) 1379 << "Formed a call to " 1380 << ore::NV("NewFunction", NewCall->getCalledFunction()) 1381 << "() intrinsic from " << ore::NV("Inst", InstRemark) 1382 << " instruction in " << ore::NV("Function", TheStore->getFunction()) 1383 << " function"; 1384 }); 1385 1386 // Okay, the memcpy has been formed. Zap the original store and anything that 1387 // feeds into it. 1388 if (MSSAU) 1389 MSSAU->removeMemoryAccess(TheStore, true); 1390 deleteDeadInstruction(TheStore); 1391 if (MSSAU && VerifyMemorySSA) 1392 MSSAU->getMemorySSA()->verifyMemorySSA(); 1393 if (UseMemMove) 1394 ++NumMemMove; 1395 else 1396 ++NumMemCpy; 1397 ExpCleaner.markResultUsed(); 1398 return true; 1399 } 1400 1401 // When compiling for codesize we avoid idiom recognition for a multi-block loop 1402 // unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop. 1403 // 1404 bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset, 1405 bool IsLoopMemset) { 1406 if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) { 1407 if (CurLoop->isOutermost() && (!IsMemset || !IsLoopMemset)) { 1408 LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName() 1409 << " : LIR " << (IsMemset ? "Memset" : "Memcpy") 1410 << " avoided: multi-block top-level loop\n"); 1411 return true; 1412 } 1413 } 1414 1415 return false; 1416 } 1417 1418 bool LoopIdiomRecognize::runOnNoncountableLoop() { 1419 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F[" 1420 << CurLoop->getHeader()->getParent()->getName() 1421 << "] Noncountable Loop %" 1422 << CurLoop->getHeader()->getName() << "\n"); 1423 1424 return recognizePopcount() || recognizeAndInsertFFS() || 1425 recognizeShiftUntilBitTest() || recognizeShiftUntilZero(); 1426 } 1427 1428 /// Check if the given conditional branch is based on the comparison between 1429 /// a variable and zero, and if the variable is non-zero or zero (JmpOnZero is 1430 /// true), the control yields to the loop entry. If the branch matches the 1431 /// behavior, the variable involved in the comparison is returned. This function 1432 /// will be called to see if the precondition and postcondition of the loop are 1433 /// in desirable form. 1434 static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry, 1435 bool JmpOnZero = false) { 1436 if (!BI || !BI->isConditional()) 1437 return nullptr; 1438 1439 ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition()); 1440 if (!Cond) 1441 return nullptr; 1442 1443 ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1)); 1444 if (!CmpZero || !CmpZero->isZero()) 1445 return nullptr; 1446 1447 BasicBlock *TrueSucc = BI->getSuccessor(0); 1448 BasicBlock *FalseSucc = BI->getSuccessor(1); 1449 if (JmpOnZero) 1450 std::swap(TrueSucc, FalseSucc); 1451 1452 ICmpInst::Predicate Pred = Cond->getPredicate(); 1453 if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) || 1454 (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry)) 1455 return Cond->getOperand(0); 1456 1457 return nullptr; 1458 } 1459 1460 // Check if the recurrence variable `VarX` is in the right form to create 1461 // the idiom. Returns the value coerced to a PHINode if so. 1462 static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX, 1463 BasicBlock *LoopEntry) { 1464 auto *PhiX = dyn_cast<PHINode>(VarX); 1465 if (PhiX && PhiX->getParent() == LoopEntry && 1466 (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX)) 1467 return PhiX; 1468 return nullptr; 1469 } 1470 1471 /// Return true iff the idiom is detected in the loop. 1472 /// 1473 /// Additionally: 1474 /// 1) \p CntInst is set to the instruction counting the population bit. 1475 /// 2) \p CntPhi is set to the corresponding phi node. 1476 /// 3) \p Var is set to the value whose population bits are being counted. 1477 /// 1478 /// The core idiom we are trying to detect is: 1479 /// \code 1480 /// if (x0 != 0) 1481 /// goto loop-exit // the precondition of the loop 1482 /// cnt0 = init-val; 1483 /// do { 1484 /// x1 = phi (x0, x2); 1485 /// cnt1 = phi(cnt0, cnt2); 1486 /// 1487 /// cnt2 = cnt1 + 1; 1488 /// ... 1489 /// x2 = x1 & (x1 - 1); 1490 /// ... 1491 /// } while(x != 0); 1492 /// 1493 /// loop-exit: 1494 /// \endcode 1495 static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB, 1496 Instruction *&CntInst, PHINode *&CntPhi, 1497 Value *&Var) { 1498 // step 1: Check to see if the look-back branch match this pattern: 1499 // "if (a!=0) goto loop-entry". 1500 BasicBlock *LoopEntry; 1501 Instruction *DefX2, *CountInst; 1502 Value *VarX1, *VarX0; 1503 PHINode *PhiX, *CountPhi; 1504 1505 DefX2 = CountInst = nullptr; 1506 VarX1 = VarX0 = nullptr; 1507 PhiX = CountPhi = nullptr; 1508 LoopEntry = *(CurLoop->block_begin()); 1509 1510 // step 1: Check if the loop-back branch is in desirable form. 1511 { 1512 if (Value *T = matchCondition( 1513 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry)) 1514 DefX2 = dyn_cast<Instruction>(T); 1515 else 1516 return false; 1517 } 1518 1519 // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)" 1520 { 1521 if (!DefX2 || DefX2->getOpcode() != Instruction::And) 1522 return false; 1523 1524 BinaryOperator *SubOneOp; 1525 1526 if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0)))) 1527 VarX1 = DefX2->getOperand(1); 1528 else { 1529 VarX1 = DefX2->getOperand(0); 1530 SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1)); 1531 } 1532 if (!SubOneOp || SubOneOp->getOperand(0) != VarX1) 1533 return false; 1534 1535 ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1)); 1536 if (!Dec || 1537 !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) || 1538 (SubOneOp->getOpcode() == Instruction::Add && 1539 Dec->isMinusOne()))) { 1540 return false; 1541 } 1542 } 1543 1544 // step 3: Check the recurrence of variable X 1545 PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry); 1546 if (!PhiX) 1547 return false; 1548 1549 // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1 1550 { 1551 CountInst = nullptr; 1552 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(), 1553 IterE = LoopEntry->end(); 1554 Iter != IterE; Iter++) { 1555 Instruction *Inst = &*Iter; 1556 if (Inst->getOpcode() != Instruction::Add) 1557 continue; 1558 1559 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1)); 1560 if (!Inc || !Inc->isOne()) 1561 continue; 1562 1563 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry); 1564 if (!Phi) 1565 continue; 1566 1567 // Check if the result of the instruction is live of the loop. 1568 bool LiveOutLoop = false; 1569 for (User *U : Inst->users()) { 1570 if ((cast<Instruction>(U))->getParent() != LoopEntry) { 1571 LiveOutLoop = true; 1572 break; 1573 } 1574 } 1575 1576 if (LiveOutLoop) { 1577 CountInst = Inst; 1578 CountPhi = Phi; 1579 break; 1580 } 1581 } 1582 1583 if (!CountInst) 1584 return false; 1585 } 1586 1587 // step 5: check if the precondition is in this form: 1588 // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;" 1589 { 1590 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator()); 1591 Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader()); 1592 if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1)) 1593 return false; 1594 1595 CntInst = CountInst; 1596 CntPhi = CountPhi; 1597 Var = T; 1598 } 1599 1600 return true; 1601 } 1602 1603 /// Return true if the idiom is detected in the loop. 1604 /// 1605 /// Additionally: 1606 /// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ) 1607 /// or nullptr if there is no such. 1608 /// 2) \p CntPhi is set to the corresponding phi node 1609 /// or nullptr if there is no such. 1610 /// 3) \p Var is set to the value whose CTLZ could be used. 1611 /// 4) \p DefX is set to the instruction calculating Loop exit condition. 1612 /// 1613 /// The core idiom we are trying to detect is: 1614 /// \code 1615 /// if (x0 == 0) 1616 /// goto loop-exit // the precondition of the loop 1617 /// cnt0 = init-val; 1618 /// do { 1619 /// x = phi (x0, x.next); //PhiX 1620 /// cnt = phi(cnt0, cnt.next); 1621 /// 1622 /// cnt.next = cnt + 1; 1623 /// ... 1624 /// x.next = x >> 1; // DefX 1625 /// ... 1626 /// } while(x.next != 0); 1627 /// 1628 /// loop-exit: 1629 /// \endcode 1630 static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL, 1631 Intrinsic::ID &IntrinID, Value *&InitX, 1632 Instruction *&CntInst, PHINode *&CntPhi, 1633 Instruction *&DefX) { 1634 BasicBlock *LoopEntry; 1635 Value *VarX = nullptr; 1636 1637 DefX = nullptr; 1638 CntInst = nullptr; 1639 CntPhi = nullptr; 1640 LoopEntry = *(CurLoop->block_begin()); 1641 1642 // step 1: Check if the loop-back branch is in desirable form. 1643 if (Value *T = matchCondition( 1644 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry)) 1645 DefX = dyn_cast<Instruction>(T); 1646 else 1647 return false; 1648 1649 // step 2: detect instructions corresponding to "x.next = x >> 1 or x << 1" 1650 if (!DefX || !DefX->isShift()) 1651 return false; 1652 IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz : 1653 Intrinsic::ctlz; 1654 ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1)); 1655 if (!Shft || !Shft->isOne()) 1656 return false; 1657 VarX = DefX->getOperand(0); 1658 1659 // step 3: Check the recurrence of variable X 1660 PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry); 1661 if (!PhiX) 1662 return false; 1663 1664 InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader()); 1665 1666 // Make sure the initial value can't be negative otherwise the ashr in the 1667 // loop might never reach zero which would make the loop infinite. 1668 if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL)) 1669 return false; 1670 1671 // step 4: Find the instruction which count the CTLZ: cnt.next = cnt + 1 1672 // or cnt.next = cnt + -1. 1673 // TODO: We can skip the step. If loop trip count is known (CTLZ), 1674 // then all uses of "cnt.next" could be optimized to the trip count 1675 // plus "cnt0". Currently it is not optimized. 1676 // This step could be used to detect POPCNT instruction: 1677 // cnt.next = cnt + (x.next & 1) 1678 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(), 1679 IterE = LoopEntry->end(); 1680 Iter != IterE; Iter++) { 1681 Instruction *Inst = &*Iter; 1682 if (Inst->getOpcode() != Instruction::Add) 1683 continue; 1684 1685 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1)); 1686 if (!Inc || (!Inc->isOne() && !Inc->isMinusOne())) 1687 continue; 1688 1689 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry); 1690 if (!Phi) 1691 continue; 1692 1693 CntInst = Inst; 1694 CntPhi = Phi; 1695 break; 1696 } 1697 if (!CntInst) 1698 return false; 1699 1700 return true; 1701 } 1702 1703 /// Recognize CTLZ or CTTZ idiom in a non-countable loop and convert the loop 1704 /// to countable (with CTLZ / CTTZ trip count). If CTLZ / CTTZ inserted as a new 1705 /// trip count returns true; otherwise, returns false. 1706 bool LoopIdiomRecognize::recognizeAndInsertFFS() { 1707 // Give up if the loop has multiple blocks or multiple backedges. 1708 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1) 1709 return false; 1710 1711 Intrinsic::ID IntrinID; 1712 Value *InitX; 1713 Instruction *DefX = nullptr; 1714 PHINode *CntPhi = nullptr; 1715 Instruction *CntInst = nullptr; 1716 // Help decide if transformation is profitable. For ShiftUntilZero idiom, 1717 // this is always 6. 1718 size_t IdiomCanonicalSize = 6; 1719 1720 if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX, 1721 CntInst, CntPhi, DefX)) 1722 return false; 1723 1724 bool IsCntPhiUsedOutsideLoop = false; 1725 for (User *U : CntPhi->users()) 1726 if (!CurLoop->contains(cast<Instruction>(U))) { 1727 IsCntPhiUsedOutsideLoop = true; 1728 break; 1729 } 1730 bool IsCntInstUsedOutsideLoop = false; 1731 for (User *U : CntInst->users()) 1732 if (!CurLoop->contains(cast<Instruction>(U))) { 1733 IsCntInstUsedOutsideLoop = true; 1734 break; 1735 } 1736 // If both CntInst and CntPhi are used outside the loop the profitability 1737 // is questionable. 1738 if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop) 1739 return false; 1740 1741 // For some CPUs result of CTLZ(X) intrinsic is undefined 1742 // when X is 0. If we can not guarantee X != 0, we need to check this 1743 // when expand. 1744 bool ZeroCheck = false; 1745 // It is safe to assume Preheader exist as it was checked in 1746 // parent function RunOnLoop. 1747 BasicBlock *PH = CurLoop->getLoopPreheader(); 1748 1749 // If we are using the count instruction outside the loop, make sure we 1750 // have a zero check as a precondition. Without the check the loop would run 1751 // one iteration for before any check of the input value. This means 0 and 1 1752 // would have identical behavior in the original loop and thus 1753 if (!IsCntPhiUsedOutsideLoop) { 1754 auto *PreCondBB = PH->getSinglePredecessor(); 1755 if (!PreCondBB) 1756 return false; 1757 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator()); 1758 if (!PreCondBI) 1759 return false; 1760 if (matchCondition(PreCondBI, PH) != InitX) 1761 return false; 1762 ZeroCheck = true; 1763 } 1764 1765 // Check if CTLZ / CTTZ intrinsic is profitable. Assume it is always 1766 // profitable if we delete the loop. 1767 1768 // the loop has only 6 instructions: 1769 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ] 1770 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ] 1771 // %shr = ashr %n.addr.0, 1 1772 // %tobool = icmp eq %shr, 0 1773 // %inc = add nsw %i.0, 1 1774 // br i1 %tobool 1775 1776 const Value *Args[] = {InitX, 1777 ConstantInt::getBool(InitX->getContext(), ZeroCheck)}; 1778 1779 // @llvm.dbg doesn't count as they have no semantic effect. 1780 auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug(); 1781 uint32_t HeaderSize = 1782 std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end()); 1783 1784 IntrinsicCostAttributes Attrs(IntrinID, InitX->getType(), Args); 1785 InstructionCost Cost = 1786 TTI->getIntrinsicInstrCost(Attrs, TargetTransformInfo::TCK_SizeAndLatency); 1787 if (HeaderSize != IdiomCanonicalSize && 1788 Cost > TargetTransformInfo::TCC_Basic) 1789 return false; 1790 1791 transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX, 1792 DefX->getDebugLoc(), ZeroCheck, 1793 IsCntPhiUsedOutsideLoop); 1794 return true; 1795 } 1796 1797 /// Recognizes a population count idiom in a non-countable loop. 1798 /// 1799 /// If detected, transforms the relevant code to issue the popcount intrinsic 1800 /// function call, and returns true; otherwise, returns false. 1801 bool LoopIdiomRecognize::recognizePopcount() { 1802 if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware) 1803 return false; 1804 1805 // Counting population are usually conducted by few arithmetic instructions. 1806 // Such instructions can be easily "absorbed" by vacant slots in a 1807 // non-compact loop. Therefore, recognizing popcount idiom only makes sense 1808 // in a compact loop. 1809 1810 // Give up if the loop has multiple blocks or multiple backedges. 1811 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1) 1812 return false; 1813 1814 BasicBlock *LoopBody = *(CurLoop->block_begin()); 1815 if (LoopBody->size() >= 20) { 1816 // The loop is too big, bail out. 1817 return false; 1818 } 1819 1820 // It should have a preheader containing nothing but an unconditional branch. 1821 BasicBlock *PH = CurLoop->getLoopPreheader(); 1822 if (!PH || &PH->front() != PH->getTerminator()) 1823 return false; 1824 auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator()); 1825 if (!EntryBI || EntryBI->isConditional()) 1826 return false; 1827 1828 // It should have a precondition block where the generated popcount intrinsic 1829 // function can be inserted. 1830 auto *PreCondBB = PH->getSinglePredecessor(); 1831 if (!PreCondBB) 1832 return false; 1833 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator()); 1834 if (!PreCondBI || PreCondBI->isUnconditional()) 1835 return false; 1836 1837 Instruction *CntInst; 1838 PHINode *CntPhi; 1839 Value *Val; 1840 if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val)) 1841 return false; 1842 1843 transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val); 1844 return true; 1845 } 1846 1847 static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val, 1848 const DebugLoc &DL) { 1849 Value *Ops[] = {Val}; 1850 Type *Tys[] = {Val->getType()}; 1851 1852 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent(); 1853 Function *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys); 1854 CallInst *CI = IRBuilder.CreateCall(Func, Ops); 1855 CI->setDebugLoc(DL); 1856 1857 return CI; 1858 } 1859 1860 static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val, 1861 const DebugLoc &DL, bool ZeroCheck, 1862 Intrinsic::ID IID) { 1863 Value *Ops[] = {Val, IRBuilder.getInt1(ZeroCheck)}; 1864 Type *Tys[] = {Val->getType()}; 1865 1866 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent(); 1867 Function *Func = Intrinsic::getDeclaration(M, IID, Tys); 1868 CallInst *CI = IRBuilder.CreateCall(Func, Ops); 1869 CI->setDebugLoc(DL); 1870 1871 return CI; 1872 } 1873 1874 /// Transform the following loop (Using CTLZ, CTTZ is similar): 1875 /// loop: 1876 /// CntPhi = PHI [Cnt0, CntInst] 1877 /// PhiX = PHI [InitX, DefX] 1878 /// CntInst = CntPhi + 1 1879 /// DefX = PhiX >> 1 1880 /// LOOP_BODY 1881 /// Br: loop if (DefX != 0) 1882 /// Use(CntPhi) or Use(CntInst) 1883 /// 1884 /// Into: 1885 /// If CntPhi used outside the loop: 1886 /// CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1) 1887 /// Count = CountPrev + 1 1888 /// else 1889 /// Count = BitWidth(InitX) - CTLZ(InitX) 1890 /// loop: 1891 /// CntPhi = PHI [Cnt0, CntInst] 1892 /// PhiX = PHI [InitX, DefX] 1893 /// PhiCount = PHI [Count, Dec] 1894 /// CntInst = CntPhi + 1 1895 /// DefX = PhiX >> 1 1896 /// Dec = PhiCount - 1 1897 /// LOOP_BODY 1898 /// Br: loop if (Dec != 0) 1899 /// Use(CountPrev + Cnt0) // Use(CntPhi) 1900 /// or 1901 /// Use(Count + Cnt0) // Use(CntInst) 1902 /// 1903 /// If LOOP_BODY is empty the loop will be deleted. 1904 /// If CntInst and DefX are not used in LOOP_BODY they will be removed. 1905 void LoopIdiomRecognize::transformLoopToCountable( 1906 Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst, 1907 PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL, 1908 bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) { 1909 BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator()); 1910 1911 // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block 1912 IRBuilder<> Builder(PreheaderBr); 1913 Builder.SetCurrentDebugLocation(DL); 1914 1915 // If there are no uses of CntPhi crate: 1916 // Count = BitWidth - CTLZ(InitX); 1917 // NewCount = Count; 1918 // If there are uses of CntPhi create: 1919 // NewCount = BitWidth - CTLZ(InitX >> 1); 1920 // Count = NewCount + 1; 1921 Value *InitXNext; 1922 if (IsCntPhiUsedOutsideLoop) { 1923 if (DefX->getOpcode() == Instruction::AShr) 1924 InitXNext = Builder.CreateAShr(InitX, 1); 1925 else if (DefX->getOpcode() == Instruction::LShr) 1926 InitXNext = Builder.CreateLShr(InitX, 1); 1927 else if (DefX->getOpcode() == Instruction::Shl) // cttz 1928 InitXNext = Builder.CreateShl(InitX, 1); 1929 else 1930 llvm_unreachable("Unexpected opcode!"); 1931 } else 1932 InitXNext = InitX; 1933 Value *Count = 1934 createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID); 1935 Type *CountTy = Count->getType(); 1936 Count = Builder.CreateSub( 1937 ConstantInt::get(CountTy, CountTy->getIntegerBitWidth()), Count); 1938 Value *NewCount = Count; 1939 if (IsCntPhiUsedOutsideLoop) 1940 Count = Builder.CreateAdd(Count, ConstantInt::get(CountTy, 1)); 1941 1942 NewCount = Builder.CreateZExtOrTrunc(NewCount, CntInst->getType()); 1943 1944 Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader); 1945 if (cast<ConstantInt>(CntInst->getOperand(1))->isOne()) { 1946 // If the counter was being incremented in the loop, add NewCount to the 1947 // counter's initial value, but only if the initial value is not zero. 1948 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal); 1949 if (!InitConst || !InitConst->isZero()) 1950 NewCount = Builder.CreateAdd(NewCount, CntInitVal); 1951 } else { 1952 // If the count was being decremented in the loop, subtract NewCount from 1953 // the counter's initial value. 1954 NewCount = Builder.CreateSub(CntInitVal, NewCount); 1955 } 1956 1957 // Step 2: Insert new IV and loop condition: 1958 // loop: 1959 // ... 1960 // PhiCount = PHI [Count, Dec] 1961 // ... 1962 // Dec = PhiCount - 1 1963 // ... 1964 // Br: loop if (Dec != 0) 1965 BasicBlock *Body = *(CurLoop->block_begin()); 1966 auto *LbBr = cast<BranchInst>(Body->getTerminator()); 1967 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition()); 1968 1969 PHINode *TcPhi = PHINode::Create(CountTy, 2, "tcphi", &Body->front()); 1970 1971 Builder.SetInsertPoint(LbCond); 1972 Instruction *TcDec = cast<Instruction>(Builder.CreateSub( 1973 TcPhi, ConstantInt::get(CountTy, 1), "tcdec", false, true)); 1974 1975 TcPhi->addIncoming(Count, Preheader); 1976 TcPhi->addIncoming(TcDec, Body); 1977 1978 CmpInst::Predicate Pred = 1979 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ; 1980 LbCond->setPredicate(Pred); 1981 LbCond->setOperand(0, TcDec); 1982 LbCond->setOperand(1, ConstantInt::get(CountTy, 0)); 1983 1984 // Step 3: All the references to the original counter outside 1985 // the loop are replaced with the NewCount 1986 if (IsCntPhiUsedOutsideLoop) 1987 CntPhi->replaceUsesOutsideBlock(NewCount, Body); 1988 else 1989 CntInst->replaceUsesOutsideBlock(NewCount, Body); 1990 1991 // step 4: Forget the "non-computable" trip-count SCEV associated with the 1992 // loop. The loop would otherwise not be deleted even if it becomes empty. 1993 SE->forgetLoop(CurLoop); 1994 } 1995 1996 void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB, 1997 Instruction *CntInst, 1998 PHINode *CntPhi, Value *Var) { 1999 BasicBlock *PreHead = CurLoop->getLoopPreheader(); 2000 auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator()); 2001 const DebugLoc &DL = CntInst->getDebugLoc(); 2002 2003 // Assuming before transformation, the loop is following: 2004 // if (x) // the precondition 2005 // do { cnt++; x &= x - 1; } while(x); 2006 2007 // Step 1: Insert the ctpop instruction at the end of the precondition block 2008 IRBuilder<> Builder(PreCondBr); 2009 Value *PopCnt, *PopCntZext, *NewCount, *TripCnt; 2010 { 2011 PopCnt = createPopcntIntrinsic(Builder, Var, DL); 2012 NewCount = PopCntZext = 2013 Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType())); 2014 2015 if (NewCount != PopCnt) 2016 (cast<Instruction>(NewCount))->setDebugLoc(DL); 2017 2018 // TripCnt is exactly the number of iterations the loop has 2019 TripCnt = NewCount; 2020 2021 // If the population counter's initial value is not zero, insert Add Inst. 2022 Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead); 2023 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal); 2024 if (!InitConst || !InitConst->isZero()) { 2025 NewCount = Builder.CreateAdd(NewCount, CntInitVal); 2026 (cast<Instruction>(NewCount))->setDebugLoc(DL); 2027 } 2028 } 2029 2030 // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to 2031 // "if (NewCount == 0) loop-exit". Without this change, the intrinsic 2032 // function would be partial dead code, and downstream passes will drag 2033 // it back from the precondition block to the preheader. 2034 { 2035 ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition()); 2036 2037 Value *Opnd0 = PopCntZext; 2038 Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0); 2039 if (PreCond->getOperand(0) != Var) 2040 std::swap(Opnd0, Opnd1); 2041 2042 ICmpInst *NewPreCond = cast<ICmpInst>( 2043 Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1)); 2044 PreCondBr->setCondition(NewPreCond); 2045 2046 RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI); 2047 } 2048 2049 // Step 3: Note that the population count is exactly the trip count of the 2050 // loop in question, which enable us to convert the loop from noncountable 2051 // loop into a countable one. The benefit is twofold: 2052 // 2053 // - If the loop only counts population, the entire loop becomes dead after 2054 // the transformation. It is a lot easier to prove a countable loop dead 2055 // than to prove a noncountable one. (In some C dialects, an infinite loop 2056 // isn't dead even if it computes nothing useful. In general, DCE needs 2057 // to prove a noncountable loop finite before safely delete it.) 2058 // 2059 // - If the loop also performs something else, it remains alive. 2060 // Since it is transformed to countable form, it can be aggressively 2061 // optimized by some optimizations which are in general not applicable 2062 // to a noncountable loop. 2063 // 2064 // After this step, this loop (conceptually) would look like following: 2065 // newcnt = __builtin_ctpop(x); 2066 // t = newcnt; 2067 // if (x) 2068 // do { cnt++; x &= x-1; t--) } while (t > 0); 2069 BasicBlock *Body = *(CurLoop->block_begin()); 2070 { 2071 auto *LbBr = cast<BranchInst>(Body->getTerminator()); 2072 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition()); 2073 Type *Ty = TripCnt->getType(); 2074 2075 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front()); 2076 2077 Builder.SetInsertPoint(LbCond); 2078 Instruction *TcDec = cast<Instruction>( 2079 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1), 2080 "tcdec", false, true)); 2081 2082 TcPhi->addIncoming(TripCnt, PreHead); 2083 TcPhi->addIncoming(TcDec, Body); 2084 2085 CmpInst::Predicate Pred = 2086 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE; 2087 LbCond->setPredicate(Pred); 2088 LbCond->setOperand(0, TcDec); 2089 LbCond->setOperand(1, ConstantInt::get(Ty, 0)); 2090 } 2091 2092 // Step 4: All the references to the original population counter outside 2093 // the loop are replaced with the NewCount -- the value returned from 2094 // __builtin_ctpop(). 2095 CntInst->replaceUsesOutsideBlock(NewCount, Body); 2096 2097 // step 5: Forget the "non-computable" trip-count SCEV associated with the 2098 // loop. The loop would otherwise not be deleted even if it becomes empty. 2099 SE->forgetLoop(CurLoop); 2100 } 2101 2102 /// Match loop-invariant value. 2103 template <typename SubPattern_t> struct match_LoopInvariant { 2104 SubPattern_t SubPattern; 2105 const Loop *L; 2106 2107 match_LoopInvariant(const SubPattern_t &SP, const Loop *L) 2108 : SubPattern(SP), L(L) {} 2109 2110 template <typename ITy> bool match(ITy *V) { 2111 return L->isLoopInvariant(V) && SubPattern.match(V); 2112 } 2113 }; 2114 2115 /// Matches if the value is loop-invariant. 2116 template <typename Ty> 2117 inline match_LoopInvariant<Ty> m_LoopInvariant(const Ty &M, const Loop *L) { 2118 return match_LoopInvariant<Ty>(M, L); 2119 } 2120 2121 /// Return true if the idiom is detected in the loop. 2122 /// 2123 /// The core idiom we are trying to detect is: 2124 /// \code 2125 /// entry: 2126 /// <...> 2127 /// %bitmask = shl i32 1, %bitpos 2128 /// br label %loop 2129 /// 2130 /// loop: 2131 /// %x.curr = phi i32 [ %x, %entry ], [ %x.next, %loop ] 2132 /// %x.curr.bitmasked = and i32 %x.curr, %bitmask 2133 /// %x.curr.isbitunset = icmp eq i32 %x.curr.bitmasked, 0 2134 /// %x.next = shl i32 %x.curr, 1 2135 /// <...> 2136 /// br i1 %x.curr.isbitunset, label %loop, label %end 2137 /// 2138 /// end: 2139 /// %x.curr.res = phi i32 [ %x.curr, %loop ] <...> 2140 /// %x.next.res = phi i32 [ %x.next, %loop ] <...> 2141 /// <...> 2142 /// \endcode 2143 static bool detectShiftUntilBitTestIdiom(Loop *CurLoop, Value *&BaseX, 2144 Value *&BitMask, Value *&BitPos, 2145 Value *&CurrX, Instruction *&NextX) { 2146 LLVM_DEBUG(dbgs() << DEBUG_TYPE 2147 " Performing shift-until-bittest idiom detection.\n"); 2148 2149 // Give up if the loop has multiple blocks or multiple backedges. 2150 if (CurLoop->getNumBlocks() != 1 || CurLoop->getNumBackEdges() != 1) { 2151 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad block/backedge count.\n"); 2152 return false; 2153 } 2154 2155 BasicBlock *LoopHeaderBB = CurLoop->getHeader(); 2156 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader(); 2157 assert(LoopPreheaderBB && "There is always a loop preheader."); 2158 2159 using namespace PatternMatch; 2160 2161 // Step 1: Check if the loop backedge is in desirable form. 2162 2163 ICmpInst::Predicate Pred; 2164 Value *CmpLHS, *CmpRHS; 2165 BasicBlock *TrueBB, *FalseBB; 2166 if (!match(LoopHeaderBB->getTerminator(), 2167 m_Br(m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)), 2168 m_BasicBlock(TrueBB), m_BasicBlock(FalseBB)))) { 2169 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge structure.\n"); 2170 return false; 2171 } 2172 2173 // Step 2: Check if the backedge's condition is in desirable form. 2174 2175 auto MatchVariableBitMask = [&]() { 2176 return ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero()) && 2177 match(CmpLHS, 2178 m_c_And(m_Value(CurrX), 2179 m_CombineAnd( 2180 m_Value(BitMask), 2181 m_LoopInvariant(m_Shl(m_One(), m_Value(BitPos)), 2182 CurLoop)))); 2183 }; 2184 auto MatchConstantBitMask = [&]() { 2185 return ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero()) && 2186 match(CmpLHS, m_And(m_Value(CurrX), 2187 m_CombineAnd(m_Value(BitMask), m_Power2()))) && 2188 (BitPos = ConstantExpr::getExactLogBase2(cast<Constant>(BitMask))); 2189 }; 2190 auto MatchDecomposableConstantBitMask = [&]() { 2191 APInt Mask; 2192 return llvm::decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, CurrX, Mask) && 2193 ICmpInst::isEquality(Pred) && Mask.isPowerOf2() && 2194 (BitMask = ConstantInt::get(CurrX->getType(), Mask)) && 2195 (BitPos = ConstantInt::get(CurrX->getType(), Mask.logBase2())); 2196 }; 2197 2198 if (!MatchVariableBitMask() && !MatchConstantBitMask() && 2199 !MatchDecomposableConstantBitMask()) { 2200 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge comparison.\n"); 2201 return false; 2202 } 2203 2204 // Step 3: Check if the recurrence is in desirable form. 2205 auto *CurrXPN = dyn_cast<PHINode>(CurrX); 2206 if (!CurrXPN || CurrXPN->getParent() != LoopHeaderBB) { 2207 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Not an expected PHI node.\n"); 2208 return false; 2209 } 2210 2211 BaseX = CurrXPN->getIncomingValueForBlock(LoopPreheaderBB); 2212 NextX = 2213 dyn_cast<Instruction>(CurrXPN->getIncomingValueForBlock(LoopHeaderBB)); 2214 2215 assert(CurLoop->isLoopInvariant(BaseX) && 2216 "Expected BaseX to be avaliable in the preheader!"); 2217 2218 if (!NextX || !match(NextX, m_Shl(m_Specific(CurrX), m_One()))) { 2219 // FIXME: support right-shift? 2220 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad recurrence.\n"); 2221 return false; 2222 } 2223 2224 // Step 4: Check if the backedge's destinations are in desirable form. 2225 2226 assert(ICmpInst::isEquality(Pred) && 2227 "Should only get equality predicates here."); 2228 2229 // cmp-br is commutative, so canonicalize to a single variant. 2230 if (Pred != ICmpInst::Predicate::ICMP_EQ) { 2231 Pred = ICmpInst::getInversePredicate(Pred); 2232 std::swap(TrueBB, FalseBB); 2233 } 2234 2235 // We expect to exit loop when comparison yields false, 2236 // so when it yields true we should branch back to loop header. 2237 if (TrueBB != LoopHeaderBB) { 2238 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge flow.\n"); 2239 return false; 2240 } 2241 2242 // Okay, idiom checks out. 2243 return true; 2244 } 2245 2246 /// Look for the following loop: 2247 /// \code 2248 /// entry: 2249 /// <...> 2250 /// %bitmask = shl i32 1, %bitpos 2251 /// br label %loop 2252 /// 2253 /// loop: 2254 /// %x.curr = phi i32 [ %x, %entry ], [ %x.next, %loop ] 2255 /// %x.curr.bitmasked = and i32 %x.curr, %bitmask 2256 /// %x.curr.isbitunset = icmp eq i32 %x.curr.bitmasked, 0 2257 /// %x.next = shl i32 %x.curr, 1 2258 /// <...> 2259 /// br i1 %x.curr.isbitunset, label %loop, label %end 2260 /// 2261 /// end: 2262 /// %x.curr.res = phi i32 [ %x.curr, %loop ] <...> 2263 /// %x.next.res = phi i32 [ %x.next, %loop ] <...> 2264 /// <...> 2265 /// \endcode 2266 /// 2267 /// And transform it into: 2268 /// \code 2269 /// entry: 2270 /// %bitmask = shl i32 1, %bitpos 2271 /// %lowbitmask = add i32 %bitmask, -1 2272 /// %mask = or i32 %lowbitmask, %bitmask 2273 /// %x.masked = and i32 %x, %mask 2274 /// %x.masked.numleadingzeros = call i32 @llvm.ctlz.i32(i32 %x.masked, 2275 /// i1 true) 2276 /// %x.masked.numactivebits = sub i32 32, %x.masked.numleadingzeros 2277 /// %x.masked.leadingonepos = add i32 %x.masked.numactivebits, -1 2278 /// %backedgetakencount = sub i32 %bitpos, %x.masked.leadingonepos 2279 /// %tripcount = add i32 %backedgetakencount, 1 2280 /// %x.curr = shl i32 %x, %backedgetakencount 2281 /// %x.next = shl i32 %x, %tripcount 2282 /// br label %loop 2283 /// 2284 /// loop: 2285 /// %loop.iv = phi i32 [ 0, %entry ], [ %loop.iv.next, %loop ] 2286 /// %loop.iv.next = add nuw i32 %loop.iv, 1 2287 /// %loop.ivcheck = icmp eq i32 %loop.iv.next, %tripcount 2288 /// <...> 2289 /// br i1 %loop.ivcheck, label %end, label %loop 2290 /// 2291 /// end: 2292 /// %x.curr.res = phi i32 [ %x.curr, %loop ] <...> 2293 /// %x.next.res = phi i32 [ %x.next, %loop ] <...> 2294 /// <...> 2295 /// \endcode 2296 bool LoopIdiomRecognize::recognizeShiftUntilBitTest() { 2297 bool MadeChange = false; 2298 2299 Value *X, *BitMask, *BitPos, *XCurr; 2300 Instruction *XNext; 2301 if (!detectShiftUntilBitTestIdiom(CurLoop, X, BitMask, BitPos, XCurr, 2302 XNext)) { 2303 LLVM_DEBUG(dbgs() << DEBUG_TYPE 2304 " shift-until-bittest idiom detection failed.\n"); 2305 return MadeChange; 2306 } 2307 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-bittest idiom detected!\n"); 2308 2309 // Ok, it is the idiom we were looking for, we *could* transform this loop, 2310 // but is it profitable to transform? 2311 2312 BasicBlock *LoopHeaderBB = CurLoop->getHeader(); 2313 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader(); 2314 assert(LoopPreheaderBB && "There is always a loop preheader."); 2315 2316 BasicBlock *SuccessorBB = CurLoop->getExitBlock(); 2317 assert(SuccessorBB && "There is only a single successor."); 2318 2319 IRBuilder<> Builder(LoopPreheaderBB->getTerminator()); 2320 Builder.SetCurrentDebugLocation(cast<Instruction>(XCurr)->getDebugLoc()); 2321 2322 Intrinsic::ID IntrID = Intrinsic::ctlz; 2323 Type *Ty = X->getType(); 2324 unsigned Bitwidth = Ty->getScalarSizeInBits(); 2325 2326 TargetTransformInfo::TargetCostKind CostKind = 2327 TargetTransformInfo::TCK_SizeAndLatency; 2328 2329 // The rewrite is considered to be unprofitable iff and only iff the 2330 // intrinsic/shift we'll use are not cheap. Note that we are okay with *just* 2331 // making the loop countable, even if nothing else changes. 2332 IntrinsicCostAttributes Attrs( 2333 IntrID, Ty, {UndefValue::get(Ty), /*is_zero_undef=*/Builder.getTrue()}); 2334 InstructionCost Cost = TTI->getIntrinsicInstrCost(Attrs, CostKind); 2335 if (Cost > TargetTransformInfo::TCC_Basic) { 2336 LLVM_DEBUG(dbgs() << DEBUG_TYPE 2337 " Intrinsic is too costly, not beneficial\n"); 2338 return MadeChange; 2339 } 2340 if (TTI->getArithmeticInstrCost(Instruction::Shl, Ty, CostKind) > 2341 TargetTransformInfo::TCC_Basic) { 2342 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Shift is too costly, not beneficial\n"); 2343 return MadeChange; 2344 } 2345 2346 // Ok, transform appears worthwhile. 2347 MadeChange = true; 2348 2349 // Step 1: Compute the loop trip count. 2350 2351 Value *LowBitMask = Builder.CreateAdd(BitMask, Constant::getAllOnesValue(Ty), 2352 BitPos->getName() + ".lowbitmask"); 2353 Value *Mask = 2354 Builder.CreateOr(LowBitMask, BitMask, BitPos->getName() + ".mask"); 2355 Value *XMasked = Builder.CreateAnd(X, Mask, X->getName() + ".masked"); 2356 CallInst *XMaskedNumLeadingZeros = Builder.CreateIntrinsic( 2357 IntrID, Ty, {XMasked, /*is_zero_undef=*/Builder.getTrue()}, 2358 /*FMFSource=*/nullptr, XMasked->getName() + ".numleadingzeros"); 2359 Value *XMaskedNumActiveBits = Builder.CreateSub( 2360 ConstantInt::get(Ty, Ty->getScalarSizeInBits()), XMaskedNumLeadingZeros, 2361 XMasked->getName() + ".numactivebits", /*HasNUW=*/true, 2362 /*HasNSW=*/Bitwidth != 2); 2363 Value *XMaskedLeadingOnePos = 2364 Builder.CreateAdd(XMaskedNumActiveBits, Constant::getAllOnesValue(Ty), 2365 XMasked->getName() + ".leadingonepos", /*HasNUW=*/false, 2366 /*HasNSW=*/Bitwidth > 2); 2367 2368 Value *LoopBackedgeTakenCount = Builder.CreateSub( 2369 BitPos, XMaskedLeadingOnePos, CurLoop->getName() + ".backedgetakencount", 2370 /*HasNUW=*/true, /*HasNSW=*/true); 2371 // We know loop's backedge-taken count, but what's loop's trip count? 2372 // Note that while NUW is always safe, while NSW is only for bitwidths != 2. 2373 Value *LoopTripCount = 2374 Builder.CreateAdd(LoopBackedgeTakenCount, ConstantInt::get(Ty, 1), 2375 CurLoop->getName() + ".tripcount", /*HasNUW=*/true, 2376 /*HasNSW=*/Bitwidth != 2); 2377 2378 // Step 2: Compute the recurrence's final value without a loop. 2379 2380 // NewX is always safe to compute, because `LoopBackedgeTakenCount` 2381 // will always be smaller than `bitwidth(X)`, i.e. we never get poison. 2382 Value *NewX = Builder.CreateShl(X, LoopBackedgeTakenCount); 2383 NewX->takeName(XCurr); 2384 if (auto *I = dyn_cast<Instruction>(NewX)) 2385 I->copyIRFlags(XNext, /*IncludeWrapFlags=*/true); 2386 2387 Value *NewXNext; 2388 // Rewriting XNext is more complicated, however, because `X << LoopTripCount` 2389 // will be poison iff `LoopTripCount == bitwidth(X)` (which will happen 2390 // iff `BitPos` is `bitwidth(x) - 1` and `X` is `1`). So unless we know 2391 // that isn't the case, we'll need to emit an alternative, safe IR. 2392 if (XNext->hasNoSignedWrap() || XNext->hasNoUnsignedWrap() || 2393 PatternMatch::match( 2394 BitPos, PatternMatch::m_SpecificInt_ICMP( 2395 ICmpInst::ICMP_NE, APInt(Ty->getScalarSizeInBits(), 2396 Ty->getScalarSizeInBits() - 1)))) 2397 NewXNext = Builder.CreateShl(X, LoopTripCount); 2398 else { 2399 // Otherwise, just additionally shift by one. It's the smallest solution, 2400 // alternatively, we could check that NewX is INT_MIN (or BitPos is ) 2401 // and select 0 instead. 2402 NewXNext = Builder.CreateShl(NewX, ConstantInt::get(Ty, 1)); 2403 } 2404 2405 NewXNext->takeName(XNext); 2406 if (auto *I = dyn_cast<Instruction>(NewXNext)) 2407 I->copyIRFlags(XNext, /*IncludeWrapFlags=*/true); 2408 2409 // Step 3: Adjust the successor basic block to recieve the computed 2410 // recurrence's final value instead of the recurrence itself. 2411 2412 XCurr->replaceUsesOutsideBlock(NewX, LoopHeaderBB); 2413 XNext->replaceUsesOutsideBlock(NewXNext, LoopHeaderBB); 2414 2415 // Step 4: Rewrite the loop into a countable form, with canonical IV. 2416 2417 // The new canonical induction variable. 2418 Builder.SetInsertPoint(&LoopHeaderBB->front()); 2419 auto *IV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv"); 2420 2421 // The induction itself. 2422 // Note that while NUW is always safe, while NSW is only for bitwidths != 2. 2423 Builder.SetInsertPoint(LoopHeaderBB->getTerminator()); 2424 auto *IVNext = 2425 Builder.CreateAdd(IV, ConstantInt::get(Ty, 1), IV->getName() + ".next", 2426 /*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2); 2427 2428 // The loop trip count check. 2429 auto *IVCheck = Builder.CreateICmpEQ(IVNext, LoopTripCount, 2430 CurLoop->getName() + ".ivcheck"); 2431 Builder.CreateCondBr(IVCheck, SuccessorBB, LoopHeaderBB); 2432 LoopHeaderBB->getTerminator()->eraseFromParent(); 2433 2434 // Populate the IV PHI. 2435 IV->addIncoming(ConstantInt::get(Ty, 0), LoopPreheaderBB); 2436 IV->addIncoming(IVNext, LoopHeaderBB); 2437 2438 // Step 5: Forget the "non-computable" trip-count SCEV associated with the 2439 // loop. The loop would otherwise not be deleted even if it becomes empty. 2440 2441 SE->forgetLoop(CurLoop); 2442 2443 // Other passes will take care of actually deleting the loop if possible. 2444 2445 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-bittest idiom optimized!\n"); 2446 2447 ++NumShiftUntilBitTest; 2448 return MadeChange; 2449 } 2450 2451 /// Return true if the idiom is detected in the loop. 2452 /// 2453 /// The core idiom we are trying to detect is: 2454 /// \code 2455 /// entry: 2456 /// <...> 2457 /// %start = <...> 2458 /// %extraoffset = <...> 2459 /// <...> 2460 /// br label %for.cond 2461 /// 2462 /// loop: 2463 /// %iv = phi i8 [ %start, %entry ], [ %iv.next, %for.cond ] 2464 /// %nbits = add nsw i8 %iv, %extraoffset 2465 /// %val.shifted = {{l,a}shr,shl} i8 %val, %nbits 2466 /// %val.shifted.iszero = icmp eq i8 %val.shifted, 0 2467 /// %iv.next = add i8 %iv, 1 2468 /// <...> 2469 /// br i1 %val.shifted.iszero, label %end, label %loop 2470 /// 2471 /// end: 2472 /// %iv.res = phi i8 [ %iv, %loop ] <...> 2473 /// %nbits.res = phi i8 [ %nbits, %loop ] <...> 2474 /// %val.shifted.res = phi i8 [ %val.shifted, %loop ] <...> 2475 /// %val.shifted.iszero.res = phi i1 [ %val.shifted.iszero, %loop ] <...> 2476 /// %iv.next.res = phi i8 [ %iv.next, %loop ] <...> 2477 /// <...> 2478 /// \endcode 2479 static bool detectShiftUntilZeroIdiom(Loop *CurLoop, ScalarEvolution *SE, 2480 Instruction *&ValShiftedIsZero, 2481 Intrinsic::ID &IntrinID, Instruction *&IV, 2482 Value *&Start, Value *&Val, 2483 const SCEV *&ExtraOffsetExpr, 2484 bool &InvertedCond) { 2485 LLVM_DEBUG(dbgs() << DEBUG_TYPE 2486 " Performing shift-until-zero idiom detection.\n"); 2487 2488 // Give up if the loop has multiple blocks or multiple backedges. 2489 if (CurLoop->getNumBlocks() != 1 || CurLoop->getNumBackEdges() != 1) { 2490 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad block/backedge count.\n"); 2491 return false; 2492 } 2493 2494 Instruction *ValShifted, *NBits, *IVNext; 2495 Value *ExtraOffset; 2496 2497 BasicBlock *LoopHeaderBB = CurLoop->getHeader(); 2498 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader(); 2499 assert(LoopPreheaderBB && "There is always a loop preheader."); 2500 2501 using namespace PatternMatch; 2502 2503 // Step 1: Check if the loop backedge, condition is in desirable form. 2504 2505 ICmpInst::Predicate Pred; 2506 BasicBlock *TrueBB, *FalseBB; 2507 if (!match(LoopHeaderBB->getTerminator(), 2508 m_Br(m_Instruction(ValShiftedIsZero), m_BasicBlock(TrueBB), 2509 m_BasicBlock(FalseBB))) || 2510 !match(ValShiftedIsZero, 2511 m_ICmp(Pred, m_Instruction(ValShifted), m_Zero())) || 2512 !ICmpInst::isEquality(Pred)) { 2513 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge structure.\n"); 2514 return false; 2515 } 2516 2517 // Step 2: Check if the comparison's operand is in desirable form. 2518 // FIXME: Val could be a one-input PHI node, which we should look past. 2519 if (!match(ValShifted, m_Shift(m_LoopInvariant(m_Value(Val), CurLoop), 2520 m_Instruction(NBits)))) { 2521 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad comparisons value computation.\n"); 2522 return false; 2523 } 2524 IntrinID = ValShifted->getOpcode() == Instruction::Shl ? Intrinsic::cttz 2525 : Intrinsic::ctlz; 2526 2527 // Step 3: Check if the shift amount is in desirable form. 2528 2529 if (match(NBits, m_c_Add(m_Instruction(IV), 2530 m_LoopInvariant(m_Value(ExtraOffset), CurLoop))) && 2531 (NBits->hasNoSignedWrap() || NBits->hasNoUnsignedWrap())) 2532 ExtraOffsetExpr = SE->getNegativeSCEV(SE->getSCEV(ExtraOffset)); 2533 else if (match(NBits, 2534 m_Sub(m_Instruction(IV), 2535 m_LoopInvariant(m_Value(ExtraOffset), CurLoop))) && 2536 NBits->hasNoSignedWrap()) 2537 ExtraOffsetExpr = SE->getSCEV(ExtraOffset); 2538 else { 2539 IV = NBits; 2540 ExtraOffsetExpr = SE->getZero(NBits->getType()); 2541 } 2542 2543 // Step 4: Check if the recurrence is in desirable form. 2544 auto *IVPN = dyn_cast<PHINode>(IV); 2545 if (!IVPN || IVPN->getParent() != LoopHeaderBB) { 2546 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Not an expected PHI node.\n"); 2547 return false; 2548 } 2549 2550 Start = IVPN->getIncomingValueForBlock(LoopPreheaderBB); 2551 IVNext = dyn_cast<Instruction>(IVPN->getIncomingValueForBlock(LoopHeaderBB)); 2552 2553 if (!IVNext || !match(IVNext, m_Add(m_Specific(IVPN), m_One()))) { 2554 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad recurrence.\n"); 2555 return false; 2556 } 2557 2558 // Step 4: Check if the backedge's destinations are in desirable form. 2559 2560 assert(ICmpInst::isEquality(Pred) && 2561 "Should only get equality predicates here."); 2562 2563 // cmp-br is commutative, so canonicalize to a single variant. 2564 InvertedCond = Pred != ICmpInst::Predicate::ICMP_EQ; 2565 if (InvertedCond) { 2566 Pred = ICmpInst::getInversePredicate(Pred); 2567 std::swap(TrueBB, FalseBB); 2568 } 2569 2570 // We expect to exit loop when comparison yields true, 2571 // so when it yields false we should branch back to loop header. 2572 if (FalseBB != LoopHeaderBB) { 2573 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge flow.\n"); 2574 return false; 2575 } 2576 2577 // The new, countable, loop will certainly only run a known number of 2578 // iterations, It won't be infinite. But the old loop might be infinite 2579 // under certain conditions. For logical shifts, the value will become zero 2580 // after at most bitwidth(%Val) loop iterations. However, for arithmetic 2581 // right-shift, iff the sign bit was set, the value will never become zero, 2582 // and the loop may never finish. 2583 if (ValShifted->getOpcode() == Instruction::AShr && 2584 !isMustProgress(CurLoop) && !SE->isKnownNonNegative(SE->getSCEV(Val))) { 2585 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Can not prove the loop is finite.\n"); 2586 return false; 2587 } 2588 2589 // Okay, idiom checks out. 2590 return true; 2591 } 2592 2593 /// Look for the following loop: 2594 /// \code 2595 /// entry: 2596 /// <...> 2597 /// %start = <...> 2598 /// %extraoffset = <...> 2599 /// <...> 2600 /// br label %for.cond 2601 /// 2602 /// loop: 2603 /// %iv = phi i8 [ %start, %entry ], [ %iv.next, %for.cond ] 2604 /// %nbits = add nsw i8 %iv, %extraoffset 2605 /// %val.shifted = {{l,a}shr,shl} i8 %val, %nbits 2606 /// %val.shifted.iszero = icmp eq i8 %val.shifted, 0 2607 /// %iv.next = add i8 %iv, 1 2608 /// <...> 2609 /// br i1 %val.shifted.iszero, label %end, label %loop 2610 /// 2611 /// end: 2612 /// %iv.res = phi i8 [ %iv, %loop ] <...> 2613 /// %nbits.res = phi i8 [ %nbits, %loop ] <...> 2614 /// %val.shifted.res = phi i8 [ %val.shifted, %loop ] <...> 2615 /// %val.shifted.iszero.res = phi i1 [ %val.shifted.iszero, %loop ] <...> 2616 /// %iv.next.res = phi i8 [ %iv.next, %loop ] <...> 2617 /// <...> 2618 /// \endcode 2619 /// 2620 /// And transform it into: 2621 /// \code 2622 /// entry: 2623 /// <...> 2624 /// %start = <...> 2625 /// %extraoffset = <...> 2626 /// <...> 2627 /// %val.numleadingzeros = call i8 @llvm.ct{l,t}z.i8(i8 %val, i1 0) 2628 /// %val.numactivebits = sub i8 8, %val.numleadingzeros 2629 /// %extraoffset.neg = sub i8 0, %extraoffset 2630 /// %tmp = add i8 %val.numactivebits, %extraoffset.neg 2631 /// %iv.final = call i8 @llvm.smax.i8(i8 %tmp, i8 %start) 2632 /// %loop.tripcount = sub i8 %iv.final, %start 2633 /// br label %loop 2634 /// 2635 /// loop: 2636 /// %loop.iv = phi i8 [ 0, %entry ], [ %loop.iv.next, %loop ] 2637 /// %loop.iv.next = add i8 %loop.iv, 1 2638 /// %loop.ivcheck = icmp eq i8 %loop.iv.next, %loop.tripcount 2639 /// %iv = add i8 %loop.iv, %start 2640 /// <...> 2641 /// br i1 %loop.ivcheck, label %end, label %loop 2642 /// 2643 /// end: 2644 /// %iv.res = phi i8 [ %iv.final, %loop ] <...> 2645 /// <...> 2646 /// \endcode 2647 bool LoopIdiomRecognize::recognizeShiftUntilZero() { 2648 bool MadeChange = false; 2649 2650 Instruction *ValShiftedIsZero; 2651 Intrinsic::ID IntrID; 2652 Instruction *IV; 2653 Value *Start, *Val; 2654 const SCEV *ExtraOffsetExpr; 2655 bool InvertedCond; 2656 if (!detectShiftUntilZeroIdiom(CurLoop, SE, ValShiftedIsZero, IntrID, IV, 2657 Start, Val, ExtraOffsetExpr, InvertedCond)) { 2658 LLVM_DEBUG(dbgs() << DEBUG_TYPE 2659 " shift-until-zero idiom detection failed.\n"); 2660 return MadeChange; 2661 } 2662 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-zero idiom detected!\n"); 2663 2664 // Ok, it is the idiom we were looking for, we *could* transform this loop, 2665 // but is it profitable to transform? 2666 2667 BasicBlock *LoopHeaderBB = CurLoop->getHeader(); 2668 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader(); 2669 assert(LoopPreheaderBB && "There is always a loop preheader."); 2670 2671 BasicBlock *SuccessorBB = CurLoop->getExitBlock(); 2672 assert(SuccessorBB && "There is only a single successor."); 2673 2674 IRBuilder<> Builder(LoopPreheaderBB->getTerminator()); 2675 Builder.SetCurrentDebugLocation(IV->getDebugLoc()); 2676 2677 Type *Ty = Val->getType(); 2678 unsigned Bitwidth = Ty->getScalarSizeInBits(); 2679 2680 TargetTransformInfo::TargetCostKind CostKind = 2681 TargetTransformInfo::TCK_SizeAndLatency; 2682 2683 // The rewrite is considered to be unprofitable iff and only iff the 2684 // intrinsic we'll use are not cheap. Note that we are okay with *just* 2685 // making the loop countable, even if nothing else changes. 2686 IntrinsicCostAttributes Attrs( 2687 IntrID, Ty, {UndefValue::get(Ty), /*is_zero_undef=*/Builder.getFalse()}); 2688 InstructionCost Cost = TTI->getIntrinsicInstrCost(Attrs, CostKind); 2689 if (Cost > TargetTransformInfo::TCC_Basic) { 2690 LLVM_DEBUG(dbgs() << DEBUG_TYPE 2691 " Intrinsic is too costly, not beneficial\n"); 2692 return MadeChange; 2693 } 2694 2695 // Ok, transform appears worthwhile. 2696 MadeChange = true; 2697 2698 bool OffsetIsZero = false; 2699 if (auto *ExtraOffsetExprC = dyn_cast<SCEVConstant>(ExtraOffsetExpr)) 2700 OffsetIsZero = ExtraOffsetExprC->isZero(); 2701 2702 // Step 1: Compute the loop's final IV value / trip count. 2703 2704 CallInst *ValNumLeadingZeros = Builder.CreateIntrinsic( 2705 IntrID, Ty, {Val, /*is_zero_undef=*/Builder.getFalse()}, 2706 /*FMFSource=*/nullptr, Val->getName() + ".numleadingzeros"); 2707 Value *ValNumActiveBits = Builder.CreateSub( 2708 ConstantInt::get(Ty, Ty->getScalarSizeInBits()), ValNumLeadingZeros, 2709 Val->getName() + ".numactivebits", /*HasNUW=*/true, 2710 /*HasNSW=*/Bitwidth != 2); 2711 2712 SCEVExpander Expander(*SE, *DL, "loop-idiom"); 2713 Expander.setInsertPoint(&*Builder.GetInsertPoint()); 2714 Value *ExtraOffset = Expander.expandCodeFor(ExtraOffsetExpr); 2715 2716 Value *ValNumActiveBitsOffset = Builder.CreateAdd( 2717 ValNumActiveBits, ExtraOffset, ValNumActiveBits->getName() + ".offset", 2718 /*HasNUW=*/OffsetIsZero, /*HasNSW=*/true); 2719 Value *IVFinal = Builder.CreateIntrinsic(Intrinsic::smax, {Ty}, 2720 {ValNumActiveBitsOffset, Start}, 2721 /*FMFSource=*/nullptr, "iv.final"); 2722 2723 auto *LoopBackedgeTakenCount = cast<Instruction>(Builder.CreateSub( 2724 IVFinal, Start, CurLoop->getName() + ".backedgetakencount", 2725 /*HasNUW=*/OffsetIsZero, /*HasNSW=*/true)); 2726 // FIXME: or when the offset was `add nuw` 2727 2728 // We know loop's backedge-taken count, but what's loop's trip count? 2729 Value *LoopTripCount = 2730 Builder.CreateAdd(LoopBackedgeTakenCount, ConstantInt::get(Ty, 1), 2731 CurLoop->getName() + ".tripcount", /*HasNUW=*/true, 2732 /*HasNSW=*/Bitwidth != 2); 2733 2734 // Step 2: Adjust the successor basic block to recieve the original 2735 // induction variable's final value instead of the orig. IV itself. 2736 2737 IV->replaceUsesOutsideBlock(IVFinal, LoopHeaderBB); 2738 2739 // Step 3: Rewrite the loop into a countable form, with canonical IV. 2740 2741 // The new canonical induction variable. 2742 Builder.SetInsertPoint(&LoopHeaderBB->front()); 2743 auto *CIV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv"); 2744 2745 // The induction itself. 2746 Builder.SetInsertPoint(LoopHeaderBB->getFirstNonPHI()); 2747 auto *CIVNext = 2748 Builder.CreateAdd(CIV, ConstantInt::get(Ty, 1), CIV->getName() + ".next", 2749 /*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2); 2750 2751 // The loop trip count check. 2752 auto *CIVCheck = Builder.CreateICmpEQ(CIVNext, LoopTripCount, 2753 CurLoop->getName() + ".ivcheck"); 2754 auto *NewIVCheck = CIVCheck; 2755 if (InvertedCond) { 2756 NewIVCheck = Builder.CreateNot(CIVCheck); 2757 NewIVCheck->takeName(ValShiftedIsZero); 2758 } 2759 2760 // The original IV, but rebased to be an offset to the CIV. 2761 auto *IVDePHId = Builder.CreateAdd(CIV, Start, "", /*HasNUW=*/false, 2762 /*HasNSW=*/true); // FIXME: what about NUW? 2763 IVDePHId->takeName(IV); 2764 2765 // The loop terminator. 2766 Builder.SetInsertPoint(LoopHeaderBB->getTerminator()); 2767 Builder.CreateCondBr(CIVCheck, SuccessorBB, LoopHeaderBB); 2768 LoopHeaderBB->getTerminator()->eraseFromParent(); 2769 2770 // Populate the IV PHI. 2771 CIV->addIncoming(ConstantInt::get(Ty, 0), LoopPreheaderBB); 2772 CIV->addIncoming(CIVNext, LoopHeaderBB); 2773 2774 // Step 4: Forget the "non-computable" trip-count SCEV associated with the 2775 // loop. The loop would otherwise not be deleted even if it becomes empty. 2776 2777 SE->forgetLoop(CurLoop); 2778 2779 // Step 5: Try to cleanup the loop's body somewhat. 2780 IV->replaceAllUsesWith(IVDePHId); 2781 IV->eraseFromParent(); 2782 2783 ValShiftedIsZero->replaceAllUsesWith(NewIVCheck); 2784 ValShiftedIsZero->eraseFromParent(); 2785 2786 // Other passes will take care of actually deleting the loop if possible. 2787 2788 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-zero idiom optimized!\n"); 2789 2790 ++NumShiftUntilZero; 2791 return MadeChange; 2792 } 2793