//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
"llvm/IR/ValueHandle.h" 86 #include "llvm/InitializePasses.h" 87 #include "llvm/Pass.h" 88 #include "llvm/Support/Casting.h" 89 #include "llvm/Support/CommandLine.h" 90 #include "llvm/Support/Debug.h" 91 #include "llvm/Support/raw_ostream.h" 92 #include "llvm/Transforms/Scalar.h" 93 #include "llvm/Transforms/Utils/BuildLibCalls.h" 94 #include "llvm/Transforms/Utils/Local.h" 95 #include "llvm/Transforms/Utils/LoopUtils.h" 96 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 97 #include <algorithm> 98 #include <cassert> 99 #include <cstdint> 100 #include <utility> 101 #include <vector> 102 103 using namespace llvm; 104 105 #define DEBUG_TYPE "loop-idiom" 106 107 STATISTIC(NumMemSet, "Number of memset's formed from loop stores"); 108 STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores"); 109 110 static cl::opt<bool> UseLIRCodeSizeHeurs( 111 "use-lir-code-size-heurs", 112 cl::desc("Use loop idiom recognition code size heuristics when compiling" 113 "with -Os/-Oz"), 114 cl::init(true), cl::Hidden); 115 116 namespace { 117 118 class LoopIdiomRecognize { 119 Loop *CurLoop = nullptr; 120 AliasAnalysis *AA; 121 DominatorTree *DT; 122 LoopInfo *LI; 123 ScalarEvolution *SE; 124 TargetLibraryInfo *TLI; 125 const TargetTransformInfo *TTI; 126 const DataLayout *DL; 127 OptimizationRemarkEmitter &ORE; 128 bool ApplyCodeSizeHeuristics; 129 std::unique_ptr<MemorySSAUpdater> MSSAU; 130 131 public: 132 explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT, 133 LoopInfo *LI, ScalarEvolution *SE, 134 TargetLibraryInfo *TLI, 135 const TargetTransformInfo *TTI, MemorySSA *MSSA, 136 const DataLayout *DL, 137 OptimizationRemarkEmitter &ORE) 138 : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) { 139 if (MSSA) 140 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA); 141 } 142 143 bool runOnLoop(Loop *L); 144 145 private: 146 using StoreList = SmallVector<StoreInst *, 8>; 147 using StoreListMap = MapVector<Value *, StoreList>; 148 149 StoreListMap StoreRefsForMemset; 150 StoreListMap StoreRefsForMemsetPattern; 151 StoreList StoreRefsForMemcpy; 152 bool HasMemset; 153 bool HasMemsetPattern; 154 bool HasMemcpy; 155 156 /// Return code for isLegalStore() 157 enum LegalStoreKind { 158 None = 0, 159 Memset, 160 MemsetPattern, 161 Memcpy, 162 UnorderedAtomicMemcpy, 163 DontUse // Dummy retval never to be used. Allows catching errors in retval 164 // handling. 

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  LegalStoreKind isLegalStore(StoreInst *SI);
  enum class ForMemset { No, Yes };
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         ForMemset For);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               MaybeAlign StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);
  bool recognizeAndInsertFFS(); /// Find First Set: ctlz or cttz
  void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB,
                                Instruction *CntInst, PHINode *CntPhi,
                                Value *Var, Instruction *DefX,
                                const DebugLoc &DL, bool ZeroCheck,
                                bool IsCntPhiUsedOutsideLoop);

  /// @}
};

class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;

  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent());
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
    auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    MemorySSA *MSSA = nullptr;
    if (MSSAAnalysis)
      MSSA = &MSSAAnalysis->getMSSA();

    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, MSSA, DL, ORE);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopIdiomRecognizeLegacyPass::ID = 0;

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  const auto *DL = &L.getHeader()->getModule()->getDataLayout();

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI,
                         AR.MSSA, DL, ORE);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }

static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
//          Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

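  // Illustrative note (not from the original source): for a loop
  //   for (i = 0; i < n; ++i) ...
  // the backedge-taken count is n - 1, so BECount == 0 means the loop body
  // runs exactly once.
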
  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Countable Loop %" << CurLoop->getHeader()->getName()
                    << "\n");

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  SimpleLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(CurLoop);
  if (SafetyInfo.anyBlockMayThrow())
    return false;

  bool MadeChange = false;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe
/// to turn into a memset_pattern16, return a ConstantArray of 16 bytes that
/// should be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // FIXME: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If C is larger than 16 bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

LoopIdiomRecognize::LegalStoreKind
LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
  // Don't touch volatile stores.
  if (SI->isVolatile())
    return LegalStoreKind::None;
  // We only want simple or unordered-atomic stores.
  if (!SI->isUnordered())
    return LegalStoreKind::None;

  // Don't convert stores of non-integral pointer types to memsets (which
  // store integers).
  if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
    return LegalStoreKind::None;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return LegalStoreKind::None;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return LegalStoreKind::None;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return LegalStoreKind::None;

  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return LegalStoreKind::None;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  // Note: memset and memset_pattern on unordered-atomic is not yet supported.
  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (!UnorderedAtomic && HasMemset && SplatValue &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    return LegalStoreKind::Memset;
  } else if (!UnorderedAtomic && HasMemsetPattern &&
             // Don't create memset_pattern16s with address spaces.
             StorePtr->getType()->getPointerAddressSpace() == 0 &&
             (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    // It looks like we can use PatternValue!
    return LegalStoreKind::MemsetPattern;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
    // Check to see if the stride matches the size of the store. If so, then
    // we know that every byte is touched in the loop.
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
    if (StoreSize != Stride && StoreSize != -Stride)
      return LegalStoreKind::None;

    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());

    // Only allow non-volatile loads.
    if (!LI || LI->isVolatile())
      return LegalStoreKind::None;
    // Only allow simple or unordered-atomic loads.
    if (!LI->isUnordered())
      return LegalStoreKind::None;

    // See if the pointer expression is an AddRec like {base,+,1} on the
    // current loop, which indicates a strided load. If we have something
    // else, it's a random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return LegalStoreKind::None;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return LegalStoreKind::None;

    // Success. This store can be converted into a memcpy.
    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
                           : LegalStoreKind::Memcpy;
  }
  // This store can't be transformed into a memset/memcpy.
  return LegalStoreKind::None;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Make sure this is a strided store with a constant stride.
    switch (isLegalStore(SI)) {
    case LegalStoreKind::None:
      // Nothing to do
      break;
    case LegalStoreKind::Memset: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemset[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::MemsetPattern: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::Memcpy:
    case LegalStoreKind::UnorderedAtomicMemcpy:
      StoreRefsForMemcpy.push_back(SI);
      break;
    default:
      assert(false && "unhandled return value");
      break;
    }
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No);

  // Optimize the store into a memcpy, if it is fed by a similarly strided
  // load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakTrackingVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}

/// See if this store, or set of stores, can be promoted to a memset.
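/// For example (an illustrative sketch, not from the original source), the
/// two stores produced by a hand-unrolled loop such as
/// \code
///   for (i) { A[2*i] = 0; A[2*i+1] = 0; }  // stride 2, one byte each
/// \endcode
/// form a consecutive chain that together touches every byte, so the pair can
/// become a single memset even though neither store qualifies on its own.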
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount, ForMemset For) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize =
        DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (For == ForMemset::Yes)
      FirstSplatValue = isBytewiseValue(FirstStoredVal, *DL);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance of finding a memset
    // opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (For == ForMemset::Yes)
        SecondSplatValue = isBytewiseValue(SecondStoredVal, *DL);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (For == ForMemset::Yes) {
          if (isa<UndefValue>(FirstSplatValue))
            FirstSplatValue = SecondSplatValue;
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (isa<UndefValue>(FirstPatternValue))
            FirstPatternValue = SecondPatternValue;
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

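  // Illustrative example (not from the original source): for three stores
  // found consecutive as S0 -> S1 -> S2, we end up with Heads = {S0, S1},
  // Tails = {S1, S2} and ConsecutiveChain = {S0: S1, S1: S2}. The walk below
  // starts only at S0, the head that is not also a tail, and collects the
  // whole chain.
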
  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we transformed so that we don't visit the same store
  // twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize,
                                MaybeAlign(HeadStore->getAlignment()),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(
      Pointer, (unsigned)SizeInBytes, MaybeAlign(MSI->getDestAlignment()),
      SplatValue, MSI, MSIs, Ev, BECount, NegStride, /*IsLoopMemset=*/true);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location
  // starts at the pointer and has infinite size.
  LocationSize AccessSize = LocationSize::unknown();

  // If the loop iterates a fixed number of times, we can refine the access
  // size to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) *
                                       StoreSize);

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i], with i covering 100 elements,
  // will always return MayAlias with a store to &A[100]; we need StoreLoc to
  // be "A" with a size of 100, which would then be NoAlias with a store to
  // &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          isModOrRefSet(
              intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
        return true;

  return false;
}

// If we have a negative stride, Start refers to the end of the memory
// location we're trying to memset. Therefore, we need to recompute the base
// pointer, which is just Start - BECount*Size.
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}

/// Compute the number of bytes as a SCEV from the backedge taken count.
///
/// This also maps the SCEV into the provided type and tries to handle the
/// computation in a way that will fold cleanly.
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
                               unsigned StoreSize, Loop *CurLoop,
                               const DataLayout *DL, ScalarEvolution *SE) {
  const SCEV *NumBytesS;
  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  //
  // If we're going to need to zero extend the BE count, check if we can add
  // one to it prior to zero extending without overflow. Provided this is safe,
  // it allows better simplification of the +1.
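  //
  // Illustrative example (not from the original source): with an i8 BECount
  // of 255 and a 64-bit index type, zext(255) + 1 == 256 as intended, whereas
  // adding first, zext(i8 (255 + 1)), would wrap to 0. The guard below only
  // adds one before the zero extend when loop entry guarantees BECount != -1.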
  if (DL->getTypeSizeInBits(BECount->getType()) <
          DL->getTypeSizeInBits(IntPtr) &&
      SE->isLoopEntryGuardedByCond(
          CurLoop, ICmpInst::ICMP_NE, BECount,
          SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
    NumBytesS = SE->getZeroExtendExpr(
        SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
        IntPtr);
  } else {
    NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
                               SE->getOne(IntPtr), SCEV::FlagNUW);
  }

  // And scale it based on the store size.
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }
  return NumBytesS;
}

/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do
/// so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, MaybeAlign StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate
  // the header. This allows us to insert code for them in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());

  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(Start, *SE))
    return false;

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
  if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
    return false;
  }

  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return false;

  // Okay, everything looks good, insert the memset.

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(NumBytesS, *SE))
    return false;

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
                                   MaybeAlign(StoreAlignment));
  } else {
    // Everything is emitted in the default address space.
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    StringRef FuncName = "memset_pattern16";
    FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
    inferLibFuncAttributes(M, FuncName, *TLI);

    // Otherwise we should form a memset_pattern16. PatternValue is known to
    // be a constant array of 16 bytes. Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(Align(16));
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
                    << "    from store to: " << *Ev << " at: " << *TheStore
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStridedStore",
                              NewCall->getDebugLoc(), Preheader)
           << "Transformed loop-strided store into a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() function";
  });

  // Okay, the memset has been formed. Zap the original store and anything
  // that feeds into it.
  for (auto *I : Stores) {
    if (MSSAU)
      MSSAU->removeMemoryAccess(I, true);
    deleteDeadInstruction(I);
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  ++NumMemSet;
  return true;
}

class ExpandedValuesCleaner {
  SCEVExpander &Expander;
  TargetLibraryInfo *TLI;
  SmallVector<Value *, 4> ExpandedValues;
  bool Commit = false;

public:
  ExpandedValuesCleaner(SCEVExpander &Expander, TargetLibraryInfo *TLI)
      : Expander(Expander), TLI(TLI) {}

  void add(Value *V) { ExpandedValues.push_back(V); }

  void commit() { Commit = true; }

  ~ExpandedValuesCleaner() {
    if (!Commit) {
      Expander.clear();
      for (auto *V : ExpandedValues)
        RecursivelyDeleteTriviallyDeadInstructions(V, TLI);
    }
  }
};

/// If the stored value is a strided load in the same loop with the same
/// stride, this may be transformable into a memcpy. This kicks in for stuff
/// like
///   for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  APInt Stride = getStoreStride(StoreEv);
  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
  bool NegStride = StoreSize == -Stride;

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  const SCEVAddRecExpr *LoadEv =
      cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate
  // the header. This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  ExpandedValuesCleaner EVC(Expander, TLI);

  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = SI->getPointerAddressSpace();
  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));

  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
  EVC.add(StoreBasePtr);

  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
  if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores))
    return false;

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = LI->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
  EVC.add(LoadBasePtr);

  if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
                            StoreSize, *AA, Stores))
    return false;

  if (avoidLIRForMultiBlockLoop())
    return false;

  // Okay, everything is safe, we can transform this!
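
  // Illustrative note (not from the original source): the number of bytes
  // copied is (BECount + 1) * StoreSize, e.g. a loop of n iterations copying
  // i32 elements has BECount == n - 1 and becomes a memcpy of 4 * n bytes.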

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
  EVC.add(NumBytes);

  CallInst *NewCall = nullptr;
  // Check whether to generate an unordered atomic memcpy:
  //  If the load or store are atomic, then they must necessarily be unordered
  //  by previous checks.
  if (!SI->isAtomic() && !LI->isAtomic())
    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
                                   LI->getAlign(), NumBytes);
  else {
    // We cannot allow unaligned ops for unordered load/store, so reject
    // anything where the alignment isn't at least the element size.
    const Align StoreAlign = SI->getAlign();
    const Align LoadAlign = LI->getAlign();
    if (StoreAlign < StoreSize || LoadAlign < StoreSize)
      return false;

    // If the element.atomic memcpy is not lowered into explicit
    // loads/stores later, then it will be lowered into an element-size
    // specific lib call. If the lib call doesn't exist for our store size,
    // then we shouldn't generate the memcpy.
    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
      return false;

    // Create the call.
    // Note that unordered atomic loads/stores are *required* by the spec to
    // have an alignment but non-atomic loads/stores may not.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes, StoreSize);
  }
  NewCall->setDebugLoc(SI->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
                    << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
                    << "    from store ptr=" << *StoreEv << " at: " << *SI
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad",
                              NewCall->getDebugLoc(), Preheader)
           << "Formed a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() function";
  });

  // Okay, the memcpy has been formed. Zap the original store and anything
  // that feeds into it.
  if (MSSAU)
    MSSAU->removeMemoryAccess(SI, true);
  deleteDeadInstruction(SI);
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  ++NumMemCpy;
  EVC.commit();
  return true;
}

// When compiling for code size we avoid idiom recognition for a multi-block
// loop unless it is a loop_memset idiom or a memset/memcpy idiom in a nested
// loop.
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
                                                   bool IsLoopMemset) {
  if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
    if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
      LLVM_DEBUG(dbgs() << "  " << CurLoop->getHeader()->getParent()->getName()
                        << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
"Memset" : "Memcpy") 1192 << " avoided: multi-block top-level loop\n"); 1193 return true; 1194 } 1195 } 1196 1197 return false; 1198 } 1199 1200 bool LoopIdiomRecognize::runOnNoncountableLoop() { 1201 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F[" 1202 << CurLoop->getHeader()->getParent()->getName() 1203 << "] Noncountable Loop %" 1204 << CurLoop->getHeader()->getName() << "\n"); 1205 1206 return recognizePopcount() || recognizeAndInsertFFS(); 1207 } 1208 1209 /// Check if the given conditional branch is based on the comparison between 1210 /// a variable and zero, and if the variable is non-zero or zero (JmpOnZero is 1211 /// true), the control yields to the loop entry. If the branch matches the 1212 /// behavior, the variable involved in the comparison is returned. This function 1213 /// will be called to see if the precondition and postcondition of the loop are 1214 /// in desirable form. 1215 static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry, 1216 bool JmpOnZero = false) { 1217 if (!BI || !BI->isConditional()) 1218 return nullptr; 1219 1220 ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition()); 1221 if (!Cond) 1222 return nullptr; 1223 1224 ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1)); 1225 if (!CmpZero || !CmpZero->isZero()) 1226 return nullptr; 1227 1228 BasicBlock *TrueSucc = BI->getSuccessor(0); 1229 BasicBlock *FalseSucc = BI->getSuccessor(1); 1230 if (JmpOnZero) 1231 std::swap(TrueSucc, FalseSucc); 1232 1233 ICmpInst::Predicate Pred = Cond->getPredicate(); 1234 if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) || 1235 (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry)) 1236 return Cond->getOperand(0); 1237 1238 return nullptr; 1239 } 1240 1241 // Check if the recurrence variable `VarX` is in the right form to create 1242 // the idiom. Returns the value coerced to a PHINode if so. 1243 static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX, 1244 BasicBlock *LoopEntry) { 1245 auto *PhiX = dyn_cast<PHINode>(VarX); 1246 if (PhiX && PhiX->getParent() == LoopEntry && 1247 (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX)) 1248 return PhiX; 1249 return nullptr; 1250 } 1251 1252 /// Return true iff the idiom is detected in the loop. 1253 /// 1254 /// Additionally: 1255 /// 1) \p CntInst is set to the instruction counting the population bit. 1256 /// 2) \p CntPhi is set to the corresponding phi node. 1257 /// 3) \p Var is set to the value whose population bits are being counted. 1258 /// 1259 /// The core idiom we are trying to detect is: 1260 /// \code 1261 /// if (x0 != 0) 1262 /// goto loop-exit // the precondition of the loop 1263 /// cnt0 = init-val; 1264 /// do { 1265 /// x1 = phi (x0, x2); 1266 /// cnt1 = phi(cnt0, cnt2); 1267 /// 1268 /// cnt2 = cnt1 + 1; 1269 /// ... 1270 /// x2 = x1 & (x1 - 1); 1271 /// ... 1272 /// } while(x != 0); 1273 /// 1274 /// loop-exit: 1275 /// \endcode 1276 static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB, 1277 Instruction *&CntInst, PHINode *&CntPhi, 1278 Value *&Var) { 1279 // step 1: Check to see if the look-back branch match this pattern: 1280 // "if (a!=0) goto loop-entry". 1281 BasicBlock *LoopEntry; 1282 Instruction *DefX2, *CountInst; 1283 Value *VarX1, *VarX0; 1284 PHINode *PhiX, *CountPhi; 1285 1286 DefX2 = CountInst = nullptr; 1287 VarX1 = VarX0 = nullptr; 1288 PhiX = CountPhi = nullptr; 1289 LoopEntry = *(CurLoop->block_begin()); 1290 1291 // step 1: Check if the loop-back branch is in desirable form. 
  // step 1: Check if the loop-back branch is in desirable form.
  {
    if (Value *T = matchCondition(
            dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
      DefX2 = dyn_cast<Instruction>(T);
    else
      return false;
  }

  // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
  {
    if (!DefX2 || DefX2->getOpcode() != Instruction::And)
      return false;

    BinaryOperator *SubOneOp;

    if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
      VarX1 = DefX2->getOperand(1);
    else {
      VarX1 = DefX2->getOperand(0);
      SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    }
    if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
      return false;

    ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
    if (!Dec ||
        !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
          (SubOneOp->getOpcode() == Instruction::Add &&
           Dec->isMinusOne()))) {
      return false;
    }
  }

  // step 3: Check the recurrence of variable X
  PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
  if (!PhiX)
    return false;

  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
  {
    CountInst = nullptr;
    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                              IterE = LoopEntry->end();
         Iter != IterE; Iter++) {
      Instruction *Inst = &*Iter;
      if (Inst->getOpcode() != Instruction::Add)
        continue;

      ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
      if (!Inc || !Inc->isOne())
        continue;

      PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
      if (!Phi)
        continue;

      // Check if the result of the instruction is live out of the loop.
      bool LiveOutLoop = false;
      for (User *U : Inst->users()) {
        if ((cast<Instruction>(U))->getParent() != LoopEntry) {
          LiveOutLoop = true;
          break;
        }
      }

      if (LiveOutLoop) {
        CountInst = Inst;
        CountPhi = Phi;
        break;
      }
    }

    if (!CountInst)
      return false;
  }

  // step 5: check if the precondition is in this form:
  //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
  {
    auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
      return false;

    CntInst = CountInst;
    CntPhi = CountPhi;
    Var = T;
  }

  return true;
}

/// Return true if the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
///    or nullptr if there is no such.
/// 2) \p CntPhi is set to the corresponding phi node
///    or nullptr if there is no such.
/// 3) \p Var is set to the value whose CTLZ could be used.
/// 4) \p DefX is set to the instruction calculating the loop exit condition.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x = phi (x0, x.next);   //PhiX
///       cnt = phi(cnt0, cnt.next);
///
///       cnt.next = cnt + 1;
///        ...
///       x.next = x >> 1;   // DefX
///        ...
///    } while(x.next != 0);
///
/// loop-exit:
/// \endcode
static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL,
                                      Intrinsic::ID &IntrinID, Value *&InitX,
                                      Instruction *&CntInst, PHINode *&CntPhi,
                                      Instruction *&DefX) {
  BasicBlock *LoopEntry;
  Value *VarX = nullptr;

  DefX = nullptr;
  CntInst = nullptr;
  CntPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  if (Value *T = matchCondition(
          dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
    DefX = dyn_cast<Instruction>(T);
  else
    return false;

  // step 2: detect instructions corresponding to "x.next = x >> 1 or x << 1"
  if (!DefX || !DefX->isShift())
    return false;
  IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz :
                                                     Intrinsic::ctlz;
  ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
  if (!Shft || !Shft->isOne())
    return false;
  VarX = DefX->getOperand(0);

  // step 3: Check the recurrence of variable X
  PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
  if (!PhiX)
    return false;

  InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader());

  // Make sure the initial value can't be negative, otherwise the ashr in the
  // loop might never reach zero, which would make the loop infinite.
  if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL))
    return false;

  // step 4: Find the instruction which counts the iterations:
  //   cnt.next = cnt + 1
  // TODO: We can skip this step. If the loop trip count is known (CTLZ),
  //       then all uses of "cnt.next" could be optimized to the trip count
  //       plus "cnt0". Currently it is not optimized.
  //       This step could be used to detect POPCNT instruction:
  //       cnt.next = cnt + (x.next & 1)
  for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                            IterE = LoopEntry->end();
       Iter != IterE; Iter++) {
    Instruction *Inst = &*Iter;
    if (Inst->getOpcode() != Instruction::Add)
      continue;

    ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
    if (!Inc || !Inc->isOne())
      continue;

    PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
    if (!Phi)
      continue;

    CntInst = Inst;
    CntPhi = Phi;
    break;
  }
  if (!CntInst)
    return false;

  return true;
}

/// Recognize a CTLZ or CTTZ idiom in a non-countable loop and convert the
/// loop to a countable one (with a CTLZ / CTTZ trip count). Returns true if
/// CTLZ / CTTZ was inserted to compute the new trip count; otherwise, returns
/// false.
bool LoopIdiomRecognize::recognizeAndInsertFFS() {
  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  Intrinsic::ID IntrinID;
  Value *InitX;
  Instruction *DefX = nullptr;
  PHINode *CntPhi = nullptr;
  Instruction *CntInst = nullptr;
  // Help decide if the transformation is profitable. For the ShiftUntilZero
  // idiom, this is always 6.
  size_t IdiomCanonicalSize = 6;

  if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX,
                                 CntInst, CntPhi, DefX))
    return false;

  bool IsCntPhiUsedOutsideLoop = false;
  for (User *U : CntPhi->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntPhiUsedOutsideLoop = true;
      break;
    }
  bool IsCntInstUsedOutsideLoop = false;
  for (User *U : CntInst->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntInstUsedOutsideLoop = true;
      break;
    }
  // If both CntInst and CntPhi are used outside the loop the profitability
  // is questionable.
  if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
    return false;

  // For some CPUs the result of the CTLZ(X) intrinsic is undefined when X is
  // 0. If we cannot guarantee X != 0, we need to check for this case when
  // expanding.
  bool ZeroCheck = false;
  // It is safe to assume the preheader exists, as it was checked in the
  // parent function runOnLoop.
  BasicBlock *PH = CurLoop->getLoopPreheader();

  // If we are using the count instruction outside the loop, make sure we
  // have a zero check as a precondition. Without the check the loop would run
  // one iteration before any check of the input value. This means 0 and 1
  // would have identical behavior in the original loop, and thus a count
  // computed directly from CTLZ/CTTZ, which distinguishes them, would be
  // wrong for an input of 0.
  if (!IsCntPhiUsedOutsideLoop) {
    auto *PreCondBB = PH->getSinglePredecessor();
    if (!PreCondBB)
      return false;
    auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    if (!PreCondBI)
      return false;
    if (matchCondition(PreCondBI, PH) != InitX)
      return false;
    ZeroCheck = true;
  }

  // Check if the CTLZ / CTTZ intrinsic is profitable. Assume it is always
  // profitable if we delete the loop.

  // The canonical loop has only 6 instructions:
  //  %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
  //  %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
  //  %shr = ashr %n.addr.0, 1
  //  %tobool = icmp eq %shr, 0
  //  %inc = add nsw %i.0, 1
  //  br i1 %tobool

  const Value *Args[] = {
      InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
                       : ConstantInt::getFalse(InitX->getContext())};

  // @llvm.dbg intrinsics don't count, as they have no semantic effect.
  auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug();
  uint32_t HeaderSize =
      std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end());

  IntrinsicCostAttributes Attrs(IntrinID, InitX->getType(), Args);
  int Cost = TTI->getIntrinsicInstrCost(
      Attrs, TargetTransformInfo::TCK_SizeAndLatency);
  if (HeaderSize != IdiomCanonicalSize &&
      Cost > TargetTransformInfo::TCC_Basic)
    return false;

  transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
                           DefX->getDebugLoc(), ZeroCheck,
                           IsCntPhiUsedOutsideLoop);
  return true;
}

/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Counting population is usually done with a few arithmetic instructions.

/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Counting the population is usually done with a few arithmetic
  // instructions. Such instructions can be easily "absorbed" by vacant slots
  // in a non-compact loop. Therefore, recognizing the popcount idiom only
  // makes sense in a compact loop.

  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }

  // It should have a preheader containing nothing but an unconditional branch.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  if (!PH || &PH->front() != PH->getTerminator())
    return false;
  auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
  if (!EntryBI || EntryBI->isConditional())
    return false;

  // It should have a precondition block where the generated popcount
  // intrinsic function can be inserted.
  auto *PreCondBB = PH->getSinglePredecessor();
  if (!PreCondBB)
    return false;
  auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  if (!PreCondBI || PreCondBI->isUnconditional())
    return false;

  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}

static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                       const DebugLoc &DL) {
  Value *Ops[] = {Val};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                    const DebugLoc &DL, bool ZeroCheck,
                                    Intrinsic::ID IID) {
  Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, IID, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}
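
// For a 32-bit input these helpers emit calls such as (illustrative IR):
//
//   %cnt = call i32 @llvm.ctpop.i32(i32 %val)
//   %ffs = call i32 @llvm.ctlz.i32(i32 %val, i1 true)
//
// The i1 flag on ctlz/cttz is set to true only when ZeroCheck proved the
// input nonzero; that makes a zero input undefined and permits cheaper
// lowering on targets whose native instruction leaves that case unspecified.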

/// Transform the following loop (using CTLZ; CTTZ is similar):
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   LOOP_BODY
///   Br: loop if (DefX != 0)
/// Use(CntPhi) or Use(CntInst)
///
/// Into:
/// If CntPhi used outside the loop:
///   CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
///   Count = CountPrev + 1
/// else
///   Count = BitWidth(InitX) - CTLZ(InitX)
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   PhiCount = PHI [Count, Dec]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   Dec = PhiCount - 1
///   LOOP_BODY
///   Br: loop if (Dec != 0)
/// Use(CountPrev + Cnt0) // Use(CntPhi)
/// or
/// Use(Count + Cnt0) // Use(CntInst)
///
/// If LOOP_BODY is empty the loop will be deleted.
/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
void LoopIdiomRecognize::transformLoopToCountable(
    Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst,
    PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL,
    bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
  BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());

  // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader
  // block.
  IRBuilder<> Builder(PreheaderBr);
  Builder.SetCurrentDebugLocation(DL);
  Value *FFS, *Count, *CountPrev, *NewCount, *InitXNext;

  //   Count = BitWidth - CTLZ(InitX);
  // If there are uses of CntPhi, create:
  //   CountPrev = BitWidth - CTLZ(InitX >> 1);
  if (IsCntPhiUsedOutsideLoop) {
    if (DefX->getOpcode() == Instruction::AShr)
      InitXNext =
          Builder.CreateAShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::LShr)
      InitXNext =
          Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::Shl) // cttz
      InitXNext =
          Builder.CreateShl(InitX, ConstantInt::get(InitX->getType(), 1));
    else
      llvm_unreachable("Unexpected opcode!");
  } else
    InitXNext = InitX;
  FFS = createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID);
  Count = Builder.CreateSub(
      ConstantInt::get(FFS->getType(), FFS->getType()->getIntegerBitWidth()),
      FFS);
  if (IsCntPhiUsedOutsideLoop) {
    CountPrev = Count;
    Count = Builder.CreateAdd(CountPrev,
                              ConstantInt::get(CountPrev->getType(), 1));
  }

  NewCount = Builder.CreateZExtOrTrunc(
      IsCntPhiUsedOutsideLoop ? CountPrev : Count,
      cast<IntegerType>(CntInst->getType()));

  // If the counter's initial value is not zero, insert an Add instruction.
  Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
  ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
  if (!InitConst || !InitConst->isZero())
    NewCount = Builder.CreateAdd(NewCount, CntInitVal);

  // Step 2: Insert the new IV and loop condition:
  // loop:
  //   ...
  //   PhiCount = PHI [Count, Dec]
  //   ...
  //   Dec = PhiCount - 1
  //   ...
  //   Br: loop if (Dec != 0)
  BasicBlock *Body = *(CurLoop->block_begin());
  auto *LbBr = cast<BranchInst>(Body->getTerminator());
  ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
  Type *Ty = Count->getType();

  PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

  Builder.SetInsertPoint(LbCond);
  Instruction *TcDec = cast<Instruction>(
      Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                        "tcdec", /*HasNUW=*/false, /*HasNSW=*/true));

  TcPhi->addIncoming(Count, Preheader);
  TcPhi->addIncoming(TcDec, Body);

  CmpInst::Predicate Pred =
      (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  LbCond->setPredicate(Pred);
  LbCond->setOperand(0, TcDec);
  LbCond->setOperand(1, ConstantInt::get(Ty, 0));
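
  // The rewritten latch now tests the counted-down IV; conceptually the
  // emitted IR looks like this sketch (names follow the builder calls above):
  //
  //   %tcphi = phi i32 [ %count, %preheader ], [ %tcdec, %body ]
  //   %tcdec = sub nsw i32 %tcphi, 1
  //   %cond  = icmp ne i32 %tcdec, 0
  //   br i1 %cond, label %body, label %exit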

  // Step 3: All the references to the original counter outside the loop are
  // replaced with the NewCount.
  if (IsCntPhiUsedOutsideLoop)
    CntPhi->replaceUsesOutsideBlock(NewCount, Body);
  else
    CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 4: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}

void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc &DL = CntInst->getDebugLoc();

  // Before the transformation, the loop is assumed to look like:
  //   if (x) // the precondition
  //     do { cnt++; x &= x - 1; } while (x);

  // Step 1: Insert the ctpop instruction at the end of the precondition
  // block.
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    NewCount = PopCntZext =
        Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop has.
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert an Add
    // instruction.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }

  // Step 2: Replace the precondition "if (x == 0) goto loop-exit" with
  // "if (NewCount == 0) goto loop-exit". Without this change, the intrinsic
  // call would be partially dead code, and downstream passes would drag it
  // back from the precondition block to the preheader.
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }
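
  // For example (an illustrative sketch): for a precondition "if (x != 0)"
  // the rewritten guard becomes
  //
  //   %popcnt = call i32 @llvm.ctpop.i32(i32 %x)
  //   %guard  = icmp ne i32 %popcnt, 0
  //   br i1 %guard, label %preheader, label %loop-exit
  //
  // which is equivalent, since ctpop(x) == 0 exactly when x == 0.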

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a
  // noncountable loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is a lot easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite
  //    loop isn't dead even if it computes nothing useful. In general, DCE
  //    needs to prove a noncountable loop finite before safely deleting it.)
  //
  //  - If the loop also performs something else, it remains alive.
  //    Since it is transformed to a countable form, it can be aggressively
  //    optimized by some optimizations which are in general not applicable
  //    to a noncountable loop.
  //
  // After this step, this loop (conceptually) would look like the following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x - 1; t--; } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                          "tcdec", /*HasNUW=*/false, /*HasNSW=*/true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  // the loop are replaced with NewCount -- the value returned from
  // __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 5: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}
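
// End-to-end, the popcount rewrite therefore takes source like this sketch:
//
//   int cnt = 0;
//   if (x)
//     do { cnt++; x &= x - 1; } while (x);
//   use(cnt);
//
// and leaves behind the equivalent of "int cnt = __builtin_popcount(x);",
// with the now-countable, side-effect-free loop deleted by later passes.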