1 //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The code below implements dead store elimination using MemorySSA. It uses 10 // the following general approach: given a MemoryDef, walk upwards to find 11 // clobbering MemoryDefs that may be killed by the starting def. Then check 12 // that there are no uses that may read the location of the original MemoryDef 13 // in between both MemoryDefs. A bit more concretely: 14 // 15 // For all MemoryDefs StartDef: 16 // 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by walking 17 // upwards. 18 // 2. Check that there are no reads between MaybeDeadAccess and the StartDef by 19 // checking all uses starting at MaybeDeadAccess and walking until we see 20 // StartDef. 21 // 3. For each found CurrentDef, check that: 22 // 1. There are no barrier instructions between CurrentDef and StartDef (like 23 // throws or stores with ordering constraints). 24 // 2. StartDef is executed whenever CurrentDef is executed. 25 // 3. StartDef completely overwrites CurrentDef. 26 // 4. Erase CurrentDef from the function and MemorySSA. 27 // 28 //===----------------------------------------------------------------------===// 29 30 #include "llvm/Transforms/Scalar/DeadStoreElimination.h" 31 #include "llvm/ADT/APInt.h" 32 #include "llvm/ADT/DenseMap.h" 33 #include "llvm/ADT/MapVector.h" 34 #include "llvm/ADT/PostOrderIterator.h" 35 #include "llvm/ADT/SetVector.h" 36 #include "llvm/ADT/SmallPtrSet.h" 37 #include "llvm/ADT/SmallVector.h" 38 #include "llvm/ADT/Statistic.h" 39 #include "llvm/ADT/StringRef.h" 40 #include "llvm/Analysis/AliasAnalysis.h" 41 #include "llvm/Analysis/CaptureTracking.h" 42 #include "llvm/Analysis/GlobalsModRef.h" 43 #include "llvm/Analysis/LoopInfo.h" 44 #include "llvm/Analysis/MemoryBuiltins.h" 45 #include "llvm/Analysis/MemoryLocation.h" 46 #include "llvm/Analysis/MemorySSA.h" 47 #include "llvm/Analysis/MemorySSAUpdater.h" 48 #include "llvm/Analysis/MustExecute.h" 49 #include "llvm/Analysis/PostDominators.h" 50 #include "llvm/Analysis/TargetLibraryInfo.h" 51 #include "llvm/Analysis/ValueTracking.h" 52 #include "llvm/IR/Argument.h" 53 #include "llvm/IR/BasicBlock.h" 54 #include "llvm/IR/Constant.h" 55 #include "llvm/IR/Constants.h" 56 #include "llvm/IR/DataLayout.h" 57 #include "llvm/IR/Dominators.h" 58 #include "llvm/IR/Function.h" 59 #include "llvm/IR/IRBuilder.h" 60 #include "llvm/IR/InstIterator.h" 61 #include "llvm/IR/InstrTypes.h" 62 #include "llvm/IR/Instruction.h" 63 #include "llvm/IR/Instructions.h" 64 #include "llvm/IR/IntrinsicInst.h" 65 #include "llvm/IR/Intrinsics.h" 66 #include "llvm/IR/LLVMContext.h" 67 #include "llvm/IR/Module.h" 68 #include "llvm/IR/PassManager.h" 69 #include "llvm/IR/PatternMatch.h" 70 #include "llvm/IR/Value.h" 71 #include "llvm/InitializePasses.h" 72 #include "llvm/Pass.h" 73 #include "llvm/Support/Casting.h" 74 #include "llvm/Support/CommandLine.h" 75 #include "llvm/Support/Debug.h" 76 #include "llvm/Support/DebugCounter.h" 77 #include "llvm/Support/ErrorHandling.h" 78 #include "llvm/Support/MathExtras.h" 79 #include "llvm/Support/raw_ostream.h" 80 #include "llvm/Transforms/Scalar.h" 81 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" 82 #include 
"llvm/Transforms/Utils/BuildLibCalls.h" 83 #include "llvm/Transforms/Utils/Local.h" 84 #include <algorithm> 85 #include <cassert> 86 #include <cstddef> 87 #include <cstdint> 88 #include <iterator> 89 #include <map> 90 #include <utility> 91 92 using namespace llvm; 93 using namespace PatternMatch; 94 95 #define DEBUG_TYPE "dse" 96 97 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE"); 98 STATISTIC(NumRedundantStores, "Number of redundant stores deleted"); 99 STATISTIC(NumFastStores, "Number of stores deleted"); 100 STATISTIC(NumFastOther, "Number of other instrs removed"); 101 STATISTIC(NumCompletePartials, "Number of stores dead by later partials"); 102 STATISTIC(NumModifiedStores, "Number of stores modified"); 103 STATISTIC(NumCFGChecks, "Number of stores modified"); 104 STATISTIC(NumCFGTries, "Number of stores modified"); 105 STATISTIC(NumCFGSuccess, "Number of stores modified"); 106 STATISTIC(NumGetDomMemoryDefPassed, 107 "Number of times a valid candidate is returned from getDomMemoryDef"); 108 STATISTIC(NumDomMemDefChecks, 109 "Number iterations check for reads in getDomMemoryDef"); 110 111 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa", 112 "Controls which MemoryDefs are eliminated."); 113 114 static cl::opt<bool> 115 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", 116 cl::init(true), cl::Hidden, 117 cl::desc("Enable partial-overwrite tracking in DSE")); 118 119 static cl::opt<bool> 120 EnablePartialStoreMerging("enable-dse-partial-store-merging", 121 cl::init(true), cl::Hidden, 122 cl::desc("Enable partial store merging in DSE")); 123 124 static cl::opt<unsigned> 125 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden, 126 cl::desc("The number of memory instructions to scan for " 127 "dead store elimination (default = 150)")); 128 static cl::opt<unsigned> MemorySSAUpwardsStepLimit( 129 "dse-memoryssa-walklimit", cl::init(90), cl::Hidden, 130 cl::desc("The maximum number of steps while walking upwards to find " 131 "MemoryDefs that may be killed (default = 90)")); 132 133 static cl::opt<unsigned> MemorySSAPartialStoreLimit( 134 "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden, 135 cl::desc("The maximum number candidates that only partially overwrite the " 136 "killing MemoryDef to consider" 137 " (default = 5)")); 138 139 static cl::opt<unsigned> MemorySSADefsPerBlockLimit( 140 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden, 141 cl::desc("The number of MemoryDefs we consider as candidates to eliminated " 142 "other stores per basic block (default = 5000)")); 143 144 static cl::opt<unsigned> MemorySSASameBBStepCost( 145 "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden, 146 cl::desc( 147 "The cost of a step in the same basic block as the killing MemoryDef" 148 "(default = 1)")); 149 150 static cl::opt<unsigned> 151 MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5), 152 cl::Hidden, 153 cl::desc("The cost of a step in a different basic " 154 "block than the killing MemoryDef" 155 "(default = 5)")); 156 157 static cl::opt<unsigned> MemorySSAPathCheckLimit( 158 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden, 159 cl::desc("The maximum number of blocks to check when trying to prove that " 160 "all paths to an exit go through a killing block (default = 50)")); 161 162 // This flags allows or disallows DSE to optimize MemorySSA during its 163 // traversal. 
Note that DSE optimizing MemorySSA may impact other passes 164 // downstream of the DSE invocation and can lead to issues not being 165 // reproducible in isolation (i.e. when MemorySSA is built from scratch). In 166 // those cases, the flag can be used to check if DSE's MemorySSA optimizations 167 // impact follow-up passes. 168 static cl::opt<bool> 169 OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden, 170 cl::desc("Allow DSE to optimize memory accesses.")); 171 172 //===----------------------------------------------------------------------===// 173 // Helper functions 174 //===----------------------------------------------------------------------===// 175 using OverlapIntervalsTy = std::map<int64_t, int64_t>; 176 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>; 177 178 /// If the value of this instruction and the memory it writes to is unused, may 179 /// we delete this instruction? 180 static bool isRemovable(Instruction *I) { 181 // Don't remove volatile/atomic stores. 182 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 183 return SI->isUnordered(); 184 185 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 186 switch (II->getIntrinsicID()) { 187 default: llvm_unreachable("Does not have LocForWrite"); 188 case Intrinsic::lifetime_end: 189 // Never remove dead lifetime_end's, e.g. because it is followed by a 190 // free. 191 return false; 192 case Intrinsic::init_trampoline: 193 // Always safe to remove init_trampoline. 194 return true; 195 case Intrinsic::memset: 196 case Intrinsic::memmove: 197 case Intrinsic::memcpy: 198 case Intrinsic::memcpy_inline: 199 // Don't remove volatile memory intrinsics. 200 return !cast<MemIntrinsic>(II)->isVolatile(); 201 case Intrinsic::memcpy_element_unordered_atomic: 202 case Intrinsic::memmove_element_unordered_atomic: 203 case Intrinsic::memset_element_unordered_atomic: 204 case Intrinsic::masked_store: 205 return true; 206 } 207 } 208 209 // note: only get here for calls with analyzable writes - i.e. libcalls 210 if (auto *CB = dyn_cast<CallBase>(I)) 211 return CB->use_empty(); 212 213 return false; 214 } 215 216 /// Returns true if the end of this instruction can be safely shortened in 217 /// length. 218 static bool isShortenableAtTheEnd(Instruction *I) { 219 // Don't shorten stores for now 220 if (isa<StoreInst>(I)) 221 return false; 222 223 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 224 switch (II->getIntrinsicID()) { 225 default: return false; 226 case Intrinsic::memset: 227 case Intrinsic::memcpy: 228 case Intrinsic::memcpy_element_unordered_atomic: 229 case Intrinsic::memset_element_unordered_atomic: 230 // Do shorten memory intrinsics. 231 // FIXME: Add memmove if it's also safe to transform. 232 return true; 233 } 234 } 235 236 // Don't shorten libcalls calls for now. 237 238 return false; 239 } 240 241 /// Returns true if the beginning of this instruction can be safely shortened 242 /// in length. 243 static bool isShortenableAtTheBeginning(Instruction *I) { 244 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be 245 // easily done by offsetting the source address. 
246 return isa<AnyMemSetInst>(I); 247 } 248 249 static uint64_t getPointerSize(const Value *V, const DataLayout &DL, 250 const TargetLibraryInfo &TLI, 251 const Function *F) { 252 uint64_t Size; 253 ObjectSizeOpts Opts; 254 Opts.NullIsUnknownSize = NullPointerIsDefined(F); 255 256 if (getObjectSize(V, Size, DL, &TLI, Opts)) 257 return Size; 258 return MemoryLocation::UnknownSize; 259 } 260 261 namespace { 262 263 enum OverwriteResult { 264 OW_Begin, 265 OW_Complete, 266 OW_End, 267 OW_PartialEarlierWithFullLater, 268 OW_MaybePartial, 269 OW_None, 270 OW_Unknown 271 }; 272 273 } // end anonymous namespace 274 275 /// Check if two instruction are masked stores that completely 276 /// overwrite one another. More specifically, \p KillingI has to 277 /// overwrite \p DeadI. 278 static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI, 279 const Instruction *DeadI, 280 BatchAAResults &AA) { 281 const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI); 282 const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI); 283 if (KillingII == nullptr || DeadII == nullptr) 284 return OW_Unknown; 285 if (KillingII->getIntrinsicID() != Intrinsic::masked_store || 286 DeadII->getIntrinsicID() != Intrinsic::masked_store) 287 return OW_Unknown; 288 // Pointers. 289 Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts(); 290 Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts(); 291 if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr)) 292 return OW_Unknown; 293 // Masks. 294 // TODO: check that KillingII's mask is a superset of the DeadII's mask. 295 if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3)) 296 return OW_Unknown; 297 return OW_Complete; 298 } 299 300 /// Return 'OW_Complete' if a store to the 'KillingLoc' location completely 301 /// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the 302 /// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin' 303 /// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'. 304 /// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was 305 /// overwritten by a killing (smaller) store which doesn't write outside the big 306 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined. 307 /// NOTE: This function must only be called if both \p KillingLoc and \p 308 /// DeadLoc belong to the same underlying object with valid \p KillingOff and 309 /// \p DeadOff. 310 static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc, 311 const MemoryLocation &DeadLoc, 312 int64_t KillingOff, int64_t DeadOff, 313 Instruction *DeadI, 314 InstOverlapIntervalsTy &IOL) { 315 const uint64_t KillingSize = KillingLoc.Size.getValue(); 316 const uint64_t DeadSize = DeadLoc.Size.getValue(); 317 // We may now overlap, although the overlap is not complete. There might also 318 // be other incomplete overlaps, and together, they might cover the complete 319 // dead store. 320 // Note: The correctness of this logic depends on the fact that this function 321 // is not even called providing DepWrite when there are any intervening reads. 322 if (EnablePartialOverwriteTracking && 323 KillingOff < int64_t(DeadOff + DeadSize) && 324 int64_t(KillingOff + KillingSize) >= DeadOff) { 325 326 // Insert our part of the overlap into the map. 
327 auto &IM = IOL[DeadI]; 328 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", " 329 << int64_t(DeadOff + DeadSize) << ") KillingLoc [" 330 << KillingOff << ", " << int64_t(KillingOff + KillingSize) 331 << ")\n"); 332 333 // Make sure that we only insert non-overlapping intervals and combine 334 // adjacent intervals. The intervals are stored in the map with the ending 335 // offset as the key (in the half-open sense) and the starting offset as 336 // the value. 337 int64_t KillingIntStart = KillingOff; 338 int64_t KillingIntEnd = KillingOff + KillingSize; 339 340 // Find any intervals ending at, or after, KillingIntStart which start 341 // before KillingIntEnd. 342 auto ILI = IM.lower_bound(KillingIntStart); 343 if (ILI != IM.end() && ILI->second <= KillingIntEnd) { 344 // This existing interval is overlapped with the current store somewhere 345 // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing 346 // intervals and adjusting our start and end. 347 KillingIntStart = std::min(KillingIntStart, ILI->second); 348 KillingIntEnd = std::max(KillingIntEnd, ILI->first); 349 ILI = IM.erase(ILI); 350 351 // Continue erasing and adjusting our end in case other previous 352 // intervals are also overlapped with the current store. 353 // 354 // |--- dead 1 ---| |--- dead 2 ---| 355 // |------- killing---------| 356 // 357 while (ILI != IM.end() && ILI->second <= KillingIntEnd) { 358 assert(ILI->second > KillingIntStart && "Unexpected interval"); 359 KillingIntEnd = std::max(KillingIntEnd, ILI->first); 360 ILI = IM.erase(ILI); 361 } 362 } 363 364 IM[KillingIntEnd] = KillingIntStart; 365 366 ILI = IM.begin(); 367 if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) { 368 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc [" 369 << DeadOff << ", " << int64_t(DeadOff + DeadSize) 370 << ") Composite KillingLoc [" << ILI->second << ", " 371 << ILI->first << ")\n"); 372 ++NumCompletePartials; 373 return OW_Complete; 374 } 375 } 376 377 // Check for a dead store which writes to all the memory locations that 378 // the killing store writes to. 379 if (EnablePartialStoreMerging && KillingOff >= DeadOff && 380 int64_t(DeadOff + DeadSize) > KillingOff && 381 uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) { 382 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead load [" << DeadOff 383 << ", " << int64_t(DeadOff + DeadSize) 384 << ") by a killing store [" << KillingOff << ", " 385 << int64_t(KillingOff + KillingSize) << ")\n"); 386 // TODO: Maybe come up with a better name? 387 return OW_PartialEarlierWithFullLater; 388 } 389 390 // Another interesting case is if the killing store overwrites the end of the 391 // dead store. 392 // 393 // |--dead--| 394 // |-- killing --| 395 // 396 // In this case we may want to trim the size of dead store to avoid 397 // generating stores to addresses which will definitely be overwritten killing 398 // store. 399 if (!EnablePartialOverwriteTracking && 400 (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) && 401 int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize))) 402 return OW_End; 403 404 // Finally, we also need to check if the killing store overwrites the 405 // beginning of the dead store. 406 // 407 // |--dead--| 408 // |-- killing --| 409 // 410 // In this case we may want to move the destination address and trim the size 411 // of dead store to avoid generating stores to addresses which will definitely 412 // be overwritten killing store. 
413 if (!EnablePartialOverwriteTracking && 414 (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) { 415 assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) && 416 "Expect to be handled as OW_Complete"); 417 return OW_Begin; 418 } 419 // Otherwise, they don't completely overlap. 420 return OW_Unknown; 421 } 422 423 /// Returns true if the memory which is accessed by the second instruction is not 424 /// modified between the first and the second instruction. 425 /// Precondition: Second instruction must be dominated by the first 426 /// instruction. 427 static bool 428 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, 429 BatchAAResults &AA, const DataLayout &DL, 430 DominatorTree *DT) { 431 // Do a backwards scan through the CFG from SecondI to FirstI. Look for 432 // instructions which can modify the memory location accessed by SecondI. 433 // 434 // While doing the walk keep track of the address to check. It might be 435 // different in different basic blocks due to PHI translation. 436 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>; 437 SmallVector<BlockAddressPair, 16> WorkList; 438 // Keep track of the address we visited each block with. Bail out if we 439 // visit a block with different addresses. 440 DenseMap<BasicBlock *, Value *> Visited; 441 442 BasicBlock::iterator FirstBBI(FirstI); 443 ++FirstBBI; 444 BasicBlock::iterator SecondBBI(SecondI); 445 BasicBlock *FirstBB = FirstI->getParent(); 446 BasicBlock *SecondBB = SecondI->getParent(); 447 MemoryLocation MemLoc; 448 if (auto *MemSet = dyn_cast<MemSetInst>(SecondI)) 449 MemLoc = MemoryLocation::getForDest(MemSet); 450 else 451 MemLoc = MemoryLocation::get(SecondI); 452 453 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr); 454 455 // Start checking the SecondBB. 456 WorkList.push_back( 457 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr))); 458 bool isFirstBlock = true; 459 460 // Check all blocks going backward until we reach the FirstBB. 461 while (!WorkList.empty()) { 462 BlockAddressPair Current = WorkList.pop_back_val(); 463 BasicBlock *B = Current.first; 464 PHITransAddr &Addr = Current.second; 465 Value *Ptr = Addr.getAddr(); 466 467 // Ignore instructions before FirstI if this is the FirstBB. 468 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin()); 469 470 BasicBlock::iterator EI; 471 if (isFirstBlock) { 472 // Ignore instructions after SecondI if this is the first visit of SecondBB. 473 assert(B == SecondBB && "first block is not the store block"); 474 EI = SecondBBI; 475 isFirstBlock = false; 476 } else { 477 // It's not SecondBB or (in case of a loop) the second visit of SecondBB. 478 // In this case we also have to look at instructions after SecondI. 
479 EI = B->end();
480 }
481 for (; BI != EI; ++BI) {
482 Instruction *I = &*BI;
483 if (I->mayWriteToMemory() && I != SecondI)
484 if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
485 return false;
486 }
487 if (B != FirstBB) {
488 assert(B != &FirstBB->getParent()->getEntryBlock() &&
489 "Should not hit the entry block because SI must be dominated by LI");
490 for (BasicBlock *Pred : predecessors(B)) {
491 PHITransAddr PredAddr = Addr;
492 if (PredAddr.NeedsPHITranslationFromBlock(B)) {
493 if (!PredAddr.IsPotentiallyPHITranslatable())
494 return false;
495 if (PredAddr.PHITranslateValue(B, Pred, DT, false))
496 return false;
497 }
498 Value *TranslatedPtr = PredAddr.getAddr();
499 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
500 if (!Inserted.second) {
501 // We already visited this block before. If it was with a different
502 // address - bail out!
503 if (TranslatedPtr != Inserted.first->second)
504 return false;
505 // ... otherwise just skip it.
506 continue;
507 }
508 WorkList.push_back(std::make_pair(Pred, PredAddr));
509 }
510 }
511 }
512 return true;
513 }
514
515 static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
516 uint64_t &DeadSize, int64_t KillingStart,
517 uint64_t KillingSize, bool IsOverwriteEnd) {
518 auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
519 Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
520
521 // We assume that memset/memcpy operates in chunks of the "largest" native
522 // type size, aligned on the same value. That means the optimal start and
523 // size of memset/memcpy should be multiples of the preferred alignment of
524 // that type, i.e. there is no sense in trying to reduce the store size any
525 // further since any "extra" stores come for free anyway.
526 // On the other hand, the maximum alignment we can achieve is limited by the
527 // alignment of the initial store.
528
529 // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
530 // "largest" native type.
531 // Note: What is the proper way to get that value?
532 // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
533 // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
534
535 int64_t ToRemoveStart = 0;
536 uint64_t ToRemoveSize = 0;
537 // Compute start and size of the region to remove. Make sure 'PrefAlign' is
538 // maintained on the remaining store.
539 if (IsOverwriteEnd) {
540 // Calculate required adjustment for 'KillingStart' in order to keep
541 // remaining store size aligned on 'PrefAlign'.
542 uint64_t Off =
543 offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign);
544 ToRemoveStart = KillingStart + Off;
545 if (DeadSize <= uint64_t(ToRemoveStart - DeadStart))
546 return false;
547 ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart);
548 } else {
549 ToRemoveStart = DeadStart;
550 assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
551 "Not overlapping accesses?");
552 ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart);
553 // Calculate required adjustment for 'ToRemoveSize' in order to keep
554 // start of the remaining store aligned on 'PrefAlign'.
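// For illustration (made-up values): with KillingStart = 0, KillingSize = 21,
// DeadStart = 8 and PrefAlign = 4, ToRemoveSize starts out as 21 - 8 = 13;
// offsetToAlignment(13, Align(4)) is 3, so 4 - 3 = 1 byte is kept and
// ToRemoveSize becomes 12, a multiple of PrefAlign, so the adjusted
// destination preserves the original alignment.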
555 uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
556 if (Off != 0) {
557 if (ToRemoveSize <= (PrefAlign.value() - Off))
558 return false;
559 ToRemoveSize -= PrefAlign.value() - Off;
560 }
561 assert(isAligned(PrefAlign, ToRemoveSize) &&
562 "Should preserve selected alignment");
563 }
564
565 assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
566 assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
567
568 uint64_t NewSize = DeadSize - ToRemoveSize;
569 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
570 // When shortening an atomic memory intrinsic, the newly shortened
571 // length must remain an integer multiple of the element size.
572 const uint32_t ElementSize = AMI->getElementSizeInBytes();
573 if (0 != NewSize % ElementSize)
574 return false;
575 }
576
577 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
578 << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
579 << "\n KILLER [" << ToRemoveStart << ", "
580 << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
581
582 Value *DeadWriteLength = DeadIntrinsic->getLength();
583 Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize);
584 DeadIntrinsic->setLength(TrimmedLength);
585 DeadIntrinsic->setDestAlignment(PrefAlign);
586
587 if (!IsOverwriteEnd) {
588 Value *OrigDest = DeadIntrinsic->getRawDest();
589 Type *Int8PtrTy =
590 Type::getInt8PtrTy(DeadIntrinsic->getContext(),
591 OrigDest->getType()->getPointerAddressSpace());
592 Value *Dest = OrigDest;
593 if (OrigDest->getType() != Int8PtrTy)
594 Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", DeadI);
595 Value *Indices[1] = {
596 ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)};
597 Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
598 Type::getInt8Ty(DeadIntrinsic->getContext()), Dest, Indices, "", DeadI);
599 NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc());
600 if (NewDestGEP->getType() != OrigDest->getType())
601 NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(),
602 "", DeadI);
603 DeadIntrinsic->setDest(NewDestGEP);
604 }
605
606 // Finally update start and size of dead access.
607 if (!IsOverwriteEnd)
608 DeadStart += ToRemoveSize;
609 DeadSize = NewSize;
610
611 return true;
612 }
613
614 static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap,
615 int64_t &DeadStart, uint64_t &DeadSize) {
616 if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI))
617 return false;
618
619 OverlapIntervalsTy::iterator OII = --IntervalMap.end();
620 int64_t KillingStart = OII->second;
621 uint64_t KillingSize = OII->first - KillingStart;
622
623 assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
624
625 if (KillingStart > DeadStart &&
626 // Note: "KillingStart - DeadStart" is known to be positive due to
627 // preceding check.
628 (uint64_t)(KillingStart - DeadStart) < DeadSize &&
629 // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to
630 // be non-negative due to preceding checks.
631 KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) {
632 if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
633 true)) {
634 IntervalMap.erase(OII);
635 return true;
636 }
637 }
638 return false;
639 }
640
641 static bool tryToShortenBegin(Instruction *DeadI,
642 OverlapIntervalsTy &IntervalMap,
643 int64_t &DeadStart, uint64_t &DeadSize) {
644 if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI))
645 return false;
646
647 OverlapIntervalsTy::iterator OII = IntervalMap.begin();
648 int64_t KillingStart = OII->second;
649 uint64_t KillingSize = OII->first - KillingStart;
650
651 assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
652
653 if (KillingStart <= DeadStart &&
654 // Note: "DeadStart - KillingStart" is known to be non-negative due to
655 // preceding check.
656 KillingSize > (uint64_t)(DeadStart - KillingStart)) {
657 // Note: "KillingSize - (uint64_t)(DeadStart - KillingStart)" is known to
658 // be positive due to preceding checks.
659 assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
660 "Should have been handled as OW_Complete");
661 if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
662 false)) {
663 IntervalMap.erase(OII);
664 return true;
665 }
666 }
667 return false;
668 }
669
670 static Constant *
671 tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI,
672 int64_t KillingOffset, int64_t DeadOffset,
673 const DataLayout &DL, BatchAAResults &AA,
674 DominatorTree *DT) {
675
676 if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) &&
677 DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) &&
678 KillingI && isa<ConstantInt>(KillingI->getValueOperand()) &&
679 DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) &&
680 memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) {
681 // If the store we find is:
682 // a) partially overwritten by the store to 'Loc'
683 // b) the killing store is fully contained in the dead one and
684 // c) they both have a constant value
685 // d) none of the two stores need padding
686 // Merge the two stores, replacing the dead store's value with a
687 // merge of both values.
688 // TODO: Deal with other constant types (vectors, etc), and probably
689 // some mem intrinsics (if needed)
690
691 APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue();
692 APInt KillingValue =
693 cast<ConstantInt>(KillingI->getValueOperand())->getValue();
694 unsigned KillingBits = KillingValue.getBitWidth();
695 assert(DeadValue.getBitWidth() > KillingValue.getBitWidth());
696 KillingValue = KillingValue.zext(DeadValue.getBitWidth());
697
698 // Offset of the smaller store inside the larger store
699 unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
700 unsigned LShiftAmount =
701 DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits
702 : BitOffsetDiff;
703 APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount,
704 LShiftAmount + KillingBits);
705 // Clear the bits we'll be replacing, then OR with the smaller
706 // store, shifted appropriately.
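// For illustration (little-endian, made-up constants): merging a dead
// 'store i32 0x11223344' at offset 0 with a killing 'store i16 0xAABB' at
// offset 2 gives BitOffsetDiff = 16, LShiftAmount = 16, Mask = 0xFFFF0000
// and a merged value of 0xAABB3344.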
707 APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount); 708 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Dead: " << *DeadI 709 << "\n Killing: " << *KillingI 710 << "\n Merged Value: " << Merged << '\n'); 711 return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged); 712 } 713 return nullptr; 714 } 715 716 namespace { 717 // Returns true if \p I is an intrisnic that does not read or write memory. 718 bool isNoopIntrinsic(Instruction *I) { 719 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 720 switch (II->getIntrinsicID()) { 721 case Intrinsic::lifetime_start: 722 case Intrinsic::lifetime_end: 723 case Intrinsic::invariant_end: 724 case Intrinsic::launder_invariant_group: 725 case Intrinsic::assume: 726 return true; 727 case Intrinsic::dbg_addr: 728 case Intrinsic::dbg_declare: 729 case Intrinsic::dbg_label: 730 case Intrinsic::dbg_value: 731 llvm_unreachable("Intrinsic should not be modeled in MemorySSA"); 732 default: 733 return false; 734 } 735 } 736 return false; 737 } 738 739 // Check if we can ignore \p D for DSE. 740 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller, 741 const TargetLibraryInfo &TLI) { 742 Instruction *DI = D->getMemoryInst(); 743 // Calls that only access inaccessible memory cannot read or write any memory 744 // locations we consider for elimination. 745 if (auto *CB = dyn_cast<CallBase>(DI)) 746 if (CB->onlyAccessesInaccessibleMemory()) { 747 if (isAllocLikeFn(DI, &TLI)) 748 return false; 749 return true; 750 } 751 // We can eliminate stores to locations not visible to the caller across 752 // throwing instructions. 753 if (DI->mayThrow() && !DefVisibleToCaller) 754 return true; 755 756 // We can remove the dead stores, irrespective of the fence and its ordering 757 // (release/acquire/seq_cst). Fences only constraints the ordering of 758 // already visible stores, it does not make a store visible to other 759 // threads. So, skipping over a fence does not change a store from being 760 // dead. 761 if (isa<FenceInst>(DI)) 762 return true; 763 764 // Skip intrinsics that do not really read or modify memory. 765 if (isNoopIntrinsic(DI)) 766 return true; 767 768 return false; 769 } 770 771 struct DSEState { 772 Function &F; 773 AliasAnalysis &AA; 774 EarliestEscapeInfo EI; 775 776 /// The single BatchAA instance that is used to cache AA queries. It will 777 /// not be invalidated over the whole run. This is safe, because: 778 /// 1. Only memory writes are removed, so the alias cache for memory 779 /// locations remains valid. 780 /// 2. No new instructions are added (only instructions removed), so cached 781 /// information for a deleted value cannot be accessed by a re-used new 782 /// value pointer. 783 BatchAAResults BatchAA; 784 785 MemorySSA &MSSA; 786 DominatorTree &DT; 787 PostDominatorTree &PDT; 788 const TargetLibraryInfo &TLI; 789 const DataLayout &DL; 790 const LoopInfo &LI; 791 792 // Whether the function contains any irreducible control flow, useful for 793 // being accurately able to detect loops. 794 bool ContainsIrreducibleLoops; 795 796 // All MemoryDefs that potentially could kill other MemDefs. 797 SmallVector<MemoryDef *, 64> MemDefs; 798 // Any that should be skipped as they are already deleted 799 SmallPtrSet<MemoryAccess *, 4> SkipStores; 800 // Keep track of all of the objects that are invisible to the caller before 801 // the function returns. 
802 // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet; 803 DenseMap<const Value *, bool> InvisibleToCallerBeforeRet; 804 // Keep track of all of the objects that are invisible to the caller after 805 // the function returns. 806 DenseMap<const Value *, bool> InvisibleToCallerAfterRet; 807 // Keep track of blocks with throwing instructions not modeled in MemorySSA. 808 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks; 809 // Post-order numbers for each basic block. Used to figure out if memory 810 // accesses are executed before another access. 811 DenseMap<BasicBlock *, unsigned> PostOrderNumbers; 812 813 /// Keep track of instructions (partly) overlapping with killing MemoryDefs per 814 /// basic block. 815 MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs; 816 817 // Class contains self-reference, make sure it's not copied/moved. 818 DSEState(const DSEState &) = delete; 819 DSEState &operator=(const DSEState &) = delete; 820 821 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT, 822 PostDominatorTree &PDT, const TargetLibraryInfo &TLI, 823 const LoopInfo &LI) 824 : F(F), AA(AA), EI(DT, LI), BatchAA(AA, &EI), MSSA(MSSA), DT(DT), 825 PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) { 826 // Collect blocks with throwing instructions not modeled in MemorySSA and 827 // alloc-like objects. 828 unsigned PO = 0; 829 for (BasicBlock *BB : post_order(&F)) { 830 PostOrderNumbers[BB] = PO++; 831 for (Instruction &I : *BB) { 832 MemoryAccess *MA = MSSA.getMemoryAccess(&I); 833 if (I.mayThrow() && !MA) 834 ThrowingBlocks.insert(I.getParent()); 835 836 auto *MD = dyn_cast_or_null<MemoryDef>(MA); 837 if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit && 838 (getLocForWriteEx(&I) || isMemTerminatorInst(&I))) 839 MemDefs.push_back(MD); 840 } 841 } 842 843 // Treat byval or inalloca arguments the same as Allocas, stores to them are 844 // dead at the end of the function. 845 for (Argument &AI : F.args()) 846 if (AI.hasPassPointeeByValueCopyAttr()) { 847 // For byval, the caller doesn't know the address of the allocation. 848 if (AI.hasByValAttr()) 849 InvisibleToCallerBeforeRet.insert({&AI, true}); 850 InvisibleToCallerAfterRet.insert({&AI, true}); 851 } 852 853 // Collect whether there is any irreducible control flow in the function. 854 ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI); 855 } 856 857 /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p 858 /// KillingI instruction) completely overwrites a store to the 'DeadLoc' 859 /// location (by \p DeadI instruction). 860 /// Return OW_MaybePartial if \p KillingI does not completely overwrite 861 /// \p DeadI, but they both write to the same underlying object. In that 862 /// case, use isPartialOverwrite to check if \p KillingI partially overwrites 863 /// \p DeadI. Returns 'OR_None' if \p KillingI is known to not overwrite the 864 /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined. 865 OverwriteResult isOverwrite(const Instruction *KillingI, 866 const Instruction *DeadI, 867 const MemoryLocation &KillingLoc, 868 const MemoryLocation &DeadLoc, 869 int64_t &KillingOff, int64_t &DeadOff) { 870 // AliasAnalysis does not always account for loops. Limit overwrite checks 871 // to dependencies for which we can guarantee they are independent of any 872 // loops they are in. 873 if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc)) 874 return OW_Unknown; 875 876 // FIXME: Vet that this works for size upper-bounds. 
Seems unlikely that we'll
877 // get imprecise values here, though (except for unknown sizes).
878 if (!KillingLoc.Size.isPrecise() || !DeadLoc.Size.isPrecise()) {
879 // In case no constant size is known, try to use the IR values for the
880 // number of bytes written and check if they match.
881 const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
882 const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
883 if (KillingMemI && DeadMemI) {
884 const Value *KillingV = KillingMemI->getLength();
885 const Value *DeadV = DeadMemI->getLength();
886 if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
887 return OW_Complete;
888 }
889
890 // Masked stores have imprecise locations, but we can reason about them
891 // to some extent.
892 return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
893 }
894
895 const uint64_t KillingSize = KillingLoc.Size.getValue();
896 const uint64_t DeadSize = DeadLoc.Size.getValue();
897
898 // Query the alias information
899 AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
900
901 // If the start pointers are the same, we just have to compare sizes to see if
902 // the killing store was larger than the dead store.
903 if (AAR == AliasResult::MustAlias) {
904 // Make sure that the KillingSize is >= the DeadSize.
905 if (KillingSize >= DeadSize)
906 return OW_Complete;
907 }
908
909 // If we hit a partial alias we may have a full overwrite
910 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
911 int32_t Off = AAR.getOffset();
912 if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
913 return OW_Complete;
914 }
915
916 // Check to see if the killing store is to the entire object (either a
917 // global, an alloca, or a byval/inalloca argument). If so, then it clearly
918 // overwrites any other store to the same object.
919 const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
920 const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
921 const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
922 const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
923
924 // If we can't resolve the same pointers to the same object, then we can't
925 // analyze them at all.
926 if (DeadUndObj != KillingUndObj) {
927 // Non-aliasing stores to different objects don't overlap. Note that
928 // if the killing store is known to overwrite the whole object (an out-of-
929 // bounds access overwrites the whole object as well) then it is assumed to
930 // completely overwrite any store to the same object even if they don't
931 // actually alias (see next check).
932 if (AAR == AliasResult::NoAlias)
933 return OW_None;
934 return OW_Unknown;
935 }
936
937 // If the KillingI store is to a recognizable object, get its size.
938 uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
939 if (KillingUndObjSize != MemoryLocation::UnknownSize)
940 if (KillingUndObjSize == KillingSize && KillingUndObjSize >= DeadSize)
941 return OW_Complete;
942
943 // Okay, we have stores to two completely different pointers. Try to
944 // decompose each pointer into a "base + constant_offset" form. If the base
945 // pointers are equal, then we can reason about the two stores.
946 DeadOff = 0;
947 KillingOff = 0;
948 const Value *DeadBasePtr =
949 GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
950 const Value *KillingBasePtr =
951 GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);
952
953 // If the base pointers still differ, we have two completely different
954 // stores.
955 if (DeadBasePtr != KillingBasePtr) 956 return OW_Unknown; 957 958 // The killing access completely overlaps the dead store if and only if 959 // both start and end of the dead one is "inside" the killing one: 960 // |<->|--dead--|<->| 961 // |-----killing------| 962 // Accesses may overlap if and only if start of one of them is "inside" 963 // another one: 964 // |<->|--dead--|<-------->| 965 // |-------killing--------| 966 // OR 967 // |-------dead-------| 968 // |<->|---killing---|<----->| 969 // 970 // We have to be careful here as *Off is signed while *.Size is unsigned. 971 972 // Check if the dead access starts "not before" the killing one. 973 if (DeadOff >= KillingOff) { 974 // If the dead access ends "not after" the killing access then the 975 // dead one is completely overwritten by the killing one. 976 if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize) 977 return OW_Complete; 978 // If start of the dead access is "before" end of the killing access 979 // then accesses overlap. 980 else if ((uint64_t)(DeadOff - KillingOff) < KillingSize) 981 return OW_MaybePartial; 982 } 983 // If start of the killing access is "before" end of the dead access then 984 // accesses overlap. 985 else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) { 986 return OW_MaybePartial; 987 } 988 989 // Can reach here only if accesses are known not to overlap. 990 return OW_None; 991 } 992 993 bool isInvisibleToCallerAfterRet(const Value *V) { 994 if (isa<AllocaInst>(V)) 995 return true; 996 auto I = InvisibleToCallerAfterRet.insert({V, false}); 997 if (I.second) { 998 if (!isInvisibleToCallerBeforeRet(V)) { 999 I.first->second = false; 1000 } else { 1001 auto *Inst = dyn_cast<Instruction>(V); 1002 if (Inst && isAllocLikeFn(Inst, &TLI)) 1003 I.first->second = !PointerMayBeCaptured(V, true, false); 1004 } 1005 } 1006 return I.first->second; 1007 } 1008 1009 bool isInvisibleToCallerBeforeRet(const Value *V) { 1010 if (isa<AllocaInst>(V)) 1011 return true; 1012 auto I = InvisibleToCallerBeforeRet.insert({V, false}); 1013 if (I.second) { 1014 auto *Inst = dyn_cast<Instruction>(V); 1015 if (Inst && isAllocLikeFn(Inst, &TLI)) 1016 // NOTE: This could be made more precise by PointerMayBeCapturedBefore 1017 // with the killing MemoryDef. But we refrain from doing so for now to 1018 // limit compile-time and this does not cause any changes to the number 1019 // of stores removed on a large test set in practice. 1020 I.first->second = !PointerMayBeCaptured(V, false, true); 1021 } 1022 return I.first->second; 1023 } 1024 1025 Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const { 1026 if (!I->mayWriteToMemory()) 1027 return None; 1028 1029 if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I)) 1030 return {MemoryLocation::getForDest(MTI)}; 1031 1032 if (auto *CB = dyn_cast<CallBase>(I)) { 1033 // If the functions may write to memory we do not know about, bail out. 
1034 if (!CB->onlyAccessesArgMemory() && 1035 !CB->onlyAccessesInaccessibleMemOrArgMem()) 1036 return None; 1037 1038 LibFunc LF; 1039 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) { 1040 switch (LF) { 1041 case LibFunc_strncpy: 1042 if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2))) 1043 return MemoryLocation(CB->getArgOperand(0), 1044 LocationSize::precise(Len->getZExtValue()), 1045 CB->getAAMetadata()); 1046 LLVM_FALLTHROUGH; 1047 case LibFunc_strcpy: 1048 case LibFunc_strcat: 1049 case LibFunc_strncat: 1050 return {MemoryLocation::getAfter(CB->getArgOperand(0))}; 1051 default: 1052 break; 1053 } 1054 } 1055 switch (CB->getIntrinsicID()) { 1056 case Intrinsic::init_trampoline: 1057 return {MemoryLocation::getAfter(CB->getArgOperand(0))}; 1058 case Intrinsic::masked_store: 1059 return {MemoryLocation::getForArgument(CB, 1, TLI)}; 1060 default: 1061 break; 1062 } 1063 return None; 1064 } 1065 1066 return MemoryLocation::getOrNone(I); 1067 } 1068 1069 /// Returns true if \p UseInst completely overwrites \p DefLoc 1070 /// (stored by \p DefInst). 1071 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst, 1072 Instruction *UseInst) { 1073 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a 1074 // MemoryDef to not write to memory, e.g. a volatile load is modeled as a 1075 // MemoryDef. 1076 if (!UseInst->mayWriteToMemory()) 1077 return false; 1078 1079 if (auto *CB = dyn_cast<CallBase>(UseInst)) 1080 if (CB->onlyAccessesInaccessibleMemory()) 1081 return false; 1082 1083 int64_t InstWriteOffset, DepWriteOffset; 1084 if (auto CC = getLocForWriteEx(UseInst)) 1085 return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset, 1086 DepWriteOffset) == OW_Complete; 1087 return false; 1088 } 1089 1090 /// Returns true if \p Def is not read before returning from the function. 1091 bool isWriteAtEndOfFunction(MemoryDef *Def) { 1092 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " (" 1093 << *Def->getMemoryInst() 1094 << ") is at the end the function \n"); 1095 1096 auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst()); 1097 if (!MaybeLoc) { 1098 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n"); 1099 return false; 1100 } 1101 1102 SmallVector<MemoryAccess *, 4> WorkList; 1103 SmallPtrSet<MemoryAccess *, 8> Visited; 1104 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) { 1105 if (!Visited.insert(Acc).second) 1106 return; 1107 for (Use &U : Acc->uses()) 1108 WorkList.push_back(cast<MemoryAccess>(U.getUser())); 1109 }; 1110 PushMemUses(Def); 1111 for (unsigned I = 0; I < WorkList.size(); I++) { 1112 if (WorkList.size() >= MemorySSAScanLimit) { 1113 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n"); 1114 return false; 1115 } 1116 1117 MemoryAccess *UseAccess = WorkList[I]; 1118 // Simply adding the users of MemoryPhi to the worklist is not enough, 1119 // because we might miss read clobbers in different iterations of a loop, 1120 // for example. 1121 // TODO: Add support for phi translation to handle the loop case. 1122 if (isa<MemoryPhi>(UseAccess)) 1123 return false; 1124 1125 // TODO: Checking for aliasing is expensive. Consider reducing the amount 1126 // of times this is called and/or caching it. 1127 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); 1128 if (isReadClobber(*MaybeLoc, UseInst)) { 1129 LLVM_DEBUG(dbgs() << " ... 
hit read clobber " << *UseInst << ".\n"); 1130 return false; 1131 } 1132 1133 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) 1134 PushMemUses(UseDef); 1135 } 1136 return true; 1137 } 1138 1139 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a 1140 /// pair with the MemoryLocation terminated by \p I and a boolean flag 1141 /// indicating whether \p I is a free-like call. 1142 Optional<std::pair<MemoryLocation, bool>> 1143 getLocForTerminator(Instruction *I) const { 1144 uint64_t Len; 1145 Value *Ptr; 1146 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len), 1147 m_Value(Ptr)))) 1148 return {std::make_pair(MemoryLocation(Ptr, Len), false)}; 1149 1150 if (auto *CB = dyn_cast<CallBase>(I)) { 1151 if (isFreeCall(I, &TLI)) 1152 return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)), 1153 true)}; 1154 } 1155 1156 return None; 1157 } 1158 1159 /// Returns true if \p I is a memory terminator instruction like 1160 /// llvm.lifetime.end or free. 1161 bool isMemTerminatorInst(Instruction *I) const { 1162 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I); 1163 return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) || 1164 isFreeCall(I, &TLI); 1165 } 1166 1167 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from 1168 /// instruction \p AccessI. 1169 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI, 1170 Instruction *MaybeTerm) { 1171 Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc = 1172 getLocForTerminator(MaybeTerm); 1173 1174 if (!MaybeTermLoc) 1175 return false; 1176 1177 // If the terminator is a free-like call, all accesses to the underlying 1178 // object can be considered terminated. 1179 if (getUnderlyingObject(Loc.Ptr) != 1180 getUnderlyingObject(MaybeTermLoc->first.Ptr)) 1181 return false; 1182 1183 auto TermLoc = MaybeTermLoc->first; 1184 if (MaybeTermLoc->second) { 1185 const Value *LocUO = getUnderlyingObject(Loc.Ptr); 1186 return BatchAA.isMustAlias(TermLoc.Ptr, LocUO); 1187 } 1188 int64_t InstWriteOffset = 0; 1189 int64_t DepWriteOffset = 0; 1190 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset, 1191 DepWriteOffset) == OW_Complete; 1192 } 1193 1194 // Returns true if \p Use may read from \p DefLoc. 1195 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) { 1196 if (isNoopIntrinsic(UseInst)) 1197 return false; 1198 1199 // Monotonic or weaker atomic stores can be re-ordered and do not need to be 1200 // treated as read clobber. 1201 if (auto SI = dyn_cast<StoreInst>(UseInst)) 1202 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic); 1203 1204 if (!UseInst->mayReadFromMemory()) 1205 return false; 1206 1207 if (auto *CB = dyn_cast<CallBase>(UseInst)) 1208 if (CB->onlyAccessesInaccessibleMemory()) 1209 return false; 1210 1211 return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc)); 1212 } 1213 1214 /// Returns true if a dependency between \p Current and \p KillingDef is 1215 /// guaranteed to be loop invariant for the loops that they are in. Either 1216 /// because they are known to be in the same block, in the same loop level or 1217 /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation 1218 /// during execution of the containing function. 
1219 bool isGuaranteedLoopIndependent(const Instruction *Current, 1220 const Instruction *KillingDef, 1221 const MemoryLocation &CurrentLoc) { 1222 // If the dependency is within the same block or loop level (being careful 1223 // of irreducible loops), we know that AA will return a valid result for the 1224 // memory dependency. (Both at the function level, outside of any loop, 1225 // would also be valid but we currently disable that to limit compile time). 1226 if (Current->getParent() == KillingDef->getParent()) 1227 return true; 1228 const Loop *CurrentLI = LI.getLoopFor(Current->getParent()); 1229 if (!ContainsIrreducibleLoops && CurrentLI && 1230 CurrentLI == LI.getLoopFor(KillingDef->getParent())) 1231 return true; 1232 // Otherwise check the memory location is invariant to any loops. 1233 return isGuaranteedLoopInvariant(CurrentLoc.Ptr); 1234 } 1235 1236 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible 1237 /// loop. In particular, this guarantees that it only references a single 1238 /// MemoryLocation during execution of the containing function. 1239 bool isGuaranteedLoopInvariant(const Value *Ptr) { 1240 auto IsGuaranteedLoopInvariantBase = [this](const Value *Ptr) { 1241 Ptr = Ptr->stripPointerCasts(); 1242 if (auto *I = dyn_cast<Instruction>(Ptr)) { 1243 if (isa<AllocaInst>(Ptr)) 1244 return true; 1245 1246 if (isAllocLikeFn(I, &TLI)) 1247 return true; 1248 1249 return false; 1250 } 1251 return true; 1252 }; 1253 1254 Ptr = Ptr->stripPointerCasts(); 1255 if (auto *I = dyn_cast<Instruction>(Ptr)) { 1256 if (I->getParent()->isEntryBlock()) 1257 return true; 1258 } 1259 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) { 1260 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) && 1261 GEP->hasAllConstantIndices(); 1262 } 1263 return IsGuaranteedLoopInvariantBase(Ptr); 1264 } 1265 1266 // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess, 1267 // with no read access between them or on any other path to a function exit 1268 // block if \p KillingLoc is not accessible after the function returns. If 1269 // there is no such MemoryDef, return None. The returned value may not 1270 // (completely) overwrite \p KillingLoc. Currently we bail out when we 1271 // encounter an aliasing MemoryUse (read). 1272 Optional<MemoryAccess *> 1273 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess, 1274 const MemoryLocation &KillingLoc, const Value *KillingUndObj, 1275 unsigned &ScanLimit, unsigned &WalkerStepLimit, 1276 bool IsMemTerm, unsigned &PartialLimit) { 1277 if (ScanLimit == 0 || WalkerStepLimit == 0) { 1278 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); 1279 return None; 1280 } 1281 1282 MemoryAccess *Current = StartAccess; 1283 Instruction *KillingI = KillingDef->getMemoryInst(); 1284 LLVM_DEBUG(dbgs() << " trying to get dominating access\n"); 1285 1286 // Only optimize defining access of KillingDef when directly starting at its 1287 // defining access. The defining access also must only access KillingLoc. At 1288 // the moment we only support instructions with a single write location, so 1289 // it should be sufficient to disable optimizations for instructions that 1290 // also read from memory. 1291 bool CanOptimize = OptimizeMemorySSA && 1292 KillingDef->getDefiningAccess() == StartAccess && 1293 !KillingI->mayReadFromMemory(); 1294 1295 // Find the next clobbering Mod access for DefLoc, starting at StartAccess. 
1296 Optional<MemoryLocation> CurrentLoc; 1297 for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) { 1298 LLVM_DEBUG({ 1299 dbgs() << " visiting " << *Current; 1300 if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current)) 1301 dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst() 1302 << ")"; 1303 dbgs() << "\n"; 1304 }); 1305 1306 // Reached TOP. 1307 if (MSSA.isLiveOnEntryDef(Current)) { 1308 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n"); 1309 return None; 1310 } 1311 1312 // Cost of a step. Accesses in the same block are more likely to be valid 1313 // candidates for elimination, hence consider them cheaper. 1314 unsigned StepCost = KillingDef->getBlock() == Current->getBlock() 1315 ? MemorySSASameBBStepCost 1316 : MemorySSAOtherBBStepCost; 1317 if (WalkerStepLimit <= StepCost) { 1318 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n"); 1319 return None; 1320 } 1321 WalkerStepLimit -= StepCost; 1322 1323 // Return for MemoryPhis. They cannot be eliminated directly and the 1324 // caller is responsible for traversing them. 1325 if (isa<MemoryPhi>(Current)) { 1326 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n"); 1327 return Current; 1328 } 1329 1330 // Below, check if CurrentDef is a valid candidate to be eliminated by 1331 // KillingDef. If it is not, check the next candidate. 1332 MemoryDef *CurrentDef = cast<MemoryDef>(Current); 1333 Instruction *CurrentI = CurrentDef->getMemoryInst(); 1334 1335 if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(KillingUndObj), 1336 TLI)) { 1337 CanOptimize = false; 1338 continue; 1339 } 1340 1341 // Before we try to remove anything, check for any extra throwing 1342 // instructions that block us from DSEing 1343 if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) { 1344 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n"); 1345 return None; 1346 } 1347 1348 // Check for anything that looks like it will be a barrier to further 1349 // removal 1350 if (isDSEBarrier(KillingUndObj, CurrentI)) { 1351 LLVM_DEBUG(dbgs() << " ... skip, barrier\n"); 1352 return None; 1353 } 1354 1355 // If Current is known to be on path that reads DefLoc or is a read 1356 // clobber, bail out, as the path is not profitable. We skip this check 1357 // for intrinsic calls, because the code knows how to handle memcpy 1358 // intrinsics. 1359 if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI)) 1360 return None; 1361 1362 // Quick check if there are direct uses that are read-clobbers. 1363 if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) { 1364 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser())) 1365 return !MSSA.dominates(StartAccess, UseOrDef) && 1366 isReadClobber(KillingLoc, UseOrDef->getMemoryInst()); 1367 return false; 1368 })) { 1369 LLVM_DEBUG(dbgs() << " ... found a read clobber\n"); 1370 return None; 1371 } 1372 1373 // If Current does not have an analyzable write location or is not 1374 // removable, skip it. 1375 CurrentLoc = getLocForWriteEx(CurrentI); 1376 if (!CurrentLoc || !isRemovable(CurrentI)) { 1377 CanOptimize = false; 1378 continue; 1379 } 1380 1381 // AliasAnalysis does not account for loops. Limit elimination to 1382 // candidates for which we can guarantee they always store to the same 1383 // memory location and not located in different loops. 1384 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) { 1385 LLVM_DEBUG(dbgs() << " ... 
not guaranteed loop independent\n"); 1386 WalkerStepLimit -= 1; 1387 CanOptimize = false; 1388 continue; 1389 } 1390 1391 if (IsMemTerm) { 1392 // If the killing def is a memory terminator (e.g. lifetime.end), check 1393 // the next candidate if the current Current does not write the same 1394 // underlying object as the terminator. 1395 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) { 1396 CanOptimize = false; 1397 continue; 1398 } 1399 } else { 1400 int64_t KillingOffset = 0; 1401 int64_t DeadOffset = 0; 1402 auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc, 1403 KillingOffset, DeadOffset); 1404 if (CanOptimize) { 1405 // CurrentDef is the earliest write clobber of KillingDef. Use it as 1406 // optimized access. Do not optimize if CurrentDef is already the 1407 // defining access of KillingDef. 1408 if (CurrentDef != KillingDef->getDefiningAccess() && 1409 (OR == OW_Complete || OR == OW_MaybePartial)) 1410 KillingDef->setOptimized(CurrentDef); 1411 1412 // Once a may-aliasing def is encountered do not set an optimized 1413 // access. 1414 if (OR != OW_None) 1415 CanOptimize = false; 1416 } 1417 1418 // If Current does not write to the same object as KillingDef, check 1419 // the next candidate. 1420 if (OR == OW_Unknown || OR == OW_None) 1421 continue; 1422 else if (OR == OW_MaybePartial) { 1423 // If KillingDef only partially overwrites Current, check the next 1424 // candidate if the partial step limit is exceeded. This aggressively 1425 // limits the number of candidates for partial store elimination, 1426 // which are less likely to be removable in the end. 1427 if (PartialLimit <= 1) { 1428 WalkerStepLimit -= 1; 1429 LLVM_DEBUG(dbgs() << " ... reached partial limit ... continue with next access\n"); 1430 continue; 1431 } 1432 PartialLimit -= 1; 1433 } 1434 } 1435 break; 1436 }; 1437 1438 // Accesses to objects accessible after the function returns can only be 1439 // eliminated if the access is dead along all paths to the exit. Collect 1440 // the blocks with killing (=completely overwriting MemoryDefs) and check if 1441 // they cover all paths from MaybeDeadAccess to any function exit. 1442 SmallPtrSet<Instruction *, 16> KillingDefs; 1443 KillingDefs.insert(KillingDef->getMemoryInst()); 1444 MemoryAccess *MaybeDeadAccess = Current; 1445 MemoryLocation MaybeDeadLoc = *CurrentLoc; 1446 Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst(); 1447 LLVM_DEBUG(dbgs() << " Checking for reads of " << *MaybeDeadAccess << " (" 1448 << *MaybeDeadI << ")\n"); 1449 1450 SmallSetVector<MemoryAccess *, 32> WorkList; 1451 auto PushMemUses = [&WorkList](MemoryAccess *Acc) { 1452 for (Use &U : Acc->uses()) 1453 WorkList.insert(cast<MemoryAccess>(U.getUser())); 1454 }; 1455 PushMemUses(MaybeDeadAccess); 1456 1457 // Check if DeadDef may be read. 1458 for (unsigned I = 0; I < WorkList.size(); I++) { 1459 MemoryAccess *UseAccess = WorkList[I]; 1460 1461 LLVM_DEBUG(dbgs() << " " << *UseAccess); 1462 // Bail out if the number of accesses to check exceeds the scan limit. 1463 if (ScanLimit < (WorkList.size() - I)) { 1464 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); 1465 return None; 1466 } 1467 --ScanLimit; 1468 NumDomMemDefChecks++; 1469 1470 if (isa<MemoryPhi>(UseAccess)) { 1471 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) { 1472 return DT.properlyDominates(KI->getParent(), 1473 UseAccess->getBlock()); 1474 })) { 1475 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n"); 1476 continue; 1477 } 1478 LLVM_DEBUG(dbgs() << "\n ... 
adding PHI uses\n"); 1479 PushMemUses(UseAccess); 1480 continue; 1481 } 1482 1483 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); 1484 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n"); 1485 1486 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) { 1487 return DT.dominates(KI, UseInst); 1488 })) { 1489 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n"); 1490 continue; 1491 } 1492 1493 // A memory terminator kills all preceding MemoryDefs and all succeeding 1494 // MemoryAccesses. We do not have to check its users. 1495 if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) { 1496 LLVM_DEBUG( 1497 dbgs() 1498 << " ... skipping, memterminator invalidates following accesses\n"); 1499 continue; 1500 } 1501 1502 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) { 1503 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n"); 1504 PushMemUses(UseAccess); 1505 continue; 1506 } 1507 1508 if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(KillingUndObj)) { 1509 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n"); 1510 return None; 1511 } 1512 1513 // Uses which may read the original MemoryDef mean we cannot eliminate the 1514 // original MD. Stop walk. 1515 if (isReadClobber(MaybeDeadLoc, UseInst)) { 1516 LLVM_DEBUG(dbgs() << " ... found read clobber\n"); 1517 return None; 1518 } 1519 1520 // If this worklist walks back to the original memory access (and the 1521 // pointer is not guaranteed loop invariant) then we cannot assume that a 1522 // store kills itself. 1523 if (MaybeDeadAccess == UseAccess && 1524 !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) { 1525 LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n"); 1526 return None; 1527 } 1528 // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check 1529 // whether they read the memory location. 1530 // TODO: It would probably be better to check for self-reads before 1531 // calling the function. 1532 if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) { 1533 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n"); 1534 continue; 1535 } 1536 1537 // Check all uses for MemoryDefs, except for defs completely overwriting 1538 // the original location. Otherwise we have to check uses of *all* 1539 // MemoryDefs we discover, including non-aliasing ones; otherwise we might 1540 // miss cases like the following: 1541 // 1 = Def(LoE) ; <----- DeadDef stores [0,1] 1542 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3] 1543 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3]. 1544 // (The Use points to the *first* Def it may alias) 1545 // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias, 1546 // stores [0,1] 1547 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) { 1548 if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) { 1549 BasicBlock *MaybeKillingBlock = UseInst->getParent(); 1550 if (PostOrderNumbers.find(MaybeKillingBlock)->second < 1551 PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) { 1552 if (!isInvisibleToCallerAfterRet(KillingUndObj)) { 1553 LLVM_DEBUG(dbgs() 1554 << " ... found killing def " << *UseInst << "\n"); 1555 KillingDefs.insert(UseInst); 1556 } 1557 } else { 1558 LLVM_DEBUG(dbgs() 1559 << " ...
found preceding def " << *UseInst << "\n"); 1560 return None; 1561 } 1562 } else 1563 PushMemUses(UseDef); 1564 } 1565 } 1566 1567 // For accesses to locations visible after the function returns, make sure 1568 // that the location is dead (=overwritten) along all paths from 1569 // MaybeDeadAccess to the exit. 1570 if (!isInvisibleToCallerAfterRet(KillingUndObj)) { 1571 SmallPtrSet<BasicBlock *, 16> KillingBlocks; 1572 for (Instruction *KD : KillingDefs) 1573 KillingBlocks.insert(KD->getParent()); 1574 assert(!KillingBlocks.empty() && 1575 "Expected at least a single killing block"); 1576 1577 // Find the common post-dominator of all killing blocks. 1578 BasicBlock *CommonPred = *KillingBlocks.begin(); 1579 for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) { 1580 if (!CommonPred) 1581 break; 1582 CommonPred = PDT.findNearestCommonDominator(CommonPred, BB); 1583 } 1584 1585 // If CommonPred is in the set of killing blocks, just check if it 1586 // post-dominates MaybeDeadAccess. 1587 if (KillingBlocks.count(CommonPred)) { 1588 if (PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) 1589 return {MaybeDeadAccess}; 1590 return None; 1591 } 1592 1593 // If CommonPred post-dominates MaybeDeadAccess, walk the CFG backwards from 1594 // the exits to check that every path from MaybeDeadAccess to an exit goes 1595 // through a killing block. Otherwise such a bypassing path exists and we bail. 1596 if (PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) { 1597 SetVector<BasicBlock *> WorkList; 1598 1599 // If CommonPred is null, there are multiple exits from the function. 1600 // They all have to be added to the worklist. 1601 if (CommonPred) 1602 WorkList.insert(CommonPred); 1603 else 1604 for (BasicBlock *R : PDT.roots()) 1605 WorkList.insert(R); 1606 1607 NumCFGTries++; 1608 // Check if all paths starting from an exit node go through one of the 1609 // killing blocks before reaching MaybeDeadAccess. 1610 for (unsigned I = 0; I < WorkList.size(); I++) { 1611 NumCFGChecks++; 1612 BasicBlock *Current = WorkList[I]; 1613 if (KillingBlocks.count(Current)) 1614 continue; 1615 if (Current == MaybeDeadAccess->getBlock()) 1616 return None; 1617 1618 // MaybeDeadAccess is reachable from the entry, so we don't have to 1619 // explore unreachable blocks further. 1620 if (!DT.isReachableFromEntry(Current)) 1621 continue; 1622 1623 for (BasicBlock *Pred : predecessors(Current)) 1624 WorkList.insert(Pred); 1625 1626 if (WorkList.size() >= MemorySSAPathCheckLimit) 1627 return None; 1628 } 1629 NumCFGSuccess++; 1630 return {MaybeDeadAccess}; 1631 } 1632 return None; 1633 } 1634 1635 // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is 1636 // potentially dead. 1637 return {MaybeDeadAccess}; 1638 } 1639 1640 // Delete a dead instruction and any operands that become trivially dead. 1641 void deleteDeadInstruction(Instruction *SI) { 1642 MemorySSAUpdater Updater(&MSSA); 1643 SmallVector<Instruction *, 32> NowDeadInsts; 1644 NowDeadInsts.push_back(SI); 1645 --NumFastOther; 1646 1647 while (!NowDeadInsts.empty()) { 1648 Instruction *DeadInst = NowDeadInsts.pop_back_val(); 1649 ++NumFastOther; 1650 1651 // Try to preserve debug information attached to the dead instruction. 1652 salvageDebugInfo(*DeadInst); 1653 salvageKnowledge(DeadInst); 1654 1655 // Remove the Instruction from MSSA.
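// MemoryDefs removed here are also recorded in SkipStores (below), so the
// main elimination loop and the later clean-up passes do not revisit an
// instruction whose MemoryAccess has already been destroyed.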
1656 if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) { 1657 if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) { 1658 SkipStores.insert(MD); 1659 } 1660 1661 Updater.removeMemoryAccess(MA); 1662 } 1663 1664 auto I = IOLs.find(DeadInst->getParent()); 1665 if (I != IOLs.end()) 1666 I->second.erase(DeadInst); 1667 // Remove its operands 1668 for (Use &O : DeadInst->operands()) 1669 if (Instruction *OpI = dyn_cast<Instruction>(O)) { 1670 O = nullptr; 1671 if (isInstructionTriviallyDead(OpI, &TLI)) 1672 NowDeadInsts.push_back(OpI); 1673 } 1674 1675 EI.removeInstruction(DeadInst); 1676 DeadInst->eraseFromParent(); 1677 } 1678 } 1679 1680 // Check for any extra throws between \p KillingI and \p DeadI that block 1681 // DSE. This only checks extra maythrows (those that aren't MemoryDefs). 1682 // MemoryDefs that may throw are handled during the walk from one def to the 1683 // next. 1684 bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI, 1685 const Value *KillingUndObj) { 1686 // First see if we can ignore it by using the fact that the underlying 1687 // object of KillingI is an alloca/alloca-like object that is not visible 1688 // to the caller during execution of the function. 1689 if (KillingUndObj && isInvisibleToCallerBeforeRet(KillingUndObj)) 1690 return false; 1691 1692 if (KillingI->getParent() == DeadI->getParent()) 1693 return ThrowingBlocks.count(KillingI->getParent()); 1694 return !ThrowingBlocks.empty(); 1695 } 1696 1697 // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following 1698 // instructions act as barriers: 1699 // * A memory instruction that may throw and \p KillingI accesses a non-stack 1700 // object. 1701 // * Atomic stores stronger than monotonic. 1702 bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) { 1703 // If DeadI may throw it acts as a barrier, unless the killing access is to 1704 // an alloca/alloca-like object that does not escape. 1705 if (DeadI->mayThrow() && !isInvisibleToCallerBeforeRet(KillingUndObj)) 1706 return true; 1707 1708 // If DeadI is an atomic load/store stronger than monotonic, do not try to 1709 // eliminate/reorder it. 1710 if (DeadI->isAtomic()) { 1711 if (auto *LI = dyn_cast<LoadInst>(DeadI)) 1712 return isStrongerThanMonotonic(LI->getOrdering()); 1713 if (auto *SI = dyn_cast<StoreInst>(DeadI)) 1714 return isStrongerThanMonotonic(SI->getOrdering()); 1715 if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI)) 1716 return isStrongerThanMonotonic(ARMW->getOrdering()); 1717 if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI)) 1718 return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) || 1719 isStrongerThanMonotonic(CmpXchg->getFailureOrdering()); 1720 llvm_unreachable("other instructions should be skipped in MemorySSA"); 1721 } 1722 return false; 1723 } 1724 1725 /// Eliminate writes to objects that are not visible in the caller and are not 1726 /// accessed before returning from the function. 1727 bool eliminateDeadWritesAtEndOfFunction() { 1728 bool MadeChange = false; 1729 LLVM_DEBUG( 1730 dbgs() 1731 << "Trying to eliminate MemoryDefs at the end of the function\n"); 1732 for (int I = MemDefs.size() - 1; I >= 0; I--) { 1733 MemoryDef *Def = MemDefs[I]; 1734 if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst())) 1735 continue; 1736 1737 Instruction *DefI = Def->getMemoryInst(); 1738 auto DefLoc = getLocForWriteEx(DefI); 1739 if (!DefLoc) 1740 continue; 1741 1742 // NOTE: Currently eliminating writes at the end of a function is limited 1743 // to MemoryDefs with a single underlying object, to save compile-time.
In 1744 // practice it appears the case with multiple underlying objects is very 1745 // uncommon. If it turns out to be important, we can use 1746 // getUnderlyingObjects here instead. 1747 const Value *UO = getUnderlyingObject(DefLoc->Ptr); 1748 if (!isInvisibleToCallerAfterRet(UO)) 1749 continue; 1750 1751 if (isWriteAtEndOfFunction(Def)) { 1752 // See through pointer-to-pointer bitcasts 1753 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end " 1754 "of the function\n"); 1755 deleteDeadInstruction(DefI); 1756 ++NumFastStores; 1757 MadeChange = true; 1758 } 1759 } 1760 return MadeChange; 1761 } 1762 1763 /// \returns true if \p Def is a no-op store, either because it 1764 /// directly stores back a loaded value or stores zero to a calloced object. 1765 bool storeIsNoop(MemoryDef *Def, const Value *DefUO) { 1766 StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst()); 1767 MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst()); 1768 Constant *StoredConstant = nullptr; 1769 if (Store) 1770 StoredConstant = dyn_cast<Constant>(Store->getOperand(0)); 1771 if (MemSet) 1772 StoredConstant = dyn_cast<Constant>(MemSet->getValue()); 1773 1774 if (StoredConstant && StoredConstant->isNullValue()) { 1775 auto *DefUOInst = dyn_cast<Instruction>(DefUO); 1776 if (DefUOInst) { 1777 if (isCallocLikeFn(DefUOInst, &TLI)) { 1778 auto *UnderlyingDef = 1779 cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst)); 1780 // If UnderlyingDef is the clobbering access of Def, no instructions 1781 // between them can modify the memory location. 1782 auto *ClobberDef = 1783 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def); 1784 return UnderlyingDef == ClobberDef; 1785 } 1786 1787 if (MemSet) { 1788 if (F.hasFnAttribute(Attribute::SanitizeMemory) || 1789 F.hasFnAttribute(Attribute::SanitizeAddress) || 1790 F.hasFnAttribute(Attribute::SanitizeHWAddress) || 1791 F.getName() == "calloc") 1792 return false; 1793 auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUOInst)); 1794 if (!Malloc) 1795 return false; 1796 auto *InnerCallee = Malloc->getCalledFunction(); 1797 if (!InnerCallee) 1798 return false; 1799 LibFunc Func; 1800 if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) || 1801 Func != LibFunc_malloc) 1802 return false; 1803 1804 auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) { 1805 // Check for br(icmp ptr, null), truebb, falsebb) pattern at the end 1806 // of malloc block 1807 auto *MallocBB = Malloc->getParent(), 1808 *MemsetBB = Memset->getParent(); 1809 if (MallocBB == MemsetBB) 1810 return true; 1811 auto *Ptr = Memset->getArgOperand(0); 1812 auto *TI = MallocBB->getTerminator(); 1813 ICmpInst::Predicate Pred; 1814 BasicBlock *TrueBB, *FalseBB; 1815 if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB, 1816 FalseBB))) 1817 return false; 1818 if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB) 1819 return false; 1820 return true; 1821 }; 1822 1823 if (Malloc->getOperand(0) == MemSet->getLength()) { 1824 if (shouldCreateCalloc(Malloc, MemSet) && 1825 DT.dominates(Malloc, MemSet) && 1826 memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT)) { 1827 IRBuilder<> IRB(Malloc); 1828 const auto &DL = Malloc->getModule()->getDataLayout(); 1829 if (auto *Calloc = 1830 emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1), 1831 Malloc->getArgOperand(0), IRB, TLI)) { 1832 MemorySSAUpdater Updater(&MSSA); 1833 auto *LastDef = cast<MemoryDef>( 1834 Updater.getMemorySSA()->getMemoryAccess(Malloc)); 1835 auto *NewAccess = 
Updater.createMemoryAccessAfter( 1836 cast<Instruction>(Calloc), LastDef, LastDef); 1837 auto *NewAccessMD = cast<MemoryDef>(NewAccess); 1838 Updater.insertDef(NewAccessMD, /*RenameUses=*/true); 1839 Updater.removeMemoryAccess(Malloc); 1840 Malloc->replaceAllUsesWith(Calloc); 1841 Malloc->eraseFromParent(); 1842 return true; 1843 } 1844 return false; 1845 } 1846 } 1847 } 1848 } 1849 } 1850 1851 if (!Store) 1852 return false; 1853 1854 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) { 1855 if (LoadI->getPointerOperand() == Store->getOperand(1)) { 1856 // Get the defining access for the load. 1857 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess(); 1858 // Fast path: the defining accesses are the same. 1859 if (LoadAccess == Def->getDefiningAccess()) 1860 return true; 1861 1862 // Look through phi accesses. Recursively scan all phi accesses by 1863 // adding them to a worklist. Bail when we run into a memory def that 1864 // does not match LoadAccess. 1865 SetVector<MemoryAccess *> ToCheck; 1866 MemoryAccess *Current = 1867 MSSA.getWalker()->getClobberingMemoryAccess(Def); 1868 // We don't want to bail when we run into the store memory def. But, 1869 // the phi access may point to it. So, pretend like we've already 1870 // checked it. 1871 ToCheck.insert(Def); 1872 ToCheck.insert(Current); 1873 // Start at current (1) to simulate already having checked Def. 1874 for (unsigned I = 1; I < ToCheck.size(); ++I) { 1875 Current = ToCheck[I]; 1876 if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) { 1877 // Check all the operands. 1878 for (auto &Use : PhiAccess->incoming_values()) 1879 ToCheck.insert(cast<MemoryAccess>(&Use)); 1880 continue; 1881 } 1882 1883 // If we found a memory def, bail. This happens when we have an 1884 // unrelated write in between an otherwise noop store. 1885 assert(isa<MemoryDef>(Current) && 1886 "Only MemoryDefs should reach here."); 1887 // TODO: Skip no alias MemoryDefs that have no aliasing reads. 1888 // We are searching for the definition of the store's destination. 1889 // So, if that is the same definition as the load, then this is a 1890 // noop. Otherwise, fail. 1891 if (LoadAccess != Current) 1892 return false; 1893 } 1894 return true; 1895 } 1896 } 1897 1898 return false; 1899 } 1900 1901 bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) { 1902 bool Changed = false; 1903 for (auto OI : IOL) { 1904 Instruction *DeadI = OI.first; 1905 MemoryLocation Loc = *getLocForWriteEx(DeadI); 1906 assert(isRemovable(DeadI) && "Expect only removable instruction"); 1907 1908 const Value *Ptr = Loc.Ptr->stripPointerCasts(); 1909 int64_t DeadStart = 0; 1910 uint64_t DeadSize = Loc.Size.getValue(); 1911 GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL); 1912 OverlapIntervalsTy &IntervalMap = OI.second; 1913 Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize); 1914 if (IntervalMap.empty()) 1915 continue; 1916 Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize); 1917 } 1918 return Changed; 1919 } 1920 1921 /// Eliminates writes to locations where the value that is being written 1922 /// is already stored at the same location. 
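/// For example, in the following illustrative IR sketch the second store is
/// redundant and can be removed if its (optimized) defining access is the
/// first store:
///   store i32 0, ptr %p
///   ...
///   store i32 0, ptr %p   ; rewrites the already-existing value
/// A store is likewise redundant when its defining access is a memset that
/// already wrote the same byte value over a covering location.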
1923 bool eliminateRedundantStoresOfExistingValues() { 1924 bool MadeChange = false; 1925 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the " 1926 "already existing value\n"); 1927 for (auto *Def : MemDefs) { 1928 if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def) || 1929 !isRemovable(Def->getMemoryInst())) 1930 continue; 1931 MemoryDef *UpperDef; 1932 // To conserve compile-time, we avoid walking to the next clobbering def. 1933 // Instead, we just try to get the optimized access, if it exists. DSE 1934 // will try to optimize defs during the earlier traversal. 1935 if (Def->isOptimized()) 1936 UpperDef = dyn_cast<MemoryDef>(Def->getOptimized()); 1937 else 1938 UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess()); 1939 if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef)) 1940 continue; 1941 1942 Instruction *DefInst = Def->getMemoryInst(); 1943 Instruction *UpperInst = UpperDef->getMemoryInst(); 1944 auto IsRedundantStore = [this, DefInst, 1945 UpperInst](MemoryLocation UpperLoc) { 1946 if (DefInst->isIdenticalTo(UpperInst)) 1947 return true; 1948 if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) { 1949 if (auto *SI = dyn_cast<StoreInst>(DefInst)) { 1950 auto MaybeDefLoc = getLocForWriteEx(DefInst); 1951 if (!MaybeDefLoc) 1952 return false; 1953 int64_t InstWriteOffset = 0; 1954 int64_t DepWriteOffset = 0; 1955 auto OR = isOverwrite(UpperInst, DefInst, UpperLoc, *MaybeDefLoc, 1956 InstWriteOffset, DepWriteOffset); 1957 Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL); 1958 return StoredByte && StoredByte == MemSetI->getOperand(1) && 1959 OR == OW_Complete; 1960 } 1961 } 1962 return false; 1963 }; 1964 1965 auto MaybeUpperLoc = getLocForWriteEx(UpperInst); 1966 if (!MaybeUpperLoc || !IsRedundantStore(*MaybeUpperLoc) || 1967 isReadClobber(*MaybeUpperLoc, DefInst)) 1968 continue; 1969 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *DefInst 1970 << '\n'); 1971 deleteDeadInstruction(DefInst); 1972 NumRedundantStores++; 1973 MadeChange = true; 1974 } 1975 return MadeChange; 1976 } 1977 }; 1978 1979 static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, 1980 DominatorTree &DT, PostDominatorTree &PDT, 1981 const TargetLibraryInfo &TLI, 1982 const LoopInfo &LI) { 1983 bool MadeChange = false; 1984 1985 DSEState State(F, AA, MSSA, DT, PDT, TLI, LI); 1986 // For each store: 1987 for (unsigned I = 0; I < State.MemDefs.size(); I++) { 1988 MemoryDef *KillingDef = State.MemDefs[I]; 1989 if (State.SkipStores.count(KillingDef)) 1990 continue; 1991 Instruction *KillingI = KillingDef->getMemoryInst(); 1992 1993 Optional<MemoryLocation> MaybeKillingLoc; 1994 if (State.isMemTerminatorInst(KillingI)) 1995 MaybeKillingLoc = State.getLocForTerminator(KillingI).map( 1996 [](const std::pair<MemoryLocation, bool> &P) { return P.first; }); 1997 else 1998 MaybeKillingLoc = State.getLocForWriteEx(KillingI); 1999 2000 if (!MaybeKillingLoc) { 2001 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for " 2002 << *KillingI << "\n"); 2003 continue; 2004 } 2005 MemoryLocation KillingLoc = *MaybeKillingLoc; 2006 assert(KillingLoc.Ptr && "KillingLoc should not be null"); 2007 const Value *KillingUndObj = getUnderlyingObject(KillingLoc.Ptr); 2008 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by " 2009 << *KillingDef << " (" << *KillingI << ")\n"); 2010 2011 unsigned ScanLimit = MemorySSAScanLimit; 2012 unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit; 2013 unsigned PartialLimit = MemorySSAPartialStoreLimit; 2014 // 
Worklist of MemoryAccesses that may be killed by KillingDef. 2015 SetVector<MemoryAccess *> ToCheck; 2016 ToCheck.insert(KillingDef->getDefiningAccess()); 2017 2018 bool Shortend = false; 2019 bool IsMemTerm = State.isMemTerminatorInst(KillingI); 2020 // Check if MemoryAccesses in the worklist are killed by KillingDef. 2021 for (unsigned I = 0; I < ToCheck.size(); I++) { 2022 MemoryAccess *Current = ToCheck[I]; 2023 if (State.SkipStores.count(Current)) 2024 continue; 2025 2026 Optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef( 2027 KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit, 2028 WalkerStepLimit, IsMemTerm, PartialLimit); 2029 2030 if (!MaybeDeadAccess) { 2031 LLVM_DEBUG(dbgs() << " finished walk\n"); 2032 continue; 2033 } 2034 2035 MemoryAccess *DeadAccess = *MaybeDeadAccess; 2036 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess); 2037 if (isa<MemoryPhi>(DeadAccess)) { 2038 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n"); 2039 for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) { 2040 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V); 2041 BasicBlock *IncomingBlock = IncomingAccess->getBlock(); 2042 BasicBlock *PhiBlock = DeadAccess->getBlock(); 2043 2044 // We only consider incoming MemoryAccesses that come before the 2045 // MemoryPhi. Otherwise we could discover candidates that do not 2046 // strictly dominate our starting def. 2047 if (State.PostOrderNumbers[IncomingBlock] > 2048 State.PostOrderNumbers[PhiBlock]) 2049 ToCheck.insert(IncomingAccess); 2050 } 2051 continue; 2052 } 2053 auto *DeadDefAccess = cast<MemoryDef>(DeadAccess); 2054 Instruction *DeadI = DeadDefAccess->getMemoryInst(); 2055 LLVM_DEBUG(dbgs() << " (" << *DeadI << ")\n"); 2056 ToCheck.insert(DeadDefAccess->getDefiningAccess()); 2057 NumGetDomMemoryDefPassed++; 2058 2059 if (!DebugCounter::shouldExecute(MemorySSACounter)) 2060 continue; 2061 2062 MemoryLocation DeadLoc = *State.getLocForWriteEx(DeadI); 2063 2064 if (IsMemTerm) { 2065 const Value *DeadUndObj = getUnderlyingObject(DeadLoc.Ptr); 2066 if (KillingUndObj != DeadUndObj) 2067 continue; 2068 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI 2069 << "\n KILLER: " << *KillingI << '\n'); 2070 State.deleteDeadInstruction(DeadI); 2071 ++NumFastStores; 2072 MadeChange = true; 2073 } else { 2074 // Check if KillingI overwrites DeadI. 2075 int64_t KillingOffset = 0; 2076 int64_t DeadOffset = 0; 2077 OverwriteResult OR = State.isOverwrite( 2078 KillingI, DeadI, KillingLoc, DeadLoc, KillingOffset, DeadOffset); 2079 if (OR == OW_MaybePartial) { 2080 auto Iter = State.IOLs.insert( 2081 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>( 2082 DeadI->getParent(), InstOverlapIntervalsTy())); 2083 auto &IOL = Iter.first->second; 2084 OR = isPartialOverwrite(KillingLoc, DeadLoc, KillingOffset, 2085 DeadOffset, DeadI, IOL); 2086 } 2087 2088 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) { 2089 auto *DeadSI = dyn_cast<StoreInst>(DeadI); 2090 auto *KillingSI = dyn_cast<StoreInst>(KillingI); 2091 // We are re-using tryToMergePartialOverlappingStores, which requires 2092 // DeadSI to dominate KillingSI. 2093 // TODO: implement tryToMergePartialOverlappingStores using MemorySSA. 2094 if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) { 2095 if (Constant *Merged = tryToMergePartialOverlappingStores( 2096 KillingSI, DeadSI, KillingOffset, DeadOffset, State.DL, 2097 State.BatchAA, &DT)) { 2098 2099 // Update stored value of earlier store to merged constant.
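// The dead (earlier) store now carries the merged value; the killing store
// becomes redundant and is deleted below, and Shortend is set so the
// no-op-store check for KillingI is skipped afterwards.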
2100 DeadSI->setOperand(0, Merged); 2101 ++NumModifiedStores; 2102 MadeChange = true; 2103 2104 Shortend = true; 2105 // Remove killing store and remove any outstanding overlap 2106 // intervals for the updated store. 2107 State.deleteDeadInstruction(KillingSI); 2108 auto I = State.IOLs.find(DeadSI->getParent()); 2109 if (I != State.IOLs.end()) 2110 I->second.erase(DeadSI); 2111 break; 2112 } 2113 } 2114 } 2115 2116 if (OR == OW_Complete) { 2117 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI 2118 << "\n KILLER: " << *KillingI << '\n'); 2119 State.deleteDeadInstruction(DeadI); 2120 ++NumFastStores; 2121 MadeChange = true; 2122 } 2123 } 2124 } 2125 2126 // Check if the store is a no-op. 2127 if (!Shortend && isRemovable(KillingI) && 2128 State.storeIsNoop(KillingDef, KillingUndObj)) { 2129 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *KillingI 2130 << '\n'); 2131 State.deleteDeadInstruction(KillingI); 2132 NumRedundantStores++; 2133 MadeChange = true; 2134 continue; 2135 } 2136 } 2137 2138 if (EnablePartialOverwriteTracking) 2139 for (auto &KV : State.IOLs) 2140 MadeChange |= State.removePartiallyOverlappedStores(KV.second); 2141 2142 MadeChange |= State.eliminateRedundantStoresOfExistingValues(); 2143 MadeChange |= State.eliminateDeadWritesAtEndOfFunction(); 2144 return MadeChange; 2145 } 2146 } // end anonymous namespace 2147 2148 //===----------------------------------------------------------------------===// 2149 // DSE Pass 2150 //===----------------------------------------------------------------------===// 2151 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) { 2152 AliasAnalysis &AA = AM.getResult<AAManager>(F); 2153 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F); 2154 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F); 2155 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); 2156 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F); 2157 LoopInfo &LI = AM.getResult<LoopAnalysis>(F); 2158 2159 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI); 2160 2161 #ifdef LLVM_ENABLE_STATS 2162 if (AreStatisticsEnabled()) 2163 for (auto &I : instructions(F)) 2164 NumRemainingStores += isa<StoreInst>(&I); 2165 #endif 2166 2167 if (!Changed) 2168 return PreservedAnalyses::all(); 2169 2170 PreservedAnalyses PA; 2171 PA.preserveSet<CFGAnalyses>(); 2172 PA.preserve<MemorySSAAnalysis>(); 2173 PA.preserve<LoopAnalysis>(); 2174 return PA; 2175 } 2176 2177 namespace { 2178 2179 /// A legacy pass for the legacy pass manager that wraps \c DSEPass. 
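/// It runs the same MemorySSA-backed eliminateDeadStores() implementation as
/// the new pass manager pass and preserves the CFG, the (post-)dominator
/// trees, MemorySSA and LoopInfo.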
2180 class DSELegacyPass : public FunctionPass { 2181 public: 2182 static char ID; // Pass identification, replacement for typeid 2183 2184 DSELegacyPass() : FunctionPass(ID) { 2185 initializeDSELegacyPassPass(*PassRegistry::getPassRegistry()); 2186 } 2187 2188 bool runOnFunction(Function &F) override { 2189 if (skipFunction(F)) 2190 return false; 2191 2192 AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); 2193 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2194 const TargetLibraryInfo &TLI = 2195 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 2196 MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); 2197 PostDominatorTree &PDT = 2198 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree(); 2199 LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2200 2201 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI); 2202 2203 #ifdef LLVM_ENABLE_STATS 2204 if (AreStatisticsEnabled()) 2205 for (auto &I : instructions(F)) 2206 NumRemainingStores += isa<StoreInst>(&I); 2207 #endif 2208 2209 return Changed; 2210 } 2211 2212 void getAnalysisUsage(AnalysisUsage &AU) const override { 2213 AU.setPreservesCFG(); 2214 AU.addRequired<AAResultsWrapperPass>(); 2215 AU.addRequired<TargetLibraryInfoWrapperPass>(); 2216 AU.addPreserved<GlobalsAAWrapperPass>(); 2217 AU.addRequired<DominatorTreeWrapperPass>(); 2218 AU.addPreserved<DominatorTreeWrapperPass>(); 2219 AU.addRequired<PostDominatorTreeWrapperPass>(); 2220 AU.addRequired<MemorySSAWrapperPass>(); 2221 AU.addPreserved<PostDominatorTreeWrapperPass>(); 2222 AU.addPreserved<MemorySSAWrapperPass>(); 2223 AU.addRequired<LoopInfoWrapperPass>(); 2224 AU.addPreserved<LoopInfoWrapperPass>(); 2225 } 2226 }; 2227 2228 } // end anonymous namespace 2229 2230 char DSELegacyPass::ID = 0; 2231 2232 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false, 2233 false) 2234 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2235 INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass) 2236 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2237 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 2238 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 2239 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) 2240 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 2241 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 2242 INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false, 2243 false) 2244 2245 FunctionPass *llvm::createDeadStoreEliminationPass() { 2246 return new DSELegacyPass(); 2247 } 2248