//===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The code below implements dead store elimination using MemorySSA. It uses
// the following general approach: given a MemoryDef, walk upwards to find
// clobbering MemoryDefs that may be killed by the starting def. Then check
// that there are no uses that may read the location of the original MemoryDef
// in between both MemoryDefs. A bit more concretely:
//
// For all MemoryDefs StartDef:
// 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by walking
//    upwards.
// 2. Check that there are no reads between MaybeDeadAccess and the StartDef by
//    checking all uses starting at MaybeDeadAccess and walking until we see
//    StartDef.
// 3. For each found CurrentDef, check that:
//   1. There are no barrier instructions between CurrentDef and StartDef (like
//      throws or stores with ordering constraints).
//   2. StartDef is executed whenever CurrentDef is executed.
//   3. StartDef completely overwrites CurrentDef.
// 4. Erase CurrentDef from the function and MemorySSA.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "dse"

STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");
STATISTIC(NumCFGChecks, "Number of blocks checked when trying to prove that "
                        "all paths to an exit pass through a killing block");
STATISTIC(NumCFGTries, "Number of times a CFG scan for killing blocks was "
                       "attempted");
STATISTIC(NumCFGSuccess, "Number of times a CFG scan proved a store dead "
                         "along all paths to an exit");
STATISTIC(NumGetDomMemoryDefPassed,
          "Number of times a valid candidate is returned from getDomMemoryDef");
STATISTIC(NumDomMemDefChecks,
          "Number of iterations checking for reads in getDomMemoryDef");

DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
              "Controls which MemoryDefs are eliminated.");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
                               cl::init(true), cl::Hidden,
                               cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
                          cl::init(true), cl::Hidden,
                          cl::desc("Enable partial store merging in DSE"));

static cl::opt<unsigned>
    MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
                       cl::desc("The number of memory instructions to scan for "
                                "dead store elimination (default = 150)"));

static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
    "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
    cl::desc("The maximum number of steps while walking upwards to find "
             "MemoryDefs that may be killed (default = 90)"));

static cl::opt<unsigned> MemorySSAPartialStoreLimit(
    "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
    cl::desc("The maximum number of candidates that only partially overwrite "
             "the killing MemoryDef to consider (default = 5)"));

static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
    "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
    cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
             "other stores per basic block (default = 5000)"));

static cl::opt<unsigned> MemorySSASameBBStepCost(
    "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
    cl::desc("The cost of a step in the same basic block as the killing "
             "MemoryDef (default = 1)"));

static cl::opt<unsigned>
    MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
                             cl::Hidden,
                             cl::desc("The cost of a step in a different basic "
                                      "block than the killing MemoryDef "
                                      "(default = 5)"));

static cl::opt<unsigned> MemorySSAPathCheckLimit(
    "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
    cl::desc("The maximum number of blocks to check when trying to prove that "
             "all paths to an exit go through a killing block (default = 50)"));

// This flag allows or disallows DSE to optimize MemorySSA during its
// traversal. Note that DSE optimizing MemorySSA may impact other passes
// downstream of the DSE invocation and can lead to issues not being
// reproducible in isolation (i.e. when MemorySSA is built from scratch).
// In those cases, the flag can be used to check if DSE's MemorySSA
// optimizations impact follow-up passes.
static cl::opt<bool>
    OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden,
                      cl::desc("Allow DSE to optimize memory accesses."));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      // Do shorten memory intrinsics.
      // FIXME: Add memmove if it's also safe to transform.
      return true;
    }
  }

  // Don't shorten library calls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_MaybePartial,
  OW_None,
  OW_Unknown
};

} // end anonymous namespace

/// Check if two instructions are masked stores that completely
/// overwrite one another. More specifically, \p KillingI has to
/// overwrite \p DeadI.
static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI,
                                              const Instruction *DeadI,
                                              BatchAAResults &AA) {
  const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI);
  const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI);
  if (KillingII == nullptr || DeadII == nullptr)
    return OW_Unknown;
  if (KillingII->getIntrinsicID() != DeadII->getIntrinsicID())
    return OW_Unknown;
  if (KillingII->getIntrinsicID() == Intrinsic::masked_store) {
    // Type size.
    VectorType *KillingTy =
        cast<VectorType>(KillingII->getArgOperand(0)->getType());
    VectorType *DeadTy = cast<VectorType>(DeadII->getArgOperand(0)->getType());
    if (KillingTy->getScalarSizeInBits() != DeadTy->getScalarSizeInBits())
      return OW_Unknown;
    // Element count.
    if (KillingTy->getElementCount() != DeadTy->getElementCount())
      return OW_Unknown;
    // Pointers.
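    // The killing store must write through the same pointer as the dead
    // store (or a must-aliasing one); otherwise a full overwrite cannot be
    // guaranteed.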
257 Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts(); 258 Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts(); 259 if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr)) 260 return OW_Unknown; 261 // Masks. 262 // TODO: check that KillingII's mask is a superset of the DeadII's mask. 263 if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3)) 264 return OW_Unknown; 265 return OW_Complete; 266 } 267 return OW_Unknown; 268 } 269 270 /// Return 'OW_Complete' if a store to the 'KillingLoc' location completely 271 /// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the 272 /// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin' 273 /// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'. 274 /// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was 275 /// overwritten by a killing (smaller) store which doesn't write outside the big 276 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined. 277 /// NOTE: This function must only be called if both \p KillingLoc and \p 278 /// DeadLoc belong to the same underlying object with valid \p KillingOff and 279 /// \p DeadOff. 280 static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc, 281 const MemoryLocation &DeadLoc, 282 int64_t KillingOff, int64_t DeadOff, 283 Instruction *DeadI, 284 InstOverlapIntervalsTy &IOL) { 285 const uint64_t KillingSize = KillingLoc.Size.getValue(); 286 const uint64_t DeadSize = DeadLoc.Size.getValue(); 287 // We may now overlap, although the overlap is not complete. There might also 288 // be other incomplete overlaps, and together, they might cover the complete 289 // dead store. 290 // Note: The correctness of this logic depends on the fact that this function 291 // is not even called providing DepWrite when there are any intervening reads. 292 if (EnablePartialOverwriteTracking && 293 KillingOff < int64_t(DeadOff + DeadSize) && 294 int64_t(KillingOff + KillingSize) >= DeadOff) { 295 296 // Insert our part of the overlap into the map. 297 auto &IM = IOL[DeadI]; 298 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", " 299 << int64_t(DeadOff + DeadSize) << ") KillingLoc [" 300 << KillingOff << ", " << int64_t(KillingOff + KillingSize) 301 << ")\n"); 302 303 // Make sure that we only insert non-overlapping intervals and combine 304 // adjacent intervals. The intervals are stored in the map with the ending 305 // offset as the key (in the half-open sense) and the starting offset as 306 // the value. 307 int64_t KillingIntStart = KillingOff; 308 int64_t KillingIntEnd = KillingOff + KillingSize; 309 310 // Find any intervals ending at, or after, KillingIntStart which start 311 // before KillingIntEnd. 312 auto ILI = IM.lower_bound(KillingIntStart); 313 if (ILI != IM.end() && ILI->second <= KillingIntEnd) { 314 // This existing interval is overlapped with the current store somewhere 315 // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing 316 // intervals and adjusting our start and end. 317 KillingIntStart = std::min(KillingIntStart, ILI->second); 318 KillingIntEnd = std::max(KillingIntEnd, ILI->first); 319 ILI = IM.erase(ILI); 320 321 // Continue erasing and adjusting our end in case other previous 322 // intervals are also overlapped with the current store. 
323 // 324 // |--- dead 1 ---| |--- dead 2 ---| 325 // |------- killing---------| 326 // 327 while (ILI != IM.end() && ILI->second <= KillingIntEnd) { 328 assert(ILI->second > KillingIntStart && "Unexpected interval"); 329 KillingIntEnd = std::max(KillingIntEnd, ILI->first); 330 ILI = IM.erase(ILI); 331 } 332 } 333 334 IM[KillingIntEnd] = KillingIntStart; 335 336 ILI = IM.begin(); 337 if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) { 338 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc [" 339 << DeadOff << ", " << int64_t(DeadOff + DeadSize) 340 << ") Composite KillingLoc [" << ILI->second << ", " 341 << ILI->first << ")\n"); 342 ++NumCompletePartials; 343 return OW_Complete; 344 } 345 } 346 347 // Check for a dead store which writes to all the memory locations that 348 // the killing store writes to. 349 if (EnablePartialStoreMerging && KillingOff >= DeadOff && 350 int64_t(DeadOff + DeadSize) > KillingOff && 351 uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) { 352 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead load [" << DeadOff 353 << ", " << int64_t(DeadOff + DeadSize) 354 << ") by a killing store [" << KillingOff << ", " 355 << int64_t(KillingOff + KillingSize) << ")\n"); 356 // TODO: Maybe come up with a better name? 357 return OW_PartialEarlierWithFullLater; 358 } 359 360 // Another interesting case is if the killing store overwrites the end of the 361 // dead store. 362 // 363 // |--dead--| 364 // |-- killing --| 365 // 366 // In this case we may want to trim the size of dead store to avoid 367 // generating stores to addresses which will definitely be overwritten killing 368 // store. 369 if (!EnablePartialOverwriteTracking && 370 (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) && 371 int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize))) 372 return OW_End; 373 374 // Finally, we also need to check if the killing store overwrites the 375 // beginning of the dead store. 376 // 377 // |--dead--| 378 // |-- killing --| 379 // 380 // In this case we may want to move the destination address and trim the size 381 // of dead store to avoid generating stores to addresses which will definitely 382 // be overwritten killing store. 383 if (!EnablePartialOverwriteTracking && 384 (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) { 385 assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) && 386 "Expect to be handled as OW_Complete"); 387 return OW_Begin; 388 } 389 // Otherwise, they don't completely overlap. 390 return OW_Unknown; 391 } 392 393 /// Returns true if the memory which is accessed by the second instruction is not 394 /// modified between the first and the second instruction. 395 /// Precondition: Second instruction must be dominated by the first 396 /// instruction. 397 static bool 398 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, 399 BatchAAResults &AA, const DataLayout &DL, 400 DominatorTree *DT) { 401 // Do a backwards scan through the CFG from SecondI to FirstI. Look for 402 // instructions which can modify the memory location accessed by SecondI. 403 // 404 // While doing the walk keep track of the address to check. It might be 405 // different in different basic blocks due to PHI translation. 406 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>; 407 SmallVector<BlockAddressPair, 16> WorkList; 408 // Keep track of the address we visited each block with. Bail out if we 409 // visit a block with different addresses. 
410 DenseMap<BasicBlock *, Value *> Visited; 411 412 BasicBlock::iterator FirstBBI(FirstI); 413 ++FirstBBI; 414 BasicBlock::iterator SecondBBI(SecondI); 415 BasicBlock *FirstBB = FirstI->getParent(); 416 BasicBlock *SecondBB = SecondI->getParent(); 417 MemoryLocation MemLoc; 418 if (auto *MemSet = dyn_cast<MemSetInst>(SecondI)) 419 MemLoc = MemoryLocation::getForDest(MemSet); 420 else 421 MemLoc = MemoryLocation::get(SecondI); 422 423 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr); 424 425 // Start checking the SecondBB. 426 WorkList.push_back( 427 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr))); 428 bool isFirstBlock = true; 429 430 // Check all blocks going backward until we reach the FirstBB. 431 while (!WorkList.empty()) { 432 BlockAddressPair Current = WorkList.pop_back_val(); 433 BasicBlock *B = Current.first; 434 PHITransAddr &Addr = Current.second; 435 Value *Ptr = Addr.getAddr(); 436 437 // Ignore instructions before FirstI if this is the FirstBB. 438 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin()); 439 440 BasicBlock::iterator EI; 441 if (isFirstBlock) { 442 // Ignore instructions after SecondI if this is the first visit of SecondBB. 443 assert(B == SecondBB && "first block is not the store block"); 444 EI = SecondBBI; 445 isFirstBlock = false; 446 } else { 447 // It's not SecondBB or (in case of a loop) the second visit of SecondBB. 448 // In this case we also have to look at instructions after SecondI. 449 EI = B->end(); 450 } 451 for (; BI != EI; ++BI) { 452 Instruction *I = &*BI; 453 if (I->mayWriteToMemory() && I != SecondI) 454 if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr)))) 455 return false; 456 } 457 if (B != FirstBB) { 458 assert(B != &FirstBB->getParent()->getEntryBlock() && 459 "Should not hit the entry block because SI must be dominated by LI"); 460 for (BasicBlock *Pred : predecessors(B)) { 461 PHITransAddr PredAddr = Addr; 462 if (PredAddr.needsPHITranslationFromBlock(B)) { 463 if (!PredAddr.isPotentiallyPHITranslatable()) 464 return false; 465 if (!PredAddr.translateValue(B, Pred, DT, false)) 466 return false; 467 } 468 Value *TranslatedPtr = PredAddr.getAddr(); 469 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr)); 470 if (!Inserted.second) { 471 // We already visited this block before. If it was with a different 472 // address - bail out! 473 if (TranslatedPtr != Inserted.first->second) 474 return false; 475 // ... otherwise just skip it. 476 continue; 477 } 478 WorkList.push_back(std::make_pair(Pred, PredAddr)); 479 } 480 } 481 } 482 return true; 483 } 484 485 static void shortenAssignment(Instruction *Inst, Value *OriginalDest, 486 uint64_t OldOffsetInBits, uint64_t OldSizeInBits, 487 uint64_t NewSizeInBits, bool IsOverwriteEnd) { 488 const DataLayout &DL = Inst->getModule()->getDataLayout(); 489 uint64_t DeadSliceSizeInBits = OldSizeInBits - NewSizeInBits; 490 uint64_t DeadSliceOffsetInBits = 491 OldOffsetInBits + (IsOverwriteEnd ? NewSizeInBits : 0); 492 auto SetDeadFragExpr = [](DbgAssignIntrinsic *DAI, 493 DIExpression::FragmentInfo DeadFragment) { 494 // createFragmentExpression expects an offset relative to the existing 495 // fragment offset if there is one. 
    uint64_t RelativeOffset = DeadFragment.OffsetInBits -
                              DAI->getExpression()
                                  ->getFragmentInfo()
                                  .value_or(DIExpression::FragmentInfo(0, 0))
                                  .OffsetInBits;
    if (auto NewExpr = DIExpression::createFragmentExpression(
            DAI->getExpression(), RelativeOffset, DeadFragment.SizeInBits)) {
      DAI->setExpression(*NewExpr);
      return;
    }
    // Failed to create a fragment expression for this so discard the value,
    // making this a kill location.
    auto *Expr = *DIExpression::createFragmentExpression(
        DIExpression::get(DAI->getContext(), std::nullopt),
        DeadFragment.OffsetInBits, DeadFragment.SizeInBits);
    DAI->setExpression(Expr);
    DAI->setKillLocation();
  };

  // A DIAssignID to use so that the inserted dbg.assign intrinsics do not
  // link to any instructions. Created in the loop below (once).
  DIAssignID *LinkToNothing = nullptr;
  LLVMContext &Ctx = Inst->getContext();
  auto GetDeadLink = [&Ctx, &LinkToNothing]() {
    if (!LinkToNothing)
      LinkToNothing = DIAssignID::getDistinct(Ctx);
    return LinkToNothing;
  };

  // Insert an unlinked dbg.assign intrinsic for the dead fragment after each
  // overlapping dbg.assign intrinsic. The loop invalidates the iterators
  // returned by getAssignmentMarkers so save a copy of the markers to iterate
  // over.
  auto LinkedRange = at::getAssignmentMarkers(Inst);
  SmallVector<DbgAssignIntrinsic *> Linked(LinkedRange.begin(),
                                           LinkedRange.end());
  for (auto *DAI : Linked) {
    std::optional<DIExpression::FragmentInfo> NewFragment;
    if (!at::calculateFragmentIntersect(DL, OriginalDest, DeadSliceOffsetInBits,
                                        DeadSliceSizeInBits, DAI,
                                        NewFragment) ||
        !NewFragment) {
      // We couldn't calculate the intersecting fragment for some reason. Be
      // cautious and unlink the whole assignment from the store.
      DAI->setKillAddress();
      DAI->setAssignId(GetDeadLink());
      continue;
    }
    // No intersect.
    if (NewFragment->SizeInBits == 0)
      continue;

    // Fragments overlap: insert a new dbg.assign for this dead part.
    auto *NewAssign = cast<DbgAssignIntrinsic>(DAI->clone());
    NewAssign->insertAfter(DAI);
    NewAssign->setAssignId(GetDeadLink());
    if (NewFragment)
      SetDeadFragExpr(NewAssign, *NewFragment);
    NewAssign->setKillAddress();
  }
}

static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
                         uint64_t &DeadSize, int64_t KillingStart,
                         uint64_t KillingSize, bool IsOverwriteEnd) {
  auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
  Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();

  // We assume that memset/memcpy operates in chunks of the "largest" native
  // type size, aligned on the same value. That means the optimal start and
  // size of a memset/memcpy should be a multiple of the preferred alignment
  // of that type, so there is no sense in trying to reduce the store size any
  // further since any "extra" stores come for free anyway.
  // On the other hand, the maximum alignment we can achieve is limited by the
  // alignment of the initial store.

  // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
  // "largest" native type.
  // Note: What is the proper way to get that value?
  // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
  // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);

  int64_t ToRemoveStart = 0;
  uint64_t ToRemoveSize = 0;
  // Compute start and size of the region to remove. Make sure 'PrefAlign' is
  // maintained on the remaining store.
  if (IsOverwriteEnd) {
    // Calculate required adjustment for 'KillingStart' in order to keep
    // remaining store size aligned on 'PrefAlign'.
    uint64_t Off =
        offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign);
    ToRemoveStart = KillingStart + Off;
    if (DeadSize <= uint64_t(ToRemoveStart - DeadStart))
      return false;
    ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart);
  } else {
    ToRemoveStart = DeadStart;
    assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
           "Not overlapping accesses?");
    ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart);
    // Calculate required adjustment for 'ToRemoveSize' in order to keep the
    // start of the remaining store aligned on 'PrefAlign'.
    uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
    if (Off != 0) {
      if (ToRemoveSize <= (PrefAlign.value() - Off))
        return false;
      ToRemoveSize -= PrefAlign.value() - Off;
    }
    assert(isAligned(PrefAlign, ToRemoveSize) &&
           "Should preserve selected alignment");
  }

  assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
  assert(DeadSize > ToRemoveSize && "Can't remove more than original size");

  uint64_t NewSize = DeadSize - ToRemoveSize;
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
    // When shortening an atomic memory intrinsic, the newly shortened
    // length must remain an integer multiple of the element size.
    const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewSize % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
                    << "\n KILLER [" << ToRemoveStart << ", "
                    << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");

  Value *DeadWriteLength = DeadIntrinsic->getLength();
  Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize);
  DeadIntrinsic->setLength(TrimmedLength);
  DeadIntrinsic->setDestAlignment(PrefAlign);

  Value *OrigDest = DeadIntrinsic->getRawDest();
  if (!IsOverwriteEnd) {
    Type *Int8PtrTy =
        Type::getInt8PtrTy(DeadIntrinsic->getContext(),
                           OrigDest->getType()->getPointerAddressSpace());
    Value *Dest = OrigDest;
    if (OrigDest->getType() != Int8PtrTy)
      Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", DeadI);
    Value *Indices[1] = {
        ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)};
    Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
        Type::getInt8Ty(DeadIntrinsic->getContext()), Dest, Indices, "", DeadI);
    NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc());
    if (NewDestGEP->getType() != OrigDest->getType())
      NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(),
                                               "", DeadI);
    DeadIntrinsic->setDest(NewDestGEP);
  }

  // Update attached dbg.assign intrinsics. Assume 8-bit byte.
  shortenAssignment(DeadI, OrigDest, DeadStart * 8, DeadSize * 8, NewSize * 8,
                    IsOverwriteEnd);

  // Finally update start and size of dead access.
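  // When trimming at the beginning (!IsOverwriteEnd), the start of the
  // remaining dead access moves forward by the removed size; when trimming at
  // the end, only its size shrinks.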
  if (!IsOverwriteEnd)
    DeadStart += ToRemoveSize;
  DeadSize = NewSize;

  return true;
}

static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap,
                            int64_t &DeadStart, uint64_t &DeadSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t KillingStart = OII->second;
  uint64_t KillingSize = OII->first - KillingStart;

  assert(OII->first - KillingStart >= 0 && "Size expected to be positive");

  if (KillingStart > DeadStart &&
      // Note: "KillingStart - DeadStart" is known to be positive due to
      // preceding check.
      (uint64_t)(KillingStart - DeadStart) < DeadSize &&
      // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to
      // be non negative due to preceding checks.
      KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) {
    if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
                     true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *DeadI,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &DeadStart, uint64_t &DeadSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t KillingStart = OII->second;
  uint64_t KillingSize = OII->first - KillingStart;

  assert(OII->first - KillingStart >= 0 && "Size expected to be positive");

  if (KillingStart <= DeadStart &&
      // Note: "DeadStart - KillingStart" is known to be non negative due to
      // preceding check.
      KillingSize > (uint64_t)(DeadStart - KillingStart)) {
    // Note: "KillingSize - (uint64_t)(DeadStart - KillingStart)" is known to
    // be positive due to preceding checks.
    assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
                     false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static Constant *
tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI,
                                   int64_t KillingOffset, int64_t DeadOffset,
                                   const DataLayout &DL, BatchAAResults &AA,
                                   DominatorTree *DT) {

  if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) &&
      DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) &&
      KillingI && isa<ConstantInt>(KillingI->getValueOperand()) &&
      DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) &&
      memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) {
    // If the store we find is:
    //   a) partially overwritten by the store to 'Loc'
    //   b) the killing store is fully contained in the dead one and
    //   c) they both have a constant value
    //   d) none of the two stores need padding
    // Merge the two stores, replacing the dead store's value with a
    // merge of both values.
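    // For example (little endian): a dead i32 store of 0x11223344 followed by
    // a killing i16 store of 0xAABB at byte offset 1 merges into a single i32
    // store of 0x11AABB44.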
735 // TODO: Deal with other constant types (vectors, etc), and probably 736 // some mem intrinsics (if needed) 737 738 APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue(); 739 APInt KillingValue = 740 cast<ConstantInt>(KillingI->getValueOperand())->getValue(); 741 unsigned KillingBits = KillingValue.getBitWidth(); 742 assert(DeadValue.getBitWidth() > KillingValue.getBitWidth()); 743 KillingValue = KillingValue.zext(DeadValue.getBitWidth()); 744 745 // Offset of the smaller store inside the larger store 746 unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8; 747 unsigned LShiftAmount = 748 DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits 749 : BitOffsetDiff; 750 APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount, 751 LShiftAmount + KillingBits); 752 // Clear the bits we'll be replacing, then OR with the smaller 753 // store, shifted appropriately. 754 APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount); 755 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Dead: " << *DeadI 756 << "\n Killing: " << *KillingI 757 << "\n Merged Value: " << Merged << '\n'); 758 return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged); 759 } 760 return nullptr; 761 } 762 763 namespace { 764 // Returns true if \p I is an intrinsic that does not read or write memory. 765 bool isNoopIntrinsic(Instruction *I) { 766 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 767 switch (II->getIntrinsicID()) { 768 case Intrinsic::lifetime_start: 769 case Intrinsic::lifetime_end: 770 case Intrinsic::invariant_end: 771 case Intrinsic::launder_invariant_group: 772 case Intrinsic::assume: 773 return true; 774 case Intrinsic::dbg_declare: 775 case Intrinsic::dbg_label: 776 case Intrinsic::dbg_value: 777 llvm_unreachable("Intrinsic should not be modeled in MemorySSA"); 778 default: 779 return false; 780 } 781 } 782 return false; 783 } 784 785 // Check if we can ignore \p D for DSE. 786 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) { 787 Instruction *DI = D->getMemoryInst(); 788 // Calls that only access inaccessible memory cannot read or write any memory 789 // locations we consider for elimination. 790 if (auto *CB = dyn_cast<CallBase>(DI)) 791 if (CB->onlyAccessesInaccessibleMemory()) 792 return true; 793 794 // We can eliminate stores to locations not visible to the caller across 795 // throwing instructions. 796 if (DI->mayThrow() && !DefVisibleToCaller) 797 return true; 798 799 // We can remove the dead stores, irrespective of the fence and its ordering 800 // (release/acquire/seq_cst). Fences only constraints the ordering of 801 // already visible stores, it does not make a store visible to other 802 // threads. So, skipping over a fence does not change a store from being 803 // dead. 804 if (isa<FenceInst>(DI)) 805 return true; 806 807 // Skip intrinsics that do not really read or modify memory. 808 if (isNoopIntrinsic(DI)) 809 return true; 810 811 return false; 812 } 813 814 struct DSEState { 815 Function &F; 816 AliasAnalysis &AA; 817 EarliestEscapeInfo EI; 818 819 /// The single BatchAA instance that is used to cache AA queries. It will 820 /// not be invalidated over the whole run. This is safe, because: 821 /// 1. Only memory writes are removed, so the alias cache for memory 822 /// locations remains valid. 823 /// 2. No new instructions are added (only instructions removed), so cached 824 /// information for a deleted value cannot be accessed by a re-used new 825 /// value pointer. 
826 BatchAAResults BatchAA; 827 828 MemorySSA &MSSA; 829 DominatorTree &DT; 830 PostDominatorTree &PDT; 831 const TargetLibraryInfo &TLI; 832 const DataLayout &DL; 833 const LoopInfo &LI; 834 835 // Whether the function contains any irreducible control flow, useful for 836 // being accurately able to detect loops. 837 bool ContainsIrreducibleLoops; 838 839 // All MemoryDefs that potentially could kill other MemDefs. 840 SmallVector<MemoryDef *, 64> MemDefs; 841 // Any that should be skipped as they are already deleted 842 SmallPtrSet<MemoryAccess *, 4> SkipStores; 843 // Keep track whether a given object is captured before return or not. 844 DenseMap<const Value *, bool> CapturedBeforeReturn; 845 // Keep track of all of the objects that are invisible to the caller after 846 // the function returns. 847 DenseMap<const Value *, bool> InvisibleToCallerAfterRet; 848 // Keep track of blocks with throwing instructions not modeled in MemorySSA. 849 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks; 850 // Post-order numbers for each basic block. Used to figure out if memory 851 // accesses are executed before another access. 852 DenseMap<BasicBlock *, unsigned> PostOrderNumbers; 853 // Values that are only used with assumes. Used to refine pointer escape 854 // analysis. 855 SmallPtrSet<const Value *, 32> EphValues; 856 857 /// Keep track of instructions (partly) overlapping with killing MemoryDefs per 858 /// basic block. 859 MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs; 860 // Check if there are root nodes that are terminated by UnreachableInst. 861 // Those roots pessimize post-dominance queries. If there are such roots, 862 // fall back to CFG scan starting from all non-unreachable roots. 863 bool AnyUnreachableExit; 864 865 // Whether or not we should iterate on removing dead stores at the end of the 866 // function due to removing a store causing a previously captured pointer to 867 // no longer be captured. 868 bool ShouldIterateEndOfFunctionDSE; 869 870 // Class contains self-reference, make sure it's not copied/moved. 871 DSEState(const DSEState &) = delete; 872 DSEState &operator=(const DSEState &) = delete; 873 874 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT, 875 PostDominatorTree &PDT, AssumptionCache &AC, 876 const TargetLibraryInfo &TLI, const LoopInfo &LI) 877 : F(F), AA(AA), EI(DT, LI, EphValues), BatchAA(AA, &EI), MSSA(MSSA), 878 DT(DT), PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) { 879 // Collect blocks with throwing instructions not modeled in MemorySSA and 880 // alloc-like objects. 881 unsigned PO = 0; 882 for (BasicBlock *BB : post_order(&F)) { 883 PostOrderNumbers[BB] = PO++; 884 for (Instruction &I : *BB) { 885 MemoryAccess *MA = MSSA.getMemoryAccess(&I); 886 if (I.mayThrow() && !MA) 887 ThrowingBlocks.insert(I.getParent()); 888 889 auto *MD = dyn_cast_or_null<MemoryDef>(MA); 890 if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit && 891 (getLocForWrite(&I) || isMemTerminatorInst(&I))) 892 MemDefs.push_back(MD); 893 } 894 } 895 896 // Treat byval or inalloca arguments the same as Allocas, stores to them are 897 // dead at the end of the function. 898 for (Argument &AI : F.args()) 899 if (AI.hasPassPointeeByValueCopyAttr()) 900 InvisibleToCallerAfterRet.insert({&AI, true}); 901 902 // Collect whether there is any irreducible control flow in the function. 
    ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);

    AnyUnreachableExit = any_of(PDT.roots(), [](const BasicBlock *E) {
      return isa<UnreachableInst>(E->getTerminator());
    });

    CodeMetrics::collectEphemeralValues(&F, &AC, EphValues);
  }

  LocationSize strengthenLocationSize(const Instruction *I,
                                      LocationSize Size) const {
    if (auto *CB = dyn_cast<CallBase>(I)) {
      LibFunc F;
      if (TLI.getLibFunc(*CB, F) && TLI.has(F) &&
          (F == LibFunc_memset_chk || F == LibFunc_memcpy_chk)) {
        // Use the precise location size specified by the 3rd argument
        // for determining whether KillingI overwrites DeadLoc if it is a
        // memset_chk instruction. memset_chk will either write the amount
        // specified as the 3rd argument or immediately abort and exit the
        // program.
        // NOTE: AA may determine NoAlias if it can prove that the access size
        // is larger than the allocation size due to that being UB. To avoid
        // returning potentially invalid NoAlias results by AA, limit the use of
        // the precise location size to isOverwrite.
        if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2)))
          return LocationSize::precise(Len->getZExtValue());
      }
    }
    return Size;
  }

  /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p
  /// KillingI instruction) completely overwrites a store to the 'DeadLoc'
  /// location (by \p DeadI instruction).
  /// Return OW_MaybePartial if \p KillingI does not completely overwrite
  /// \p DeadI, but they both write to the same underlying object. In that
  /// case, use isPartialOverwrite to check if \p KillingI partially overwrites
  /// \p DeadI. Returns 'OW_None' if \p KillingI is known to not overwrite
  /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined.
  OverwriteResult isOverwrite(const Instruction *KillingI,
                              const Instruction *DeadI,
                              const MemoryLocation &KillingLoc,
                              const MemoryLocation &DeadLoc,
                              int64_t &KillingOff, int64_t &DeadOff) {
    // AliasAnalysis does not always account for loops. Limit overwrite checks
    // to dependencies for which we can guarantee they are independent of any
    // loops they are in.
    if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
      return OW_Unknown;

    LocationSize KillingLocSize =
        strengthenLocationSize(KillingI, KillingLoc.Size);
    const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
    const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
    const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
    const Value *KillingUndObj = getUnderlyingObject(KillingPtr);

    // Check whether the killing store overwrites the whole object, in which
    // case the size/offset of the dead store does not matter.
    if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise()) {
      uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
      if (KillingUndObjSize != MemoryLocation::UnknownSize &&
          KillingUndObjSize == KillingLocSize.getValue())
        return OW_Complete;
    }

    // FIXME: Vet that this works for size upper-bounds. Seems unlikely that
    // we'll get imprecise values here, though (except for unknown sizes).
    if (!KillingLocSize.isPrecise() || !DeadLoc.Size.isPrecise()) {
      // In case no constant size is known, try to use the IR values for the
      // number of bytes written and check if they match.
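      // (E.g. two memset/memcpy calls whose length is the same non-constant
      // SSA value must write the same number of bytes.)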
973 const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI); 974 const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI); 975 if (KillingMemI && DeadMemI) { 976 const Value *KillingV = KillingMemI->getLength(); 977 const Value *DeadV = DeadMemI->getLength(); 978 if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc)) 979 return OW_Complete; 980 } 981 982 // Masked stores have imprecise locations, but we can reason about them 983 // to some extent. 984 return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA); 985 } 986 987 const uint64_t KillingSize = KillingLocSize.getValue(); 988 const uint64_t DeadSize = DeadLoc.Size.getValue(); 989 990 // Query the alias information 991 AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc); 992 993 // If the start pointers are the same, we just have to compare sizes to see if 994 // the killing store was larger than the dead store. 995 if (AAR == AliasResult::MustAlias) { 996 // Make sure that the KillingSize size is >= the DeadSize size. 997 if (KillingSize >= DeadSize) 998 return OW_Complete; 999 } 1000 1001 // If we hit a partial alias we may have a full overwrite 1002 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) { 1003 int32_t Off = AAR.getOffset(); 1004 if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize) 1005 return OW_Complete; 1006 } 1007 1008 // If we can't resolve the same pointers to the same object, then we can't 1009 // analyze them at all. 1010 if (DeadUndObj != KillingUndObj) { 1011 // Non aliasing stores to different objects don't overlap. Note that 1012 // if the killing store is known to overwrite whole object (out of 1013 // bounds access overwrites whole object as well) then it is assumed to 1014 // completely overwrite any store to the same object even if they don't 1015 // actually alias (see next check). 1016 if (AAR == AliasResult::NoAlias) 1017 return OW_None; 1018 return OW_Unknown; 1019 } 1020 1021 // Okay, we have stores to two completely different pointers. Try to 1022 // decompose the pointer into a "base + constant_offset" form. If the base 1023 // pointers are equal, then we can reason about the two stores. 1024 DeadOff = 0; 1025 KillingOff = 0; 1026 const Value *DeadBasePtr = 1027 GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL); 1028 const Value *KillingBasePtr = 1029 GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL); 1030 1031 // If the base pointers still differ, we have two completely different 1032 // stores. 1033 if (DeadBasePtr != KillingBasePtr) 1034 return OW_Unknown; 1035 1036 // The killing access completely overlaps the dead store if and only if 1037 // both start and end of the dead one is "inside" the killing one: 1038 // |<->|--dead--|<->| 1039 // |-----killing------| 1040 // Accesses may overlap if and only if start of one of them is "inside" 1041 // another one: 1042 // |<->|--dead--|<-------->| 1043 // |-------killing--------| 1044 // OR 1045 // |-------dead-------| 1046 // |<->|---killing---|<----->| 1047 // 1048 // We have to be careful here as *Off is signed while *.Size is unsigned. 1049 1050 // Check if the dead access starts "not before" the killing one. 1051 if (DeadOff >= KillingOff) { 1052 // If the dead access ends "not after" the killing access then the 1053 // dead one is completely overwritten by the killing one. 1054 if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize) 1055 return OW_Complete; 1056 // If start of the dead access is "before" end of the killing access 1057 // then accesses overlap. 
1058 else if ((uint64_t)(DeadOff - KillingOff) < KillingSize) 1059 return OW_MaybePartial; 1060 } 1061 // If start of the killing access is "before" end of the dead access then 1062 // accesses overlap. 1063 else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) { 1064 return OW_MaybePartial; 1065 } 1066 1067 // Can reach here only if accesses are known not to overlap. 1068 return OW_None; 1069 } 1070 1071 bool isInvisibleToCallerAfterRet(const Value *V) { 1072 if (isa<AllocaInst>(V)) 1073 return true; 1074 auto I = InvisibleToCallerAfterRet.insert({V, false}); 1075 if (I.second) { 1076 if (!isInvisibleToCallerOnUnwind(V)) { 1077 I.first->second = false; 1078 } else if (isNoAliasCall(V)) { 1079 I.first->second = !PointerMayBeCaptured(V, true, false, EphValues); 1080 } 1081 } 1082 return I.first->second; 1083 } 1084 1085 bool isInvisibleToCallerOnUnwind(const Value *V) { 1086 bool RequiresNoCaptureBeforeUnwind; 1087 if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind)) 1088 return false; 1089 if (!RequiresNoCaptureBeforeUnwind) 1090 return true; 1091 1092 auto I = CapturedBeforeReturn.insert({V, true}); 1093 if (I.second) 1094 // NOTE: This could be made more precise by PointerMayBeCapturedBefore 1095 // with the killing MemoryDef. But we refrain from doing so for now to 1096 // limit compile-time and this does not cause any changes to the number 1097 // of stores removed on a large test set in practice. 1098 I.first->second = PointerMayBeCaptured(V, false, true, EphValues); 1099 return !I.first->second; 1100 } 1101 1102 std::optional<MemoryLocation> getLocForWrite(Instruction *I) const { 1103 if (!I->mayWriteToMemory()) 1104 return std::nullopt; 1105 1106 if (auto *CB = dyn_cast<CallBase>(I)) 1107 return MemoryLocation::getForDest(CB, TLI); 1108 1109 return MemoryLocation::getOrNone(I); 1110 } 1111 1112 /// Assuming this instruction has a dead analyzable write, can we delete 1113 /// this instruction? 1114 bool isRemovable(Instruction *I) { 1115 assert(getLocForWrite(I) && "Must have analyzable write"); 1116 1117 // Don't remove volatile/atomic stores. 1118 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 1119 return SI->isUnordered(); 1120 1121 if (auto *CB = dyn_cast<CallBase>(I)) { 1122 // Don't remove volatile memory intrinsics. 1123 if (auto *MI = dyn_cast<MemIntrinsic>(CB)) 1124 return !MI->isVolatile(); 1125 1126 // Never remove dead lifetime intrinsics, e.g. because they are followed 1127 // by a free. 1128 if (CB->isLifetimeStartOrEnd()) 1129 return false; 1130 1131 return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() && 1132 !CB->isTerminator(); 1133 } 1134 1135 return false; 1136 } 1137 1138 /// Returns true if \p UseInst completely overwrites \p DefLoc 1139 /// (stored by \p DefInst). 1140 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst, 1141 Instruction *UseInst) { 1142 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a 1143 // MemoryDef to not write to memory, e.g. a volatile load is modeled as a 1144 // MemoryDef. 1145 if (!UseInst->mayWriteToMemory()) 1146 return false; 1147 1148 if (auto *CB = dyn_cast<CallBase>(UseInst)) 1149 if (CB->onlyAccessesInaccessibleMemory()) 1150 return false; 1151 1152 int64_t InstWriteOffset, DepWriteOffset; 1153 if (auto CC = getLocForWrite(UseInst)) 1154 return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset, 1155 DepWriteOffset) == OW_Complete; 1156 return false; 1157 } 1158 1159 /// Returns true if \p Def is not read before returning from the function. 
1160 bool isWriteAtEndOfFunction(MemoryDef *Def) { 1161 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " (" 1162 << *Def->getMemoryInst() 1163 << ") is at the end the function \n"); 1164 1165 auto MaybeLoc = getLocForWrite(Def->getMemoryInst()); 1166 if (!MaybeLoc) { 1167 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n"); 1168 return false; 1169 } 1170 1171 SmallVector<MemoryAccess *, 4> WorkList; 1172 SmallPtrSet<MemoryAccess *, 8> Visited; 1173 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) { 1174 if (!Visited.insert(Acc).second) 1175 return; 1176 for (Use &U : Acc->uses()) 1177 WorkList.push_back(cast<MemoryAccess>(U.getUser())); 1178 }; 1179 PushMemUses(Def); 1180 for (unsigned I = 0; I < WorkList.size(); I++) { 1181 if (WorkList.size() >= MemorySSAScanLimit) { 1182 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n"); 1183 return false; 1184 } 1185 1186 MemoryAccess *UseAccess = WorkList[I]; 1187 if (isa<MemoryPhi>(UseAccess)) { 1188 // AliasAnalysis does not account for loops. Limit elimination to 1189 // candidates for which we can guarantee they always store to the same 1190 // memory location. 1191 if (!isGuaranteedLoopInvariant(MaybeLoc->Ptr)) 1192 return false; 1193 1194 PushMemUses(cast<MemoryPhi>(UseAccess)); 1195 continue; 1196 } 1197 // TODO: Checking for aliasing is expensive. Consider reducing the amount 1198 // of times this is called and/or caching it. 1199 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); 1200 if (isReadClobber(*MaybeLoc, UseInst)) { 1201 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n"); 1202 return false; 1203 } 1204 1205 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) 1206 PushMemUses(UseDef); 1207 } 1208 return true; 1209 } 1210 1211 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a 1212 /// pair with the MemoryLocation terminated by \p I and a boolean flag 1213 /// indicating whether \p I is a free-like call. 1214 std::optional<std::pair<MemoryLocation, bool>> 1215 getLocForTerminator(Instruction *I) const { 1216 uint64_t Len; 1217 Value *Ptr; 1218 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len), 1219 m_Value(Ptr)))) 1220 return {std::make_pair(MemoryLocation(Ptr, Len), false)}; 1221 1222 if (auto *CB = dyn_cast<CallBase>(I)) { 1223 if (Value *FreedOp = getFreedOperand(CB, &TLI)) 1224 return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)}; 1225 } 1226 1227 return std::nullopt; 1228 } 1229 1230 /// Returns true if \p I is a memory terminator instruction like 1231 /// llvm.lifetime.end or free. 1232 bool isMemTerminatorInst(Instruction *I) const { 1233 auto *CB = dyn_cast<CallBase>(I); 1234 return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end || 1235 getFreedOperand(CB, &TLI) != nullptr); 1236 } 1237 1238 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from 1239 /// instruction \p AccessI. 1240 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI, 1241 Instruction *MaybeTerm) { 1242 std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc = 1243 getLocForTerminator(MaybeTerm); 1244 1245 if (!MaybeTermLoc) 1246 return false; 1247 1248 // If the terminator is a free-like call, all accesses to the underlying 1249 // object can be considered terminated. 
1250 if (getUnderlyingObject(Loc.Ptr) != 1251 getUnderlyingObject(MaybeTermLoc->first.Ptr)) 1252 return false; 1253 1254 auto TermLoc = MaybeTermLoc->first; 1255 if (MaybeTermLoc->second) { 1256 const Value *LocUO = getUnderlyingObject(Loc.Ptr); 1257 return BatchAA.isMustAlias(TermLoc.Ptr, LocUO); 1258 } 1259 int64_t InstWriteOffset = 0; 1260 int64_t DepWriteOffset = 0; 1261 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset, 1262 DepWriteOffset) == OW_Complete; 1263 } 1264 1265 // Returns true if \p Use may read from \p DefLoc. 1266 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) { 1267 if (isNoopIntrinsic(UseInst)) 1268 return false; 1269 1270 // Monotonic or weaker atomic stores can be re-ordered and do not need to be 1271 // treated as read clobber. 1272 if (auto SI = dyn_cast<StoreInst>(UseInst)) 1273 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic); 1274 1275 if (!UseInst->mayReadFromMemory()) 1276 return false; 1277 1278 if (auto *CB = dyn_cast<CallBase>(UseInst)) 1279 if (CB->onlyAccessesInaccessibleMemory()) 1280 return false; 1281 1282 return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc)); 1283 } 1284 1285 /// Returns true if a dependency between \p Current and \p KillingDef is 1286 /// guaranteed to be loop invariant for the loops that they are in. Either 1287 /// because they are known to be in the same block, in the same loop level or 1288 /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation 1289 /// during execution of the containing function. 1290 bool isGuaranteedLoopIndependent(const Instruction *Current, 1291 const Instruction *KillingDef, 1292 const MemoryLocation &CurrentLoc) { 1293 // If the dependency is within the same block or loop level (being careful 1294 // of irreducible loops), we know that AA will return a valid result for the 1295 // memory dependency. (Both at the function level, outside of any loop, 1296 // would also be valid but we currently disable that to limit compile time). 1297 if (Current->getParent() == KillingDef->getParent()) 1298 return true; 1299 const Loop *CurrentLI = LI.getLoopFor(Current->getParent()); 1300 if (!ContainsIrreducibleLoops && CurrentLI && 1301 CurrentLI == LI.getLoopFor(KillingDef->getParent())) 1302 return true; 1303 // Otherwise check the memory location is invariant to any loops. 1304 return isGuaranteedLoopInvariant(CurrentLoc.Ptr); 1305 } 1306 1307 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible 1308 /// loop. In particular, this guarantees that it only references a single 1309 /// MemoryLocation during execution of the containing function. 1310 bool isGuaranteedLoopInvariant(const Value *Ptr) { 1311 Ptr = Ptr->stripPointerCasts(); 1312 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) 1313 if (GEP->hasAllConstantIndices()) 1314 Ptr = GEP->getPointerOperand()->stripPointerCasts(); 1315 1316 if (auto *I = dyn_cast<Instruction>(Ptr)) { 1317 return I->getParent()->isEntryBlock() || 1318 (!ContainsIrreducibleLoops && !LI.getLoopFor(I->getParent())); 1319 } 1320 return true; 1321 } 1322 1323 // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess, 1324 // with no read access between them or on any other path to a function exit 1325 // block if \p KillingLoc is not accessible after the function returns. If 1326 // there is no such MemoryDef, return std::nullopt. The returned value may not 1327 // (completely) overwrite \p KillingLoc. 
Currently we bail out when we 1328 // encounter an aliasing MemoryUse (read). 1329 std::optional<MemoryAccess *> 1330 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess, 1331 const MemoryLocation &KillingLoc, const Value *KillingUndObj, 1332 unsigned &ScanLimit, unsigned &WalkerStepLimit, 1333 bool IsMemTerm, unsigned &PartialLimit) { 1334 if (ScanLimit == 0 || WalkerStepLimit == 0) { 1335 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); 1336 return std::nullopt; 1337 } 1338 1339 MemoryAccess *Current = StartAccess; 1340 Instruction *KillingI = KillingDef->getMemoryInst(); 1341 LLVM_DEBUG(dbgs() << " trying to get dominating access\n"); 1342 1343 // Only optimize defining access of KillingDef when directly starting at its 1344 // defining access. The defining access also must only access KillingLoc. At 1345 // the moment we only support instructions with a single write location, so 1346 // it should be sufficient to disable optimizations for instructions that 1347 // also read from memory. 1348 bool CanOptimize = OptimizeMemorySSA && 1349 KillingDef->getDefiningAccess() == StartAccess && 1350 !KillingI->mayReadFromMemory(); 1351 1352 // Find the next clobbering Mod access for DefLoc, starting at StartAccess. 1353 std::optional<MemoryLocation> CurrentLoc; 1354 for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) { 1355 LLVM_DEBUG({ 1356 dbgs() << " visiting " << *Current; 1357 if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current)) 1358 dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst() 1359 << ")"; 1360 dbgs() << "\n"; 1361 }); 1362 1363 // Reached TOP. 1364 if (MSSA.isLiveOnEntryDef(Current)) { 1365 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n"); 1366 if (CanOptimize && Current != KillingDef->getDefiningAccess()) 1367 // The first clobbering def is... none. 1368 KillingDef->setOptimized(Current); 1369 return std::nullopt; 1370 } 1371 1372 // Cost of a step. Accesses in the same block are more likely to be valid 1373 // candidates for elimination, hence consider them cheaper. 1374 unsigned StepCost = KillingDef->getBlock() == Current->getBlock() 1375 ? MemorySSASameBBStepCost 1376 : MemorySSAOtherBBStepCost; 1377 if (WalkerStepLimit <= StepCost) { 1378 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n"); 1379 return std::nullopt; 1380 } 1381 WalkerStepLimit -= StepCost; 1382 1383 // Return for MemoryPhis. They cannot be eliminated directly and the 1384 // caller is responsible for traversing them. 1385 if (isa<MemoryPhi>(Current)) { 1386 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n"); 1387 return Current; 1388 } 1389 1390 // Below, check if CurrentDef is a valid candidate to be eliminated by 1391 // KillingDef. If it is not, check the next candidate. 1392 MemoryDef *CurrentDef = cast<MemoryDef>(Current); 1393 Instruction *CurrentI = CurrentDef->getMemoryInst(); 1394 1395 if (canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) { 1396 CanOptimize = false; 1397 continue; 1398 } 1399 1400 // Before we try to remove anything, check for any extra throwing 1401 // instructions that block us from DSEing 1402 if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) { 1403 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n"); 1404 return std::nullopt; 1405 } 1406 1407 // Check for anything that looks like it will be a barrier to further 1408 // removal 1409 if (isDSEBarrier(KillingUndObj, CurrentI)) { 1410 LLVM_DEBUG(dbgs() << " ... 
skip, barrier\n"); 1411 return std::nullopt; 1412 } 1413 1414 // If Current is known to be on path that reads DefLoc or is a read 1415 // clobber, bail out, as the path is not profitable. We skip this check 1416 // for intrinsic calls, because the code knows how to handle memcpy 1417 // intrinsics. 1418 if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI)) 1419 return std::nullopt; 1420 1421 // Quick check if there are direct uses that are read-clobbers. 1422 if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) { 1423 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser())) 1424 return !MSSA.dominates(StartAccess, UseOrDef) && 1425 isReadClobber(KillingLoc, UseOrDef->getMemoryInst()); 1426 return false; 1427 })) { 1428 LLVM_DEBUG(dbgs() << " ... found a read clobber\n"); 1429 return std::nullopt; 1430 } 1431 1432 // If Current does not have an analyzable write location or is not 1433 // removable, skip it. 1434 CurrentLoc = getLocForWrite(CurrentI); 1435 if (!CurrentLoc || !isRemovable(CurrentI)) { 1436 CanOptimize = false; 1437 continue; 1438 } 1439 1440 // AliasAnalysis does not account for loops. Limit elimination to 1441 // candidates for which we can guarantee they always store to the same 1442 // memory location and not located in different loops. 1443 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) { 1444 LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n"); 1445 CanOptimize = false; 1446 continue; 1447 } 1448 1449 if (IsMemTerm) { 1450 // If the killing def is a memory terminator (e.g. lifetime.end), check 1451 // the next candidate if the current Current does not write the same 1452 // underlying object as the terminator. 1453 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) { 1454 CanOptimize = false; 1455 continue; 1456 } 1457 } else { 1458 int64_t KillingOffset = 0; 1459 int64_t DeadOffset = 0; 1460 auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc, 1461 KillingOffset, DeadOffset); 1462 if (CanOptimize) { 1463 // CurrentDef is the earliest write clobber of KillingDef. Use it as 1464 // optimized access. Do not optimize if CurrentDef is already the 1465 // defining access of KillingDef. 1466 if (CurrentDef != KillingDef->getDefiningAccess() && 1467 (OR == OW_Complete || OR == OW_MaybePartial)) 1468 KillingDef->setOptimized(CurrentDef); 1469 1470 // Once a may-aliasing def is encountered do not set an optimized 1471 // access. 1472 if (OR != OW_None) 1473 CanOptimize = false; 1474 } 1475 1476 // If Current does not write to the same object as KillingDef, check 1477 // the next candidate. 1478 if (OR == OW_Unknown || OR == OW_None) 1479 continue; 1480 else if (OR == OW_MaybePartial) { 1481 // If KillingDef only partially overwrites Current, check the next 1482 // candidate if the partial step limit is exceeded. This aggressively 1483 // limits the number of candidates for partial store elimination, 1484 // which are less likely to be removable in the end. 1485 if (PartialLimit <= 1) { 1486 WalkerStepLimit -= 1; 1487 LLVM_DEBUG(dbgs() << " ... reached partial limit ... continue with next access\n"); 1488 continue; 1489 } 1490 PartialLimit -= 1; 1491 } 1492 } 1493 break; 1494 }; 1495 1496 // Accesses to objects accessible after the function returns can only be 1497 // eliminated if the access is dead along all paths to the exit. Collect 1498 // the blocks with killing (=completely overwriting MemoryDefs) and check if 1499 // they cover all paths from MaybeDeadAccess to any function exit. 
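// Illustrative sketch (made-up IR, not taken from the pass itself): a store
// to a caller-visible object is only dead if every path to a return
// overwrites it again:
//   store i32 0, ptr @G          ; MaybeDeadAccess
//   br i1 %c, label %then, label %else
// then:                          ; killing block, overwrites @G
//   store i32 1, ptr @G
//   ret void
// else:
//   ret void                     ; @G still holds 0 here -> cannot eliminate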
1500 SmallPtrSet<Instruction *, 16> KillingDefs;
1501 KillingDefs.insert(KillingDef->getMemoryInst());
1502 MemoryAccess *MaybeDeadAccess = Current;
1503 MemoryLocation MaybeDeadLoc = *CurrentLoc;
1504 Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
1505 LLVM_DEBUG(dbgs() << " Checking for reads of " << *MaybeDeadAccess << " ("
1506 << *MaybeDeadI << ")\n");
1507
1508 SmallSetVector<MemoryAccess *, 32> WorkList;
1509 auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1510 for (Use &U : Acc->uses())
1511 WorkList.insert(cast<MemoryAccess>(U.getUser()));
1512 };
1513 PushMemUses(MaybeDeadAccess);
1514
1515 // Check if DeadDef may be read.
1516 for (unsigned I = 0; I < WorkList.size(); I++) {
1517 MemoryAccess *UseAccess = WorkList[I];
1518
1519 LLVM_DEBUG(dbgs() << " " << *UseAccess);
1520 // Bail out if the number of accesses to check exceeds the scan limit.
1521 if (ScanLimit < (WorkList.size() - I)) {
1522 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1523 return std::nullopt;
1524 }
1525 --ScanLimit;
1526 NumDomMemDefChecks++;
1527
1528 if (isa<MemoryPhi>(UseAccess)) {
1529 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1530 return DT.properlyDominates(KI->getParent(),
1531 UseAccess->getBlock());
1532 })) {
1533 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1534 continue;
1535 }
1536 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n");
1537 PushMemUses(UseAccess);
1538 continue;
1539 }
1540
1541 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1542 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1543
1544 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1545 return DT.dominates(KI, UseInst);
1546 })) {
1547 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1548 continue;
1549 }
1550
1551 // A memory terminator kills all preceding MemoryDefs and all succeeding
1552 // MemoryAccesses. We do not have to check its users.
1553 if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1554 LLVM_DEBUG(
1555 dbgs()
1556 << " ... skipping, memterminator invalidates following accesses\n");
1557 continue;
1558 }
1559
1560 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1561 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n");
1562 PushMemUses(UseAccess);
1563 continue;
1564 }
1565
1566 if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
1567 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n");
1568 return std::nullopt;
1569 }
1570
1571 // Uses which may read the original MemoryDef mean we cannot eliminate the
1572 // original MD. Stop walk.
1573 if (isReadClobber(MaybeDeadLoc, UseInst)) {
1574 LLVM_DEBUG(dbgs() << " ... found read clobber\n");
1575 return std::nullopt;
1576 }
1577
1578 // If this worklist walks back to the original memory access (and the
1579 // pointer is not guaranteed loop invariant) then we cannot assume that a
1580 // store kills itself.
1581 if (MaybeDeadAccess == UseAccess &&
1582 !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
1583 LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n");
1584 return std::nullopt;
1585 }
1586 // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check
1587 // if it reads the memory location.
1588 // TODO: It would probably be better to check for self-reads before
1589 // calling the function.
1590 if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
1591 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n");
1592 continue;
1593 }
1594
1595 // Check all uses for MemoryDefs, except for defs completely overwriting
1596 // the original location. Otherwise we have to check uses of *all*
1597 // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
1598 // miss cases like the following
1599 // 1 = Def(LoE) ; <----- DeadDef stores [0,1]
1600 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3]
1601 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3].
1602 // (The Use points to the *first* Def it may alias)
1603 // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias,
1604 // stores [0,1]
1605 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1606 if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1607 BasicBlock *MaybeKillingBlock = UseInst->getParent();
1608 if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1609 PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
1610 if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1611 LLVM_DEBUG(dbgs()
1612 << " ... found killing def " << *UseInst << "\n");
1613 KillingDefs.insert(UseInst);
1614 }
1615 } else {
1616 LLVM_DEBUG(dbgs()
1617 << " ... found preceding def " << *UseInst << "\n");
1618 return std::nullopt;
1619 }
1620 } else
1621 PushMemUses(UseDef);
1622 }
1623 }
1624
1625 // For accesses to locations visible after the function returns, make sure
1626 // that the location is dead (=overwritten) along all paths from
1627 // MaybeDeadAccess to the exit.
1628 if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1629 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1630 for (Instruction *KD : KillingDefs)
1631 KillingBlocks.insert(KD->getParent());
1632 assert(!KillingBlocks.empty() &&
1633 "Expected at least a single killing block");
1634
1635 // Find the common post-dominator of all killing blocks.
1636 BasicBlock *CommonPred = *KillingBlocks.begin();
1637 for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
1638 if (!CommonPred)
1639 break;
1640 CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
1641 }
1642
1643 // If the common post-dominator does not post-dominate MaybeDeadAccess,
1644 // there is a path from MaybeDeadAccess to an exit not going through a
1645 // killing block.
1646 if (!PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
1647 if (!AnyUnreachableExit)
1648 return std::nullopt;
1649
1650 // Fall back to CFG scan starting at all non-unreachable roots if not
1651 // all paths to the exit go through CommonPred.
1652 CommonPred = nullptr;
1653 }
1654
1655 // If CommonPred itself is in the set of killing blocks, we're done.
1656 if (KillingBlocks.count(CommonPred))
1657 return {MaybeDeadAccess};
1658
1659 SetVector<BasicBlock *> WorkList;
1660 // If CommonPred is null, there are multiple exits from the function.
1661 // They all have to be added to the worklist.
1662 if (CommonPred)
1663 WorkList.insert(CommonPred);
1664 else
1665 for (BasicBlock *R : PDT.roots()) {
1666 if (!isa<UnreachableInst>(R->getTerminator()))
1667 WorkList.insert(R);
1668 }
1669
1670 NumCFGTries++;
1671 // Check if all paths starting from an exit node go through one of the
1672 // killing blocks before reaching MaybeDeadAccess.
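// The scan below walks the CFG backwards from the exit blocks (or from
// CommonPred): killing blocks stop the walk, reaching MaybeDeadAccess' block
// means an uncovered path exists, and the search gives up once
// MemorySSAPathCheckLimit blocks have been queued.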
1673 for (unsigned I = 0; I < WorkList.size(); I++) {
1674 NumCFGChecks++;
1675 BasicBlock *Current = WorkList[I];
1676 if (KillingBlocks.count(Current))
1677 continue;
1678 if (Current == MaybeDeadAccess->getBlock())
1679 return std::nullopt;
1680
1681 // MaybeDeadAccess is reachable from the entry, so we don't have to
1682 // explore unreachable blocks further.
1683 if (!DT.isReachableFromEntry(Current))
1684 continue;
1685
1686 for (BasicBlock *Pred : predecessors(Current))
1687 WorkList.insert(Pred);
1688
1689 if (WorkList.size() >= MemorySSAPathCheckLimit)
1690 return std::nullopt;
1691 }
1692 NumCFGSuccess++;
1693 }
1694
1695 // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is
1696 // potentially dead.
1697 return {MaybeDeadAccess};
1698 }
1699
1700 // Delete dead memory defs.
1701 void deleteDeadInstruction(Instruction *SI) {
1702 MemorySSAUpdater Updater(&MSSA);
1703 SmallVector<Instruction *, 32> NowDeadInsts;
1704 NowDeadInsts.push_back(SI);
1705 --NumFastOther;
1706
1707 while (!NowDeadInsts.empty()) {
1708 Instruction *DeadInst = NowDeadInsts.pop_back_val();
1709 ++NumFastOther;
1710
1711 // Try to preserve debug information attached to the dead instruction.
1712 salvageDebugInfo(*DeadInst);
1713 salvageKnowledge(DeadInst);
1714
1715 // Remove the Instruction from MSSA.
1716 if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1717 if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1718 SkipStores.insert(MD);
1719 if (auto *SI = dyn_cast<StoreInst>(MD->getMemoryInst())) {
1720 if (SI->getValueOperand()->getType()->isPointerTy()) {
1721 const Value *UO = getUnderlyingObject(SI->getValueOperand());
1722 if (CapturedBeforeReturn.erase(UO))
1723 ShouldIterateEndOfFunctionDSE = true;
1724 InvisibleToCallerAfterRet.erase(UO);
1725 }
1726 }
1727 }
1728
1729 Updater.removeMemoryAccess(MA);
1730 }
1731
1732 auto I = IOLs.find(DeadInst->getParent());
1733 if (I != IOLs.end())
1734 I->second.erase(DeadInst);
1735 // Remove its operands.
1736 for (Use &O : DeadInst->operands())
1737 if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1738 O = nullptr;
1739 if (isInstructionTriviallyDead(OpI, &TLI))
1740 NowDeadInsts.push_back(OpI);
1741 }
1742
1743 EI.removeInstruction(DeadInst);
1744 DeadInst->eraseFromParent();
1745 }
1746 }
1747
1748 // Check for any extra throws between \p KillingI and \p DeadI that block
1749 // DSE. This only checks extra maythrows (those that aren't MemoryDefs).
1750 // MemoryDefs that may throw are handled during the walk from one def to the
1751 // next.
1752 bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
1753 const Value *KillingUndObj) {
1754 // First see if we can ignore it by using the fact that the killing store
1755 // writes to an alloca/alloca-like object that is not visible to the caller
1756 // during execution of the function.
1757 if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
1758 return false;
1759
1760 if (KillingI->getParent() == DeadI->getParent())
1761 return ThrowingBlocks.count(KillingI->getParent());
1762 return !ThrowingBlocks.empty();
1763 }
1764
1765 // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following
1766 // instructions act as barriers:
1767 // * A memory instruction that may throw, when \p KillingI accesses a
1768 // non-stack object.
1769 // * Atomic stores stronger than monotonic.
1770 bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
1771 // If DeadI may throw it acts as a barrier, unless the killing access is to
1772 // an alloca/alloca-like object that does not escape.
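// For example, if DeadI may throw while the underlying object is visible to
// the caller on unwind, the exceptional path could observe memory this walk
// assumes is dead; likewise, atomic accesses stronger than monotonic impose
// ordering we must not break. Both cases conservatively end the walk here.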
1773 if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) 1774 return true; 1775 1776 // If DeadI is an atomic load/store stronger than monotonic, do not try to 1777 // eliminate/reorder it. 1778 if (DeadI->isAtomic()) { 1779 if (auto *LI = dyn_cast<LoadInst>(DeadI)) 1780 return isStrongerThanMonotonic(LI->getOrdering()); 1781 if (auto *SI = dyn_cast<StoreInst>(DeadI)) 1782 return isStrongerThanMonotonic(SI->getOrdering()); 1783 if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI)) 1784 return isStrongerThanMonotonic(ARMW->getOrdering()); 1785 if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI)) 1786 return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) || 1787 isStrongerThanMonotonic(CmpXchg->getFailureOrdering()); 1788 llvm_unreachable("other instructions should be skipped in MemorySSA"); 1789 } 1790 return false; 1791 } 1792 1793 /// Eliminate writes to objects that are not visible in the caller and are not 1794 /// accessed before returning from the function. 1795 bool eliminateDeadWritesAtEndOfFunction() { 1796 bool MadeChange = false; 1797 LLVM_DEBUG( 1798 dbgs() 1799 << "Trying to eliminate MemoryDefs at the end of the function\n"); 1800 do { 1801 ShouldIterateEndOfFunctionDSE = false; 1802 for (MemoryDef *Def : llvm::reverse(MemDefs)) { 1803 if (SkipStores.contains(Def)) 1804 continue; 1805 1806 Instruction *DefI = Def->getMemoryInst(); 1807 auto DefLoc = getLocForWrite(DefI); 1808 if (!DefLoc || !isRemovable(DefI)) 1809 continue; 1810 1811 // NOTE: Currently eliminating writes at the end of a function is 1812 // limited to MemoryDefs with a single underlying object, to save 1813 // compile-time. In practice it appears the case with multiple 1814 // underlying objects is very uncommon. If it turns out to be important, 1815 // we can use getUnderlyingObjects here instead. 1816 const Value *UO = getUnderlyingObject(DefLoc->Ptr); 1817 if (!isInvisibleToCallerAfterRet(UO)) 1818 continue; 1819 1820 if (isWriteAtEndOfFunction(Def)) { 1821 // See through pointer-to-pointer bitcasts 1822 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end " 1823 "of the function\n"); 1824 deleteDeadInstruction(DefI); 1825 ++NumFastStores; 1826 MadeChange = true; 1827 } 1828 } 1829 } while (ShouldIterateEndOfFunctionDSE); 1830 return MadeChange; 1831 } 1832 1833 /// If we have a zero initializing memset following a call to malloc, 1834 /// try folding it into a call to calloc. 1835 bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) { 1836 Instruction *DefI = Def->getMemoryInst(); 1837 MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI); 1838 if (!MemSet) 1839 // TODO: Could handle zero store to small allocation as well. 1840 return false; 1841 Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue()); 1842 if (!StoredConstant || !StoredConstant->isNullValue()) 1843 return false; 1844 1845 if (!isRemovable(DefI)) 1846 // The memset might be volatile.. 
1847 return false; 1848 1849 if (F.hasFnAttribute(Attribute::SanitizeMemory) || 1850 F.hasFnAttribute(Attribute::SanitizeAddress) || 1851 F.hasFnAttribute(Attribute::SanitizeHWAddress) || 1852 F.getName() == "calloc") 1853 return false; 1854 auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO)); 1855 if (!Malloc) 1856 return false; 1857 auto *InnerCallee = Malloc->getCalledFunction(); 1858 if (!InnerCallee) 1859 return false; 1860 LibFunc Func; 1861 if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) || 1862 Func != LibFunc_malloc) 1863 return false; 1864 1865 auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) { 1866 // Check for br(icmp ptr, null), truebb, falsebb) pattern at the end 1867 // of malloc block 1868 auto *MallocBB = Malloc->getParent(), 1869 *MemsetBB = Memset->getParent(); 1870 if (MallocBB == MemsetBB) 1871 return true; 1872 auto *Ptr = Memset->getArgOperand(0); 1873 auto *TI = MallocBB->getTerminator(); 1874 ICmpInst::Predicate Pred; 1875 BasicBlock *TrueBB, *FalseBB; 1876 if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB, 1877 FalseBB))) 1878 return false; 1879 if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB) 1880 return false; 1881 return true; 1882 }; 1883 1884 if (Malloc->getOperand(0) != MemSet->getLength()) 1885 return false; 1886 if (!shouldCreateCalloc(Malloc, MemSet) || 1887 !DT.dominates(Malloc, MemSet) || 1888 !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT)) 1889 return false; 1890 IRBuilder<> IRB(Malloc); 1891 Type *SizeTTy = Malloc->getArgOperand(0)->getType(); 1892 auto *Calloc = emitCalloc(ConstantInt::get(SizeTTy, 1), 1893 Malloc->getArgOperand(0), IRB, TLI); 1894 if (!Calloc) 1895 return false; 1896 MemorySSAUpdater Updater(&MSSA); 1897 auto *LastDef = 1898 cast<MemoryDef>(Updater.getMemorySSA()->getMemoryAccess(Malloc)); 1899 auto *NewAccess = 1900 Updater.createMemoryAccessAfter(cast<Instruction>(Calloc), LastDef, 1901 LastDef); 1902 auto *NewAccessMD = cast<MemoryDef>(NewAccess); 1903 Updater.insertDef(NewAccessMD, /*RenameUses=*/true); 1904 Updater.removeMemoryAccess(Malloc); 1905 Malloc->replaceAllUsesWith(Calloc); 1906 Malloc->eraseFromParent(); 1907 return true; 1908 } 1909 1910 /// \returns true if \p Def is a no-op store, either because it 1911 /// directly stores back a loaded value or stores zero to a calloced object. 1912 bool storeIsNoop(MemoryDef *Def, const Value *DefUO) { 1913 Instruction *DefI = Def->getMemoryInst(); 1914 StoreInst *Store = dyn_cast<StoreInst>(DefI); 1915 MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI); 1916 Constant *StoredConstant = nullptr; 1917 if (Store) 1918 StoredConstant = dyn_cast<Constant>(Store->getOperand(0)); 1919 else if (MemSet) 1920 StoredConstant = dyn_cast<Constant>(MemSet->getValue()); 1921 else 1922 return false; 1923 1924 if (!isRemovable(DefI)) 1925 return false; 1926 1927 if (StoredConstant) { 1928 Constant *InitC = 1929 getInitialValueOfAllocation(DefUO, &TLI, StoredConstant->getType()); 1930 // If the clobbering access is LiveOnEntry, no instructions between them 1931 // can modify the memory location. 1932 if (InitC && InitC == StoredConstant) 1933 return MSSA.isLiveOnEntryDef( 1934 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA)); 1935 } 1936 1937 if (!Store) 1938 return false; 1939 1940 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) { 1941 if (LoadI->getPointerOperand() == Store->getOperand(1)) { 1942 // Get the defining access for the load. 
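// If that defining access also reaches the store, the store just writes back
// the bytes that are already in memory. Illustrative IR for the pattern:
//   %v = load i32, ptr %p
//   store i32 %v, ptr %p   ; removable when nothing clobbers %p in between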
1943 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess(); 1944 // Fast path: the defining accesses are the same. 1945 if (LoadAccess == Def->getDefiningAccess()) 1946 return true; 1947 1948 // Look through phi accesses. Recursively scan all phi accesses by 1949 // adding them to a worklist. Bail when we run into a memory def that 1950 // does not match LoadAccess. 1951 SetVector<MemoryAccess *> ToCheck; 1952 MemoryAccess *Current = 1953 MSSA.getWalker()->getClobberingMemoryAccess(Def, BatchAA); 1954 // We don't want to bail when we run into the store memory def. But, 1955 // the phi access may point to it. So, pretend like we've already 1956 // checked it. 1957 ToCheck.insert(Def); 1958 ToCheck.insert(Current); 1959 // Start at current (1) to simulate already having checked Def. 1960 for (unsigned I = 1; I < ToCheck.size(); ++I) { 1961 Current = ToCheck[I]; 1962 if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) { 1963 // Check all the operands. 1964 for (auto &Use : PhiAccess->incoming_values()) 1965 ToCheck.insert(cast<MemoryAccess>(&Use)); 1966 continue; 1967 } 1968 1969 // If we found a memory def, bail. This happens when we have an 1970 // unrelated write in between an otherwise noop store. 1971 assert(isa<MemoryDef>(Current) && 1972 "Only MemoryDefs should reach here."); 1973 // TODO: Skip no alias MemoryDefs that have no aliasing reads. 1974 // We are searching for the definition of the store's destination. 1975 // So, if that is the same definition as the load, then this is a 1976 // noop. Otherwise, fail. 1977 if (LoadAccess != Current) 1978 return false; 1979 } 1980 return true; 1981 } 1982 } 1983 1984 return false; 1985 } 1986 1987 bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) { 1988 bool Changed = false; 1989 for (auto OI : IOL) { 1990 Instruction *DeadI = OI.first; 1991 MemoryLocation Loc = *getLocForWrite(DeadI); 1992 assert(isRemovable(DeadI) && "Expect only removable instruction"); 1993 1994 const Value *Ptr = Loc.Ptr->stripPointerCasts(); 1995 int64_t DeadStart = 0; 1996 uint64_t DeadSize = Loc.Size.getValue(); 1997 GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL); 1998 OverlapIntervalsTy &IntervalMap = OI.second; 1999 Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize); 2000 if (IntervalMap.empty()) 2001 continue; 2002 Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize); 2003 } 2004 return Changed; 2005 } 2006 2007 /// Eliminates writes to locations where the value that is being written 2008 /// is already stored at the same location. 2009 bool eliminateRedundantStoresOfExistingValues() { 2010 bool MadeChange = false; 2011 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the " 2012 "already existing value\n"); 2013 for (auto *Def : MemDefs) { 2014 if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def)) 2015 continue; 2016 2017 Instruction *DefInst = Def->getMemoryInst(); 2018 auto MaybeDefLoc = getLocForWrite(DefInst); 2019 if (!MaybeDefLoc || !isRemovable(DefInst)) 2020 continue; 2021 2022 MemoryDef *UpperDef; 2023 // To conserve compile-time, we avoid walking to the next clobbering def. 2024 // Instead, we just try to get the optimized access, if it exists. DSE 2025 // will try to optimize defs during the earlier traversal. 
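// The optimized access, when present, was recorded by getDomMemoryDef and
// points at an earlier write clobber of Def, i.e. exactly the store a
// redundant Def would be repeating.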
2026 if (Def->isOptimized()) 2027 UpperDef = dyn_cast<MemoryDef>(Def->getOptimized()); 2028 else 2029 UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess()); 2030 if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef)) 2031 continue; 2032 2033 Instruction *UpperInst = UpperDef->getMemoryInst(); 2034 auto IsRedundantStore = [&]() { 2035 if (DefInst->isIdenticalTo(UpperInst)) 2036 return true; 2037 if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) { 2038 if (auto *SI = dyn_cast<StoreInst>(DefInst)) { 2039 // MemSetInst must have a write location. 2040 MemoryLocation UpperLoc = *getLocForWrite(UpperInst); 2041 int64_t InstWriteOffset = 0; 2042 int64_t DepWriteOffset = 0; 2043 auto OR = isOverwrite(UpperInst, DefInst, UpperLoc, *MaybeDefLoc, 2044 InstWriteOffset, DepWriteOffset); 2045 Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL); 2046 return StoredByte && StoredByte == MemSetI->getOperand(1) && 2047 OR == OW_Complete; 2048 } 2049 } 2050 return false; 2051 }; 2052 2053 if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst)) 2054 continue; 2055 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *DefInst 2056 << '\n'); 2057 deleteDeadInstruction(DefInst); 2058 NumRedundantStores++; 2059 MadeChange = true; 2060 } 2061 return MadeChange; 2062 } 2063 }; 2064 2065 static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, 2066 DominatorTree &DT, PostDominatorTree &PDT, 2067 AssumptionCache &AC, 2068 const TargetLibraryInfo &TLI, 2069 const LoopInfo &LI) { 2070 bool MadeChange = false; 2071 2072 DSEState State(F, AA, MSSA, DT, PDT, AC, TLI, LI); 2073 // For each store: 2074 for (unsigned I = 0; I < State.MemDefs.size(); I++) { 2075 MemoryDef *KillingDef = State.MemDefs[I]; 2076 if (State.SkipStores.count(KillingDef)) 2077 continue; 2078 Instruction *KillingI = KillingDef->getMemoryInst(); 2079 2080 std::optional<MemoryLocation> MaybeKillingLoc; 2081 if (State.isMemTerminatorInst(KillingI)) { 2082 if (auto KillingLoc = State.getLocForTerminator(KillingI)) 2083 MaybeKillingLoc = KillingLoc->first; 2084 } else { 2085 MaybeKillingLoc = State.getLocForWrite(KillingI); 2086 } 2087 2088 if (!MaybeKillingLoc) { 2089 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for " 2090 << *KillingI << "\n"); 2091 continue; 2092 } 2093 MemoryLocation KillingLoc = *MaybeKillingLoc; 2094 assert(KillingLoc.Ptr && "KillingLoc should not be null"); 2095 const Value *KillingUndObj = getUnderlyingObject(KillingLoc.Ptr); 2096 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by " 2097 << *KillingDef << " (" << *KillingI << ")\n"); 2098 2099 unsigned ScanLimit = MemorySSAScanLimit; 2100 unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit; 2101 unsigned PartialLimit = MemorySSAPartialStoreLimit; 2102 // Worklist of MemoryAccesses that may be killed by KillingDef. 2103 SetVector<MemoryAccess *> ToCheck; 2104 ToCheck.insert(KillingDef->getDefiningAccess()); 2105 2106 bool Shortend = false; 2107 bool IsMemTerm = State.isMemTerminatorInst(KillingI); 2108 // Check if MemoryAccesses in the worklist are killed by KillingDef. 
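// Each iteration asks getDomMemoryDef for the next candidate dominating
// KillingDef. MemoryPhis are never killed directly; their incoming accesses
// are pushed back onto ToCheck so that every def chain reaching KillingLoc
// is visited, subject to the scan and walker step limits above.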
2109 for (unsigned I = 0; I < ToCheck.size(); I++) {
2110 MemoryAccess *Current = ToCheck[I];
2111 if (State.SkipStores.count(Current))
2112 continue;
2113
2114 std::optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef(
2115 KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit,
2116 WalkerStepLimit, IsMemTerm, PartialLimit);
2117
2118 if (!MaybeDeadAccess) {
2119 LLVM_DEBUG(dbgs() << " finished walk\n");
2120 continue;
2121 }
2122
2123 MemoryAccess *DeadAccess = *MaybeDeadAccess;
2124 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess);
2125 if (isa<MemoryPhi>(DeadAccess)) {
2126 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n");
2127 for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) {
2128 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
2129 BasicBlock *IncomingBlock = IncomingAccess->getBlock();
2130 BasicBlock *PhiBlock = DeadAccess->getBlock();
2131
2132 // We only consider incoming MemoryAccesses that come before the
2133 // MemoryPhi. Otherwise we could discover candidates that do not
2134 // strictly dominate our starting def.
2135 if (State.PostOrderNumbers[IncomingBlock] >
2136 State.PostOrderNumbers[PhiBlock])
2137 ToCheck.insert(IncomingAccess);
2138 }
2139 continue;
2140 }
2141 auto *DeadDefAccess = cast<MemoryDef>(DeadAccess);
2142 Instruction *DeadI = DeadDefAccess->getMemoryInst();
2143 LLVM_DEBUG(dbgs() << " (" << *DeadI << ")\n");
2144 ToCheck.insert(DeadDefAccess->getDefiningAccess());
2145 NumGetDomMemoryDefPassed++;
2146
2147 if (!DebugCounter::shouldExecute(MemorySSACounter))
2148 continue;
2149
2150 MemoryLocation DeadLoc = *State.getLocForWrite(DeadI);
2151
2152 if (IsMemTerm) {
2153 const Value *DeadUndObj = getUnderlyingObject(DeadLoc.Ptr);
2154 if (KillingUndObj != DeadUndObj)
2155 continue;
2156 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI
2157 << "\n KILLER: " << *KillingI << '\n');
2158 State.deleteDeadInstruction(DeadI);
2159 ++NumFastStores;
2160 MadeChange = true;
2161 } else {
2162 // Check if KillingI overwrites DeadI.
2163 int64_t KillingOffset = 0;
2164 int64_t DeadOffset = 0;
2165 OverwriteResult OR = State.isOverwrite(
2166 KillingI, DeadI, KillingLoc, DeadLoc, KillingOffset, DeadOffset);
2167 if (OR == OW_MaybePartial) {
2168 auto Iter = State.IOLs.insert(
2169 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
2170 DeadI->getParent(), InstOverlapIntervalsTy()));
2171 auto &IOL = Iter.first->second;
2172 OR = isPartialOverwrite(KillingLoc, DeadLoc, KillingOffset,
2173 DeadOffset, DeadI, IOL);
2174 }
2175
2176 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
2177 auto *DeadSI = dyn_cast<StoreInst>(DeadI);
2178 auto *KillingSI = dyn_cast<StoreInst>(KillingI);
2179 // We are re-using tryToMergePartialOverlappingStores, which requires
2180 // DeadSI to dominate KillingSI.
2181 // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
2182 if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) {
2183 if (Constant *Merged = tryToMergePartialOverlappingStores(
2184 KillingSI, DeadSI, KillingOffset, DeadOffset, State.DL,
2185 State.BatchAA, &DT)) {
2186
2187 // Update stored value of earlier store to merged constant.
2188 DeadSI->setOperand(0, Merged);
2189 ++NumModifiedStores;
2190 MadeChange = true;
2191
2192 Shortend = true;
2193 // Remove killing store and remove any outstanding overlap
2194 // intervals for the updated store.
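// Note that the roles flip here: DeadSI now carries the merged value, so it
// is KillingSI that becomes dead, and DeadSI's recorded overlap intervals are
// dropped since they no longer apply to the updated store.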
2195 State.deleteDeadInstruction(KillingSI); 2196 auto I = State.IOLs.find(DeadSI->getParent()); 2197 if (I != State.IOLs.end()) 2198 I->second.erase(DeadSI); 2199 break; 2200 } 2201 } 2202 } 2203 2204 if (OR == OW_Complete) { 2205 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI 2206 << "\n KILLER: " << *KillingI << '\n'); 2207 State.deleteDeadInstruction(DeadI); 2208 ++NumFastStores; 2209 MadeChange = true; 2210 } 2211 } 2212 } 2213 2214 // Check if the store is a no-op. 2215 if (!Shortend && State.storeIsNoop(KillingDef, KillingUndObj)) { 2216 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *KillingI 2217 << '\n'); 2218 State.deleteDeadInstruction(KillingI); 2219 NumRedundantStores++; 2220 MadeChange = true; 2221 continue; 2222 } 2223 2224 // Can we form a calloc from a memset/malloc pair? 2225 if (!Shortend && State.tryFoldIntoCalloc(KillingDef, KillingUndObj)) { 2226 LLVM_DEBUG(dbgs() << "DSE: Remove memset after forming calloc:\n" 2227 << " DEAD: " << *KillingI << '\n'); 2228 State.deleteDeadInstruction(KillingI); 2229 MadeChange = true; 2230 continue; 2231 } 2232 } 2233 2234 if (EnablePartialOverwriteTracking) 2235 for (auto &KV : State.IOLs) 2236 MadeChange |= State.removePartiallyOverlappedStores(KV.second); 2237 2238 MadeChange |= State.eliminateRedundantStoresOfExistingValues(); 2239 MadeChange |= State.eliminateDeadWritesAtEndOfFunction(); 2240 return MadeChange; 2241 } 2242 } // end anonymous namespace 2243 2244 //===----------------------------------------------------------------------===// 2245 // DSE Pass 2246 //===----------------------------------------------------------------------===// 2247 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) { 2248 AliasAnalysis &AA = AM.getResult<AAManager>(F); 2249 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F); 2250 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F); 2251 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); 2252 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F); 2253 AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F); 2254 LoopInfo &LI = AM.getResult<LoopAnalysis>(F); 2255 2256 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, AC, TLI, LI); 2257 2258 #ifdef LLVM_ENABLE_STATS 2259 if (AreStatisticsEnabled()) 2260 for (auto &I : instructions(F)) 2261 NumRemainingStores += isa<StoreInst>(&I); 2262 #endif 2263 2264 if (!Changed) 2265 return PreservedAnalyses::all(); 2266 2267 PreservedAnalyses PA; 2268 PA.preserveSet<CFGAnalyses>(); 2269 PA.preserve<MemorySSAAnalysis>(); 2270 PA.preserve<LoopAnalysis>(); 2271 return PA; 2272 } 2273