1 //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The code below implements dead store elimination using MemorySSA. It uses 10 // the following general approach: given a MemoryDef, walk upwards to find 11 // clobbering MemoryDefs that may be killed by the starting def. Then check 12 // that there are no uses that may read the location of the original MemoryDef 13 // in between both MemoryDefs. A bit more concretely: 14 // 15 // For all MemoryDefs StartDef: 16 // 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by walking 17 // upwards. 18 // 2. Check that there are no reads between MaybeDeadAccess and the StartDef by 19 // checking all uses starting at MaybeDeadAccess and walking until we see 20 // StartDef. 21 // 3. For each found CurrentDef, check that: 22 // 1. There are no barrier instructions between CurrentDef and StartDef (like 23 // throws or stores with ordering constraints). 24 // 2. StartDef is executed whenever CurrentDef is executed. 25 // 3. StartDef completely overwrites CurrentDef. 26 // 4. Erase CurrentDef from the function and MemorySSA. 27 // 28 //===----------------------------------------------------------------------===// 29 30 #include "llvm/Transforms/Scalar/DeadStoreElimination.h" 31 #include "llvm/ADT/APInt.h" 32 #include "llvm/ADT/DenseMap.h" 33 #include "llvm/ADT/MapVector.h" 34 #include "llvm/ADT/PostOrderIterator.h" 35 #include "llvm/ADT/SetVector.h" 36 #include "llvm/ADT/SmallPtrSet.h" 37 #include "llvm/ADT/SmallVector.h" 38 #include "llvm/ADT/Statistic.h" 39 #include "llvm/ADT/StringRef.h" 40 #include "llvm/Analysis/AliasAnalysis.h" 41 #include "llvm/Analysis/AssumptionCache.h" 42 #include "llvm/Analysis/CaptureTracking.h" 43 #include "llvm/Analysis/CodeMetrics.h" 44 #include "llvm/Analysis/GlobalsModRef.h" 45 #include "llvm/Analysis/LoopInfo.h" 46 #include "llvm/Analysis/MemoryBuiltins.h" 47 #include "llvm/Analysis/MemoryLocation.h" 48 #include "llvm/Analysis/MemorySSA.h" 49 #include "llvm/Analysis/MemorySSAUpdater.h" 50 #include "llvm/Analysis/MustExecute.h" 51 #include "llvm/Analysis/PostDominators.h" 52 #include "llvm/Analysis/TargetLibraryInfo.h" 53 #include "llvm/Analysis/ValueTracking.h" 54 #include "llvm/IR/Argument.h" 55 #include "llvm/IR/BasicBlock.h" 56 #include "llvm/IR/Constant.h" 57 #include "llvm/IR/Constants.h" 58 #include "llvm/IR/DataLayout.h" 59 #include "llvm/IR/Dominators.h" 60 #include "llvm/IR/Function.h" 61 #include "llvm/IR/IRBuilder.h" 62 #include "llvm/IR/InstIterator.h" 63 #include "llvm/IR/InstrTypes.h" 64 #include "llvm/IR/Instruction.h" 65 #include "llvm/IR/Instructions.h" 66 #include "llvm/IR/IntrinsicInst.h" 67 #include "llvm/IR/Module.h" 68 #include "llvm/IR/PassManager.h" 69 #include "llvm/IR/PatternMatch.h" 70 #include "llvm/IR/Value.h" 71 #include "llvm/InitializePasses.h" 72 #include "llvm/Pass.h" 73 #include "llvm/Support/Casting.h" 74 #include "llvm/Support/CommandLine.h" 75 #include "llvm/Support/Debug.h" 76 #include "llvm/Support/DebugCounter.h" 77 #include "llvm/Support/ErrorHandling.h" 78 #include "llvm/Support/raw_ostream.h" 79 #include "llvm/Transforms/Scalar.h" 80 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" 81 #include "llvm/Transforms/Utils/BuildLibCalls.h" 82 #include "llvm/Transforms/Utils/Local.h" 83 #include <algorithm> 84 #include <cassert> 85 #include <cstdint> 86 #include <iterator> 87 #include <map> 88 #include <utility> 89 90 using namespace llvm; 91 using namespace PatternMatch; 92 93 #define DEBUG_TYPE "dse" 94 95 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE"); 96 STATISTIC(NumRedundantStores, "Number of redundant stores deleted"); 97 STATISTIC(NumFastStores, "Number of stores deleted"); 98 STATISTIC(NumFastOther, "Number of other instrs removed"); 99 STATISTIC(NumCompletePartials, "Number of stores dead by later partials"); 100 STATISTIC(NumModifiedStores, "Number of stores modified"); 101 STATISTIC(NumCFGChecks, "Number of stores modified"); 102 STATISTIC(NumCFGTries, "Number of stores modified"); 103 STATISTIC(NumCFGSuccess, "Number of stores modified"); 104 STATISTIC(NumGetDomMemoryDefPassed, 105 "Number of times a valid candidate is returned from getDomMemoryDef"); 106 STATISTIC(NumDomMemDefChecks, 107 "Number iterations check for reads in getDomMemoryDef"); 108 109 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa", 110 "Controls which MemoryDefs are eliminated."); 111 112 static cl::opt<bool> 113 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", 114 cl::init(true), cl::Hidden, 115 cl::desc("Enable partial-overwrite tracking in DSE")); 116 117 static cl::opt<bool> 118 EnablePartialStoreMerging("enable-dse-partial-store-merging", 119 cl::init(true), cl::Hidden, 120 cl::desc("Enable partial store merging in DSE")); 121 122 static cl::opt<unsigned> 123 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden, 124 cl::desc("The number of memory instructions to scan for " 125 "dead store elimination (default = 150)")); 126 static cl::opt<unsigned> MemorySSAUpwardsStepLimit( 127 "dse-memoryssa-walklimit", cl::init(90), cl::Hidden, 128 cl::desc("The maximum number of steps while walking upwards to find " 129 "MemoryDefs that may be killed (default = 90)")); 130 131 static cl::opt<unsigned> MemorySSAPartialStoreLimit( 132 "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden, 133 cl::desc("The maximum number candidates that only partially overwrite the " 134 "killing MemoryDef to consider" 135 " (default = 5)")); 136 137 static cl::opt<unsigned> MemorySSADefsPerBlockLimit( 138 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden, 139 cl::desc("The number of MemoryDefs we consider as candidates to eliminated " 140 "other stores per basic block (default = 5000)")); 141 142 static cl::opt<unsigned> MemorySSASameBBStepCost( 143 "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden, 144 cl::desc( 145 "The cost of a step in the same basic block as the killing MemoryDef" 146 "(default = 1)")); 147 148 static cl::opt<unsigned> 149 MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5), 150 cl::Hidden, 151 cl::desc("The cost of a step in a different basic " 152 "block than the killing MemoryDef" 153 "(default = 5)")); 154 155 static cl::opt<unsigned> MemorySSAPathCheckLimit( 156 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden, 157 cl::desc("The maximum number of blocks to check when trying to prove that " 158 "all paths to an exit go through a killing block (default = 50)")); 159 160 // This flags allows or disallows DSE to optimize MemorySSA during its 161 // traversal. Note that DSE optimizing MemorySSA may impact other passes 162 // downstream of the DSE invocation and can lead to issues not being 163 // reproducible in isolation (i.e. when MemorySSA is built from scratch). In 164 // those cases, the flag can be used to check if DSE's MemorySSA optimizations 165 // impact follow-up passes. 166 static cl::opt<bool> 167 OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden, 168 cl::desc("Allow DSE to optimize memory accesses.")); 169 170 //===----------------------------------------------------------------------===// 171 // Helper functions 172 //===----------------------------------------------------------------------===// 173 using OverlapIntervalsTy = std::map<int64_t, int64_t>; 174 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>; 175 176 /// Returns true if the end of this instruction can be safely shortened in 177 /// length. 178 static bool isShortenableAtTheEnd(Instruction *I) { 179 // Don't shorten stores for now 180 if (isa<StoreInst>(I)) 181 return false; 182 183 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 184 switch (II->getIntrinsicID()) { 185 default: return false; 186 case Intrinsic::memset: 187 case Intrinsic::memcpy: 188 case Intrinsic::memcpy_element_unordered_atomic: 189 case Intrinsic::memset_element_unordered_atomic: 190 // Do shorten memory intrinsics. 191 // FIXME: Add memmove if it's also safe to transform. 192 return true; 193 } 194 } 195 196 // Don't shorten libcalls calls for now. 197 198 return false; 199 } 200 201 /// Returns true if the beginning of this instruction can be safely shortened 202 /// in length. 203 static bool isShortenableAtTheBeginning(Instruction *I) { 204 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be 205 // easily done by offsetting the source address. 206 return isa<AnyMemSetInst>(I); 207 } 208 209 static uint64_t getPointerSize(const Value *V, const DataLayout &DL, 210 const TargetLibraryInfo &TLI, 211 const Function *F) { 212 uint64_t Size; 213 ObjectSizeOpts Opts; 214 Opts.NullIsUnknownSize = NullPointerIsDefined(F); 215 216 if (getObjectSize(V, Size, DL, &TLI, Opts)) 217 return Size; 218 return MemoryLocation::UnknownSize; 219 } 220 221 namespace { 222 223 enum OverwriteResult { 224 OW_Begin, 225 OW_Complete, 226 OW_End, 227 OW_PartialEarlierWithFullLater, 228 OW_MaybePartial, 229 OW_None, 230 OW_Unknown 231 }; 232 233 } // end anonymous namespace 234 235 /// Check if two instruction are masked stores that completely 236 /// overwrite one another. More specifically, \p KillingI has to 237 /// overwrite \p DeadI. 238 static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI, 239 const Instruction *DeadI, 240 BatchAAResults &AA) { 241 const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI); 242 const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI); 243 if (KillingII == nullptr || DeadII == nullptr) 244 return OW_Unknown; 245 if (KillingII->getIntrinsicID() != Intrinsic::masked_store || 246 DeadII->getIntrinsicID() != Intrinsic::masked_store) 247 return OW_Unknown; 248 // Pointers. 249 Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts(); 250 Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts(); 251 if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr)) 252 return OW_Unknown; 253 // Masks. 254 // TODO: check that KillingII's mask is a superset of the DeadII's mask. 255 if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3)) 256 return OW_Unknown; 257 return OW_Complete; 258 } 259 260 /// Return 'OW_Complete' if a store to the 'KillingLoc' location completely 261 /// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the 262 /// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin' 263 /// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'. 264 /// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was 265 /// overwritten by a killing (smaller) store which doesn't write outside the big 266 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined. 267 /// NOTE: This function must only be called if both \p KillingLoc and \p 268 /// DeadLoc belong to the same underlying object with valid \p KillingOff and 269 /// \p DeadOff. 270 static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc, 271 const MemoryLocation &DeadLoc, 272 int64_t KillingOff, int64_t DeadOff, 273 Instruction *DeadI, 274 InstOverlapIntervalsTy &IOL) { 275 const uint64_t KillingSize = KillingLoc.Size.getValue(); 276 const uint64_t DeadSize = DeadLoc.Size.getValue(); 277 // We may now overlap, although the overlap is not complete. There might also 278 // be other incomplete overlaps, and together, they might cover the complete 279 // dead store. 280 // Note: The correctness of this logic depends on the fact that this function 281 // is not even called providing DepWrite when there are any intervening reads. 282 if (EnablePartialOverwriteTracking && 283 KillingOff < int64_t(DeadOff + DeadSize) && 284 int64_t(KillingOff + KillingSize) >= DeadOff) { 285 286 // Insert our part of the overlap into the map. 287 auto &IM = IOL[DeadI]; 288 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", " 289 << int64_t(DeadOff + DeadSize) << ") KillingLoc [" 290 << KillingOff << ", " << int64_t(KillingOff + KillingSize) 291 << ")\n"); 292 293 // Make sure that we only insert non-overlapping intervals and combine 294 // adjacent intervals. The intervals are stored in the map with the ending 295 // offset as the key (in the half-open sense) and the starting offset as 296 // the value. 297 int64_t KillingIntStart = KillingOff; 298 int64_t KillingIntEnd = KillingOff + KillingSize; 299 300 // Find any intervals ending at, or after, KillingIntStart which start 301 // before KillingIntEnd. 302 auto ILI = IM.lower_bound(KillingIntStart); 303 if (ILI != IM.end() && ILI->second <= KillingIntEnd) { 304 // This existing interval is overlapped with the current store somewhere 305 // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing 306 // intervals and adjusting our start and end. 307 KillingIntStart = std::min(KillingIntStart, ILI->second); 308 KillingIntEnd = std::max(KillingIntEnd, ILI->first); 309 ILI = IM.erase(ILI); 310 311 // Continue erasing and adjusting our end in case other previous 312 // intervals are also overlapped with the current store. 313 // 314 // |--- dead 1 ---| |--- dead 2 ---| 315 // |------- killing---------| 316 // 317 while (ILI != IM.end() && ILI->second <= KillingIntEnd) { 318 assert(ILI->second > KillingIntStart && "Unexpected interval"); 319 KillingIntEnd = std::max(KillingIntEnd, ILI->first); 320 ILI = IM.erase(ILI); 321 } 322 } 323 324 IM[KillingIntEnd] = KillingIntStart; 325 326 ILI = IM.begin(); 327 if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) { 328 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc [" 329 << DeadOff << ", " << int64_t(DeadOff + DeadSize) 330 << ") Composite KillingLoc [" << ILI->second << ", " 331 << ILI->first << ")\n"); 332 ++NumCompletePartials; 333 return OW_Complete; 334 } 335 } 336 337 // Check for a dead store which writes to all the memory locations that 338 // the killing store writes to. 339 if (EnablePartialStoreMerging && KillingOff >= DeadOff && 340 int64_t(DeadOff + DeadSize) > KillingOff && 341 uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) { 342 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead load [" << DeadOff 343 << ", " << int64_t(DeadOff + DeadSize) 344 << ") by a killing store [" << KillingOff << ", " 345 << int64_t(KillingOff + KillingSize) << ")\n"); 346 // TODO: Maybe come up with a better name? 347 return OW_PartialEarlierWithFullLater; 348 } 349 350 // Another interesting case is if the killing store overwrites the end of the 351 // dead store. 352 // 353 // |--dead--| 354 // |-- killing --| 355 // 356 // In this case we may want to trim the size of dead store to avoid 357 // generating stores to addresses which will definitely be overwritten killing 358 // store. 359 if (!EnablePartialOverwriteTracking && 360 (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) && 361 int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize))) 362 return OW_End; 363 364 // Finally, we also need to check if the killing store overwrites the 365 // beginning of the dead store. 366 // 367 // |--dead--| 368 // |-- killing --| 369 // 370 // In this case we may want to move the destination address and trim the size 371 // of dead store to avoid generating stores to addresses which will definitely 372 // be overwritten killing store. 373 if (!EnablePartialOverwriteTracking && 374 (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) { 375 assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) && 376 "Expect to be handled as OW_Complete"); 377 return OW_Begin; 378 } 379 // Otherwise, they don't completely overlap. 380 return OW_Unknown; 381 } 382 383 /// Returns true if the memory which is accessed by the second instruction is not 384 /// modified between the first and the second instruction. 385 /// Precondition: Second instruction must be dominated by the first 386 /// instruction. 387 static bool 388 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, 389 BatchAAResults &AA, const DataLayout &DL, 390 DominatorTree *DT) { 391 // Do a backwards scan through the CFG from SecondI to FirstI. Look for 392 // instructions which can modify the memory location accessed by SecondI. 393 // 394 // While doing the walk keep track of the address to check. It might be 395 // different in different basic blocks due to PHI translation. 396 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>; 397 SmallVector<BlockAddressPair, 16> WorkList; 398 // Keep track of the address we visited each block with. Bail out if we 399 // visit a block with different addresses. 400 DenseMap<BasicBlock *, Value *> Visited; 401 402 BasicBlock::iterator FirstBBI(FirstI); 403 ++FirstBBI; 404 BasicBlock::iterator SecondBBI(SecondI); 405 BasicBlock *FirstBB = FirstI->getParent(); 406 BasicBlock *SecondBB = SecondI->getParent(); 407 MemoryLocation MemLoc; 408 if (auto *MemSet = dyn_cast<MemSetInst>(SecondI)) 409 MemLoc = MemoryLocation::getForDest(MemSet); 410 else 411 MemLoc = MemoryLocation::get(SecondI); 412 413 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr); 414 415 // Start checking the SecondBB. 416 WorkList.push_back( 417 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr))); 418 bool isFirstBlock = true; 419 420 // Check all blocks going backward until we reach the FirstBB. 421 while (!WorkList.empty()) { 422 BlockAddressPair Current = WorkList.pop_back_val(); 423 BasicBlock *B = Current.first; 424 PHITransAddr &Addr = Current.second; 425 Value *Ptr = Addr.getAddr(); 426 427 // Ignore instructions before FirstI if this is the FirstBB. 428 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin()); 429 430 BasicBlock::iterator EI; 431 if (isFirstBlock) { 432 // Ignore instructions after SecondI if this is the first visit of SecondBB. 433 assert(B == SecondBB && "first block is not the store block"); 434 EI = SecondBBI; 435 isFirstBlock = false; 436 } else { 437 // It's not SecondBB or (in case of a loop) the second visit of SecondBB. 438 // In this case we also have to look at instructions after SecondI. 439 EI = B->end(); 440 } 441 for (; BI != EI; ++BI) { 442 Instruction *I = &*BI; 443 if (I->mayWriteToMemory() && I != SecondI) 444 if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr)))) 445 return false; 446 } 447 if (B != FirstBB) { 448 assert(B != &FirstBB->getParent()->getEntryBlock() && 449 "Should not hit the entry block because SI must be dominated by LI"); 450 for (BasicBlock *Pred : predecessors(B)) { 451 PHITransAddr PredAddr = Addr; 452 if (PredAddr.NeedsPHITranslationFromBlock(B)) { 453 if (!PredAddr.IsPotentiallyPHITranslatable()) 454 return false; 455 if (PredAddr.PHITranslateValue(B, Pred, DT, false)) 456 return false; 457 } 458 Value *TranslatedPtr = PredAddr.getAddr(); 459 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr)); 460 if (!Inserted.second) { 461 // We already visited this block before. If it was with a different 462 // address - bail out! 463 if (TranslatedPtr != Inserted.first->second) 464 return false; 465 // ... otherwise just skip it. 466 continue; 467 } 468 WorkList.push_back(std::make_pair(Pred, PredAddr)); 469 } 470 } 471 } 472 return true; 473 } 474 475 static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart, 476 uint64_t &DeadSize, int64_t KillingStart, 477 uint64_t KillingSize, bool IsOverwriteEnd) { 478 auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI); 479 Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne(); 480 481 // We assume that memet/memcpy operates in chunks of the "largest" native 482 // type size and aligned on the same value. That means optimal start and size 483 // of memset/memcpy should be modulo of preferred alignment of that type. That 484 // is it there is no any sense in trying to reduce store size any further 485 // since any "extra" stores comes for free anyway. 486 // On the other hand, maximum alignment we can achieve is limited by alignment 487 // of initial store. 488 489 // TODO: Limit maximum alignment by preferred (or abi?) alignment of the 490 // "largest" native type. 491 // Note: What is the proper way to get that value? 492 // Should TargetTransformInfo::getRegisterBitWidth be used or anything else? 493 // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign); 494 495 int64_t ToRemoveStart = 0; 496 uint64_t ToRemoveSize = 0; 497 // Compute start and size of the region to remove. Make sure 'PrefAlign' is 498 // maintained on the remaining store. 499 if (IsOverwriteEnd) { 500 // Calculate required adjustment for 'KillingStart' in order to keep 501 // remaining store size aligned on 'PerfAlign'. 502 uint64_t Off = 503 offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign); 504 ToRemoveStart = KillingStart + Off; 505 if (DeadSize <= uint64_t(ToRemoveStart - DeadStart)) 506 return false; 507 ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart); 508 } else { 509 ToRemoveStart = DeadStart; 510 assert(KillingSize >= uint64_t(DeadStart - KillingStart) && 511 "Not overlapping accesses?"); 512 ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart); 513 // Calculate required adjustment for 'ToRemoveSize'in order to keep 514 // start of the remaining store aligned on 'PerfAlign'. 515 uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign); 516 if (Off != 0) { 517 if (ToRemoveSize <= (PrefAlign.value() - Off)) 518 return false; 519 ToRemoveSize -= PrefAlign.value() - Off; 520 } 521 assert(isAligned(PrefAlign, ToRemoveSize) && 522 "Should preserve selected alignment"); 523 } 524 525 assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove"); 526 assert(DeadSize > ToRemoveSize && "Can't remove more than original size"); 527 528 uint64_t NewSize = DeadSize - ToRemoveSize; 529 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) { 530 // When shortening an atomic memory intrinsic, the newly shortened 531 // length must remain an integer multiple of the element size. 532 const uint32_t ElementSize = AMI->getElementSizeInBytes(); 533 if (0 != NewSize % ElementSize) 534 return false; 535 } 536 537 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW " 538 << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI 539 << "\n KILLER [" << ToRemoveStart << ", " 540 << int64_t(ToRemoveStart + ToRemoveSize) << ")\n"); 541 542 Value *DeadWriteLength = DeadIntrinsic->getLength(); 543 Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize); 544 DeadIntrinsic->setLength(TrimmedLength); 545 DeadIntrinsic->setDestAlignment(PrefAlign); 546 547 if (!IsOverwriteEnd) { 548 Value *OrigDest = DeadIntrinsic->getRawDest(); 549 Type *Int8PtrTy = 550 Type::getInt8PtrTy(DeadIntrinsic->getContext(), 551 OrigDest->getType()->getPointerAddressSpace()); 552 Value *Dest = OrigDest; 553 if (OrigDest->getType() != Int8PtrTy) 554 Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", DeadI); 555 Value *Indices[1] = { 556 ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)}; 557 Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds( 558 Type::getInt8Ty(DeadIntrinsic->getContext()), Dest, Indices, "", DeadI); 559 NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc()); 560 if (NewDestGEP->getType() != OrigDest->getType()) 561 NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(), 562 "", DeadI); 563 DeadIntrinsic->setDest(NewDestGEP); 564 } 565 566 // Finally update start and size of dead access. 567 if (!IsOverwriteEnd) 568 DeadStart += ToRemoveSize; 569 DeadSize = NewSize; 570 571 return true; 572 } 573 574 static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap, 575 int64_t &DeadStart, uint64_t &DeadSize) { 576 if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI)) 577 return false; 578 579 OverlapIntervalsTy::iterator OII = --IntervalMap.end(); 580 int64_t KillingStart = OII->second; 581 uint64_t KillingSize = OII->first - KillingStart; 582 583 assert(OII->first - KillingStart >= 0 && "Size expected to be positive"); 584 585 if (KillingStart > DeadStart && 586 // Note: "KillingStart - KillingStart" is known to be positive due to 587 // preceding check. 588 (uint64_t)(KillingStart - DeadStart) < DeadSize && 589 // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to 590 // be non negative due to preceding checks. 591 KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) { 592 if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize, 593 true)) { 594 IntervalMap.erase(OII); 595 return true; 596 } 597 } 598 return false; 599 } 600 601 static bool tryToShortenBegin(Instruction *DeadI, 602 OverlapIntervalsTy &IntervalMap, 603 int64_t &DeadStart, uint64_t &DeadSize) { 604 if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI)) 605 return false; 606 607 OverlapIntervalsTy::iterator OII = IntervalMap.begin(); 608 int64_t KillingStart = OII->second; 609 uint64_t KillingSize = OII->first - KillingStart; 610 611 assert(OII->first - KillingStart >= 0 && "Size expected to be positive"); 612 613 if (KillingStart <= DeadStart && 614 // Note: "DeadStart - KillingStart" is known to be non negative due to 615 // preceding check. 616 KillingSize > (uint64_t)(DeadStart - KillingStart)) { 617 // Note: "KillingSize - (uint64_t)(DeadStart - DeadStart)" is known to 618 // be positive due to preceding checks. 619 assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize && 620 "Should have been handled as OW_Complete"); 621 if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize, 622 false)) { 623 IntervalMap.erase(OII); 624 return true; 625 } 626 } 627 return false; 628 } 629 630 static Constant * 631 tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI, 632 int64_t KillingOffset, int64_t DeadOffset, 633 const DataLayout &DL, BatchAAResults &AA, 634 DominatorTree *DT) { 635 636 if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) && 637 DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) && 638 KillingI && isa<ConstantInt>(KillingI->getValueOperand()) && 639 DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) && 640 memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) { 641 // If the store we find is: 642 // a) partially overwritten by the store to 'Loc' 643 // b) the killing store is fully contained in the dead one and 644 // c) they both have a constant value 645 // d) none of the two stores need padding 646 // Merge the two stores, replacing the dead store's value with a 647 // merge of both values. 648 // TODO: Deal with other constant types (vectors, etc), and probably 649 // some mem intrinsics (if needed) 650 651 APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue(); 652 APInt KillingValue = 653 cast<ConstantInt>(KillingI->getValueOperand())->getValue(); 654 unsigned KillingBits = KillingValue.getBitWidth(); 655 assert(DeadValue.getBitWidth() > KillingValue.getBitWidth()); 656 KillingValue = KillingValue.zext(DeadValue.getBitWidth()); 657 658 // Offset of the smaller store inside the larger store 659 unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8; 660 unsigned LShiftAmount = 661 DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits 662 : BitOffsetDiff; 663 APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount, 664 LShiftAmount + KillingBits); 665 // Clear the bits we'll be replacing, then OR with the smaller 666 // store, shifted appropriately. 667 APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount); 668 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Dead: " << *DeadI 669 << "\n Killing: " << *KillingI 670 << "\n Merged Value: " << Merged << '\n'); 671 return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged); 672 } 673 return nullptr; 674 } 675 676 namespace { 677 // Returns true if \p I is an intrisnic that does not read or write memory. 678 bool isNoopIntrinsic(Instruction *I) { 679 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 680 switch (II->getIntrinsicID()) { 681 case Intrinsic::lifetime_start: 682 case Intrinsic::lifetime_end: 683 case Intrinsic::invariant_end: 684 case Intrinsic::launder_invariant_group: 685 case Intrinsic::assume: 686 return true; 687 case Intrinsic::dbg_addr: 688 case Intrinsic::dbg_declare: 689 case Intrinsic::dbg_label: 690 case Intrinsic::dbg_value: 691 llvm_unreachable("Intrinsic should not be modeled in MemorySSA"); 692 default: 693 return false; 694 } 695 } 696 return false; 697 } 698 699 // Check if we can ignore \p D for DSE. 700 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) { 701 Instruction *DI = D->getMemoryInst(); 702 // Calls that only access inaccessible memory cannot read or write any memory 703 // locations we consider for elimination. 704 if (auto *CB = dyn_cast<CallBase>(DI)) 705 if (CB->onlyAccessesInaccessibleMemory()) 706 return true; 707 708 // We can eliminate stores to locations not visible to the caller across 709 // throwing instructions. 710 if (DI->mayThrow() && !DefVisibleToCaller) 711 return true; 712 713 // We can remove the dead stores, irrespective of the fence and its ordering 714 // (release/acquire/seq_cst). Fences only constraints the ordering of 715 // already visible stores, it does not make a store visible to other 716 // threads. So, skipping over a fence does not change a store from being 717 // dead. 718 if (isa<FenceInst>(DI)) 719 return true; 720 721 // Skip intrinsics that do not really read or modify memory. 722 if (isNoopIntrinsic(DI)) 723 return true; 724 725 return false; 726 } 727 728 struct DSEState { 729 Function &F; 730 AliasAnalysis &AA; 731 EarliestEscapeInfo EI; 732 733 /// The single BatchAA instance that is used to cache AA queries. It will 734 /// not be invalidated over the whole run. This is safe, because: 735 /// 1. Only memory writes are removed, so the alias cache for memory 736 /// locations remains valid. 737 /// 2. No new instructions are added (only instructions removed), so cached 738 /// information for a deleted value cannot be accessed by a re-used new 739 /// value pointer. 740 BatchAAResults BatchAA; 741 742 MemorySSA &MSSA; 743 DominatorTree &DT; 744 PostDominatorTree &PDT; 745 const TargetLibraryInfo &TLI; 746 const DataLayout &DL; 747 const LoopInfo &LI; 748 749 // Whether the function contains any irreducible control flow, useful for 750 // being accurately able to detect loops. 751 bool ContainsIrreducibleLoops; 752 753 // All MemoryDefs that potentially could kill other MemDefs. 754 SmallVector<MemoryDef *, 64> MemDefs; 755 // Any that should be skipped as they are already deleted 756 SmallPtrSet<MemoryAccess *, 4> SkipStores; 757 // Keep track whether a given object is captured before return or not. 758 DenseMap<const Value *, bool> CapturedBeforeReturn; 759 // Keep track of all of the objects that are invisible to the caller after 760 // the function returns. 761 DenseMap<const Value *, bool> InvisibleToCallerAfterRet; 762 // Keep track of blocks with throwing instructions not modeled in MemorySSA. 763 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks; 764 // Post-order numbers for each basic block. Used to figure out if memory 765 // accesses are executed before another access. 766 DenseMap<BasicBlock *, unsigned> PostOrderNumbers; 767 // Values that are only used with assumes. Used to refine pointer escape 768 // analysis. 769 SmallPtrSet<const Value *, 32> EphValues; 770 771 /// Keep track of instructions (partly) overlapping with killing MemoryDefs per 772 /// basic block. 773 MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs; 774 // Check if there are root nodes that are terminated by UnreachableInst. 775 // Those roots pessimize post-dominance queries. If there are such roots, 776 // fall back to CFG scan starting from all non-unreachable roots. 777 bool AnyUnreachableExit; 778 779 // Whether or not we should iterate on removing dead stores at the end of the 780 // function due to removing a store causing a previously captured pointer to 781 // no longer be captured. 782 bool ShouldIterateEndOfFunctionDSE; 783 784 // Class contains self-reference, make sure it's not copied/moved. 785 DSEState(const DSEState &) = delete; 786 DSEState &operator=(const DSEState &) = delete; 787 788 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT, 789 PostDominatorTree &PDT, AssumptionCache &AC, 790 const TargetLibraryInfo &TLI, const LoopInfo &LI) 791 : F(F), AA(AA), EI(DT, LI, EphValues), BatchAA(AA, &EI), MSSA(MSSA), 792 DT(DT), PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) { 793 // Collect blocks with throwing instructions not modeled in MemorySSA and 794 // alloc-like objects. 795 unsigned PO = 0; 796 for (BasicBlock *BB : post_order(&F)) { 797 PostOrderNumbers[BB] = PO++; 798 for (Instruction &I : *BB) { 799 MemoryAccess *MA = MSSA.getMemoryAccess(&I); 800 if (I.mayThrow() && !MA) 801 ThrowingBlocks.insert(I.getParent()); 802 803 auto *MD = dyn_cast_or_null<MemoryDef>(MA); 804 if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit && 805 (getLocForWrite(&I) || isMemTerminatorInst(&I))) 806 MemDefs.push_back(MD); 807 } 808 } 809 810 // Treat byval or inalloca arguments the same as Allocas, stores to them are 811 // dead at the end of the function. 812 for (Argument &AI : F.args()) 813 if (AI.hasPassPointeeByValueCopyAttr()) 814 InvisibleToCallerAfterRet.insert({&AI, true}); 815 816 // Collect whether there is any irreducible control flow in the function. 817 ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI); 818 819 AnyUnreachableExit = any_of(PDT.roots(), [](const BasicBlock *E) { 820 return isa<UnreachableInst>(E->getTerminator()); 821 }); 822 823 CodeMetrics::collectEphemeralValues(&F, &AC, EphValues); 824 } 825 826 /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p 827 /// KillingI instruction) completely overwrites a store to the 'DeadLoc' 828 /// location (by \p DeadI instruction). 829 /// Return OW_MaybePartial if \p KillingI does not completely overwrite 830 /// \p DeadI, but they both write to the same underlying object. In that 831 /// case, use isPartialOverwrite to check if \p KillingI partially overwrites 832 /// \p DeadI. Returns 'OR_None' if \p KillingI is known to not overwrite the 833 /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined. 834 OverwriteResult isOverwrite(const Instruction *KillingI, 835 const Instruction *DeadI, 836 const MemoryLocation &KillingLoc, 837 const MemoryLocation &DeadLoc, 838 int64_t &KillingOff, int64_t &DeadOff) { 839 // AliasAnalysis does not always account for loops. Limit overwrite checks 840 // to dependencies for which we can guarantee they are independent of any 841 // loops they are in. 842 if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc)) 843 return OW_Unknown; 844 845 const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts(); 846 const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts(); 847 const Value *DeadUndObj = getUnderlyingObject(DeadPtr); 848 const Value *KillingUndObj = getUnderlyingObject(KillingPtr); 849 850 // Check whether the killing store overwrites the whole object, in which 851 // case the size/offset of the dead store does not matter. 852 if (DeadUndObj == KillingUndObj && KillingLoc.Size.isPrecise()) { 853 uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F); 854 if (KillingUndObjSize != MemoryLocation::UnknownSize && 855 KillingUndObjSize == KillingLoc.Size.getValue()) 856 return OW_Complete; 857 } 858 859 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll 860 // get imprecise values here, though (except for unknown sizes). 861 if (!KillingLoc.Size.isPrecise() || !DeadLoc.Size.isPrecise()) { 862 // In case no constant size is known, try to an IR values for the number 863 // of bytes written and check if they match. 864 const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI); 865 const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI); 866 if (KillingMemI && DeadMemI) { 867 const Value *KillingV = KillingMemI->getLength(); 868 const Value *DeadV = DeadMemI->getLength(); 869 if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc)) 870 return OW_Complete; 871 } 872 873 // Masked stores have imprecise locations, but we can reason about them 874 // to some extent. 875 return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA); 876 } 877 878 const uint64_t KillingSize = KillingLoc.Size.getValue(); 879 const uint64_t DeadSize = DeadLoc.Size.getValue(); 880 881 // Query the alias information 882 AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc); 883 884 // If the start pointers are the same, we just have to compare sizes to see if 885 // the killing store was larger than the dead store. 886 if (AAR == AliasResult::MustAlias) { 887 // Make sure that the KillingSize size is >= the DeadSize size. 888 if (KillingSize >= DeadSize) 889 return OW_Complete; 890 } 891 892 // If we hit a partial alias we may have a full overwrite 893 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) { 894 int32_t Off = AAR.getOffset(); 895 if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize) 896 return OW_Complete; 897 } 898 899 // If we can't resolve the same pointers to the same object, then we can't 900 // analyze them at all. 901 if (DeadUndObj != KillingUndObj) { 902 // Non aliasing stores to different objects don't overlap. Note that 903 // if the killing store is known to overwrite whole object (out of 904 // bounds access overwrites whole object as well) then it is assumed to 905 // completely overwrite any store to the same object even if they don't 906 // actually alias (see next check). 907 if (AAR == AliasResult::NoAlias) 908 return OW_None; 909 return OW_Unknown; 910 } 911 912 // Okay, we have stores to two completely different pointers. Try to 913 // decompose the pointer into a "base + constant_offset" form. If the base 914 // pointers are equal, then we can reason about the two stores. 915 DeadOff = 0; 916 KillingOff = 0; 917 const Value *DeadBasePtr = 918 GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL); 919 const Value *KillingBasePtr = 920 GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL); 921 922 // If the base pointers still differ, we have two completely different 923 // stores. 924 if (DeadBasePtr != KillingBasePtr) 925 return OW_Unknown; 926 927 // The killing access completely overlaps the dead store if and only if 928 // both start and end of the dead one is "inside" the killing one: 929 // |<->|--dead--|<->| 930 // |-----killing------| 931 // Accesses may overlap if and only if start of one of them is "inside" 932 // another one: 933 // |<->|--dead--|<-------->| 934 // |-------killing--------| 935 // OR 936 // |-------dead-------| 937 // |<->|---killing---|<----->| 938 // 939 // We have to be careful here as *Off is signed while *.Size is unsigned. 940 941 // Check if the dead access starts "not before" the killing one. 942 if (DeadOff >= KillingOff) { 943 // If the dead access ends "not after" the killing access then the 944 // dead one is completely overwritten by the killing one. 945 if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize) 946 return OW_Complete; 947 // If start of the dead access is "before" end of the killing access 948 // then accesses overlap. 949 else if ((uint64_t)(DeadOff - KillingOff) < KillingSize) 950 return OW_MaybePartial; 951 } 952 // If start of the killing access is "before" end of the dead access then 953 // accesses overlap. 954 else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) { 955 return OW_MaybePartial; 956 } 957 958 // Can reach here only if accesses are known not to overlap. 959 return OW_None; 960 } 961 962 bool isInvisibleToCallerAfterRet(const Value *V) { 963 if (isa<AllocaInst>(V)) 964 return true; 965 auto I = InvisibleToCallerAfterRet.insert({V, false}); 966 if (I.second) { 967 if (!isInvisibleToCallerOnUnwind(V)) { 968 I.first->second = false; 969 } else if (isNoAliasCall(V)) { 970 I.first->second = !PointerMayBeCaptured(V, true, false, EphValues); 971 } 972 } 973 return I.first->second; 974 } 975 976 bool isInvisibleToCallerOnUnwind(const Value *V) { 977 bool RequiresNoCaptureBeforeUnwind; 978 if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind)) 979 return false; 980 if (!RequiresNoCaptureBeforeUnwind) 981 return true; 982 983 auto I = CapturedBeforeReturn.insert({V, true}); 984 if (I.second) 985 // NOTE: This could be made more precise by PointerMayBeCapturedBefore 986 // with the killing MemoryDef. But we refrain from doing so for now to 987 // limit compile-time and this does not cause any changes to the number 988 // of stores removed on a large test set in practice. 989 I.first->second = PointerMayBeCaptured(V, false, true, EphValues); 990 return !I.first->second; 991 } 992 993 Optional<MemoryLocation> getLocForWrite(Instruction *I) const { 994 if (!I->mayWriteToMemory()) 995 return None; 996 997 if (auto *CB = dyn_cast<CallBase>(I)) 998 return MemoryLocation::getForDest(CB, TLI); 999 1000 return MemoryLocation::getOrNone(I); 1001 } 1002 1003 /// Assuming this instruction has a dead analyzable write, can we delete 1004 /// this instruction? 1005 bool isRemovable(Instruction *I) { 1006 assert(getLocForWrite(I) && "Must have analyzable write"); 1007 1008 // Don't remove volatile/atomic stores. 1009 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 1010 return SI->isUnordered(); 1011 1012 if (auto *CB = dyn_cast<CallBase>(I)) { 1013 // Don't remove volatile memory intrinsics. 1014 if (auto *MI = dyn_cast<MemIntrinsic>(CB)) 1015 return !MI->isVolatile(); 1016 1017 // Never remove dead lifetime intrinsics, e.g. because they are followed 1018 // by a free. 1019 if (CB->isLifetimeStartOrEnd()) 1020 return false; 1021 1022 return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() && 1023 !CB->isTerminator(); 1024 } 1025 1026 return false; 1027 } 1028 1029 /// Returns true if \p UseInst completely overwrites \p DefLoc 1030 /// (stored by \p DefInst). 1031 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst, 1032 Instruction *UseInst) { 1033 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a 1034 // MemoryDef to not write to memory, e.g. a volatile load is modeled as a 1035 // MemoryDef. 1036 if (!UseInst->mayWriteToMemory()) 1037 return false; 1038 1039 if (auto *CB = dyn_cast<CallBase>(UseInst)) 1040 if (CB->onlyAccessesInaccessibleMemory()) 1041 return false; 1042 1043 int64_t InstWriteOffset, DepWriteOffset; 1044 if (auto CC = getLocForWrite(UseInst)) 1045 return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset, 1046 DepWriteOffset) == OW_Complete; 1047 return false; 1048 } 1049 1050 /// Returns true if \p Def is not read before returning from the function. 1051 bool isWriteAtEndOfFunction(MemoryDef *Def) { 1052 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " (" 1053 << *Def->getMemoryInst() 1054 << ") is at the end the function \n"); 1055 1056 auto MaybeLoc = getLocForWrite(Def->getMemoryInst()); 1057 if (!MaybeLoc) { 1058 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n"); 1059 return false; 1060 } 1061 1062 SmallVector<MemoryAccess *, 4> WorkList; 1063 SmallPtrSet<MemoryAccess *, 8> Visited; 1064 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) { 1065 if (!Visited.insert(Acc).second) 1066 return; 1067 for (Use &U : Acc->uses()) 1068 WorkList.push_back(cast<MemoryAccess>(U.getUser())); 1069 }; 1070 PushMemUses(Def); 1071 for (unsigned I = 0; I < WorkList.size(); I++) { 1072 if (WorkList.size() >= MemorySSAScanLimit) { 1073 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n"); 1074 return false; 1075 } 1076 1077 MemoryAccess *UseAccess = WorkList[I]; 1078 // Simply adding the users of MemoryPhi to the worklist is not enough, 1079 // because we might miss read clobbers in different iterations of a loop, 1080 // for example. 1081 // TODO: Add support for phi translation to handle the loop case. 1082 if (isa<MemoryPhi>(UseAccess)) 1083 return false; 1084 1085 // TODO: Checking for aliasing is expensive. Consider reducing the amount 1086 // of times this is called and/or caching it. 1087 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); 1088 if (isReadClobber(*MaybeLoc, UseInst)) { 1089 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n"); 1090 return false; 1091 } 1092 1093 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) 1094 PushMemUses(UseDef); 1095 } 1096 return true; 1097 } 1098 1099 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a 1100 /// pair with the MemoryLocation terminated by \p I and a boolean flag 1101 /// indicating whether \p I is a free-like call. 1102 Optional<std::pair<MemoryLocation, bool>> 1103 getLocForTerminator(Instruction *I) const { 1104 uint64_t Len; 1105 Value *Ptr; 1106 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len), 1107 m_Value(Ptr)))) 1108 return {std::make_pair(MemoryLocation(Ptr, Len), false)}; 1109 1110 if (auto *CB = dyn_cast<CallBase>(I)) { 1111 if (Value *FreedOp = getFreedOperand(CB, &TLI)) 1112 return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)}; 1113 } 1114 1115 return None; 1116 } 1117 1118 /// Returns true if \p I is a memory terminator instruction like 1119 /// llvm.lifetime.end or free. 1120 bool isMemTerminatorInst(Instruction *I) const { 1121 auto *CB = dyn_cast<CallBase>(I); 1122 return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end || 1123 getFreedOperand(CB, &TLI) != nullptr); 1124 } 1125 1126 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from 1127 /// instruction \p AccessI. 1128 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI, 1129 Instruction *MaybeTerm) { 1130 Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc = 1131 getLocForTerminator(MaybeTerm); 1132 1133 if (!MaybeTermLoc) 1134 return false; 1135 1136 // If the terminator is a free-like call, all accesses to the underlying 1137 // object can be considered terminated. 1138 if (getUnderlyingObject(Loc.Ptr) != 1139 getUnderlyingObject(MaybeTermLoc->first.Ptr)) 1140 return false; 1141 1142 auto TermLoc = MaybeTermLoc->first; 1143 if (MaybeTermLoc->second) { 1144 const Value *LocUO = getUnderlyingObject(Loc.Ptr); 1145 return BatchAA.isMustAlias(TermLoc.Ptr, LocUO); 1146 } 1147 int64_t InstWriteOffset = 0; 1148 int64_t DepWriteOffset = 0; 1149 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset, 1150 DepWriteOffset) == OW_Complete; 1151 } 1152 1153 // Returns true if \p Use may read from \p DefLoc. 1154 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) { 1155 if (isNoopIntrinsic(UseInst)) 1156 return false; 1157 1158 // Monotonic or weaker atomic stores can be re-ordered and do not need to be 1159 // treated as read clobber. 1160 if (auto SI = dyn_cast<StoreInst>(UseInst)) 1161 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic); 1162 1163 if (!UseInst->mayReadFromMemory()) 1164 return false; 1165 1166 if (auto *CB = dyn_cast<CallBase>(UseInst)) 1167 if (CB->onlyAccessesInaccessibleMemory()) 1168 return false; 1169 1170 return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc)); 1171 } 1172 1173 /// Returns true if a dependency between \p Current and \p KillingDef is 1174 /// guaranteed to be loop invariant for the loops that they are in. Either 1175 /// because they are known to be in the same block, in the same loop level or 1176 /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation 1177 /// during execution of the containing function. 1178 bool isGuaranteedLoopIndependent(const Instruction *Current, 1179 const Instruction *KillingDef, 1180 const MemoryLocation &CurrentLoc) { 1181 // If the dependency is within the same block or loop level (being careful 1182 // of irreducible loops), we know that AA will return a valid result for the 1183 // memory dependency. (Both at the function level, outside of any loop, 1184 // would also be valid but we currently disable that to limit compile time). 1185 if (Current->getParent() == KillingDef->getParent()) 1186 return true; 1187 const Loop *CurrentLI = LI.getLoopFor(Current->getParent()); 1188 if (!ContainsIrreducibleLoops && CurrentLI && 1189 CurrentLI == LI.getLoopFor(KillingDef->getParent())) 1190 return true; 1191 // Otherwise check the memory location is invariant to any loops. 1192 return isGuaranteedLoopInvariant(CurrentLoc.Ptr); 1193 } 1194 1195 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible 1196 /// loop. In particular, this guarantees that it only references a single 1197 /// MemoryLocation during execution of the containing function. 1198 bool isGuaranteedLoopInvariant(const Value *Ptr) { 1199 Ptr = Ptr->stripPointerCasts(); 1200 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) 1201 if (GEP->hasAllConstantIndices()) 1202 Ptr = GEP->getPointerOperand()->stripPointerCasts(); 1203 1204 if (auto *I = dyn_cast<Instruction>(Ptr)) 1205 return I->getParent()->isEntryBlock(); 1206 return true; 1207 } 1208 1209 // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess, 1210 // with no read access between them or on any other path to a function exit 1211 // block if \p KillingLoc is not accessible after the function returns. If 1212 // there is no such MemoryDef, return None. The returned value may not 1213 // (completely) overwrite \p KillingLoc. Currently we bail out when we 1214 // encounter an aliasing MemoryUse (read). 1215 Optional<MemoryAccess *> 1216 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess, 1217 const MemoryLocation &KillingLoc, const Value *KillingUndObj, 1218 unsigned &ScanLimit, unsigned &WalkerStepLimit, 1219 bool IsMemTerm, unsigned &PartialLimit) { 1220 if (ScanLimit == 0 || WalkerStepLimit == 0) { 1221 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); 1222 return None; 1223 } 1224 1225 MemoryAccess *Current = StartAccess; 1226 Instruction *KillingI = KillingDef->getMemoryInst(); 1227 LLVM_DEBUG(dbgs() << " trying to get dominating access\n"); 1228 1229 // Only optimize defining access of KillingDef when directly starting at its 1230 // defining access. The defining access also must only access KillingLoc. At 1231 // the moment we only support instructions with a single write location, so 1232 // it should be sufficient to disable optimizations for instructions that 1233 // also read from memory. 1234 bool CanOptimize = OptimizeMemorySSA && 1235 KillingDef->getDefiningAccess() == StartAccess && 1236 !KillingI->mayReadFromMemory(); 1237 1238 // Find the next clobbering Mod access for DefLoc, starting at StartAccess. 1239 Optional<MemoryLocation> CurrentLoc; 1240 for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) { 1241 LLVM_DEBUG({ 1242 dbgs() << " visiting " << *Current; 1243 if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current)) 1244 dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst() 1245 << ")"; 1246 dbgs() << "\n"; 1247 }); 1248 1249 // Reached TOP. 1250 if (MSSA.isLiveOnEntryDef(Current)) { 1251 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n"); 1252 if (CanOptimize && Current != KillingDef->getDefiningAccess()) 1253 // The first clobbering def is... none. 1254 KillingDef->setOptimized(Current); 1255 return None; 1256 } 1257 1258 // Cost of a step. Accesses in the same block are more likely to be valid 1259 // candidates for elimination, hence consider them cheaper. 1260 unsigned StepCost = KillingDef->getBlock() == Current->getBlock() 1261 ? MemorySSASameBBStepCost 1262 : MemorySSAOtherBBStepCost; 1263 if (WalkerStepLimit <= StepCost) { 1264 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n"); 1265 return None; 1266 } 1267 WalkerStepLimit -= StepCost; 1268 1269 // Return for MemoryPhis. They cannot be eliminated directly and the 1270 // caller is responsible for traversing them. 1271 if (isa<MemoryPhi>(Current)) { 1272 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n"); 1273 return Current; 1274 } 1275 1276 // Below, check if CurrentDef is a valid candidate to be eliminated by 1277 // KillingDef. If it is not, check the next candidate. 1278 MemoryDef *CurrentDef = cast<MemoryDef>(Current); 1279 Instruction *CurrentI = CurrentDef->getMemoryInst(); 1280 1281 if (canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) { 1282 CanOptimize = false; 1283 continue; 1284 } 1285 1286 // Before we try to remove anything, check for any extra throwing 1287 // instructions that block us from DSEing 1288 if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) { 1289 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n"); 1290 return None; 1291 } 1292 1293 // Check for anything that looks like it will be a barrier to further 1294 // removal 1295 if (isDSEBarrier(KillingUndObj, CurrentI)) { 1296 LLVM_DEBUG(dbgs() << " ... skip, barrier\n"); 1297 return None; 1298 } 1299 1300 // If Current is known to be on path that reads DefLoc or is a read 1301 // clobber, bail out, as the path is not profitable. We skip this check 1302 // for intrinsic calls, because the code knows how to handle memcpy 1303 // intrinsics. 1304 if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI)) 1305 return None; 1306 1307 // Quick check if there are direct uses that are read-clobbers. 1308 if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) { 1309 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser())) 1310 return !MSSA.dominates(StartAccess, UseOrDef) && 1311 isReadClobber(KillingLoc, UseOrDef->getMemoryInst()); 1312 return false; 1313 })) { 1314 LLVM_DEBUG(dbgs() << " ... found a read clobber\n"); 1315 return None; 1316 } 1317 1318 // If Current does not have an analyzable write location or is not 1319 // removable, skip it. 1320 CurrentLoc = getLocForWrite(CurrentI); 1321 if (!CurrentLoc || !isRemovable(CurrentI)) { 1322 CanOptimize = false; 1323 continue; 1324 } 1325 1326 // AliasAnalysis does not account for loops. Limit elimination to 1327 // candidates for which we can guarantee they always store to the same 1328 // memory location and not located in different loops. 1329 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) { 1330 LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n"); 1331 CanOptimize = false; 1332 continue; 1333 } 1334 1335 if (IsMemTerm) { 1336 // If the killing def is a memory terminator (e.g. lifetime.end), check 1337 // the next candidate if the current Current does not write the same 1338 // underlying object as the terminator. 1339 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) { 1340 CanOptimize = false; 1341 continue; 1342 } 1343 } else { 1344 int64_t KillingOffset = 0; 1345 int64_t DeadOffset = 0; 1346 auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc, 1347 KillingOffset, DeadOffset); 1348 if (CanOptimize) { 1349 // CurrentDef is the earliest write clobber of KillingDef. Use it as 1350 // optimized access. Do not optimize if CurrentDef is already the 1351 // defining access of KillingDef. 1352 if (CurrentDef != KillingDef->getDefiningAccess() && 1353 (OR == OW_Complete || OR == OW_MaybePartial)) 1354 KillingDef->setOptimized(CurrentDef); 1355 1356 // Once a may-aliasing def is encountered do not set an optimized 1357 // access. 1358 if (OR != OW_None) 1359 CanOptimize = false; 1360 } 1361 1362 // If Current does not write to the same object as KillingDef, check 1363 // the next candidate. 1364 if (OR == OW_Unknown || OR == OW_None) 1365 continue; 1366 else if (OR == OW_MaybePartial) { 1367 // If KillingDef only partially overwrites Current, check the next 1368 // candidate if the partial step limit is exceeded. This aggressively 1369 // limits the number of candidates for partial store elimination, 1370 // which are less likely to be removable in the end. 1371 if (PartialLimit <= 1) { 1372 WalkerStepLimit -= 1; 1373 LLVM_DEBUG(dbgs() << " ... reached partial limit ... continue with next access\n"); 1374 continue; 1375 } 1376 PartialLimit -= 1; 1377 } 1378 } 1379 break; 1380 }; 1381 1382 // Accesses to objects accessible after the function returns can only be 1383 // eliminated if the access is dead along all paths to the exit. Collect 1384 // the blocks with killing (=completely overwriting MemoryDefs) and check if 1385 // they cover all paths from MaybeDeadAccess to any function exit. 1386 SmallPtrSet<Instruction *, 16> KillingDefs; 1387 KillingDefs.insert(KillingDef->getMemoryInst()); 1388 MemoryAccess *MaybeDeadAccess = Current; 1389 MemoryLocation MaybeDeadLoc = *CurrentLoc; 1390 Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst(); 1391 LLVM_DEBUG(dbgs() << " Checking for reads of " << *MaybeDeadAccess << " (" 1392 << *MaybeDeadI << ")\n"); 1393 1394 SmallSetVector<MemoryAccess *, 32> WorkList; 1395 auto PushMemUses = [&WorkList](MemoryAccess *Acc) { 1396 for (Use &U : Acc->uses()) 1397 WorkList.insert(cast<MemoryAccess>(U.getUser())); 1398 }; 1399 PushMemUses(MaybeDeadAccess); 1400 1401 // Check if DeadDef may be read. 1402 for (unsigned I = 0; I < WorkList.size(); I++) { 1403 MemoryAccess *UseAccess = WorkList[I]; 1404 1405 LLVM_DEBUG(dbgs() << " " << *UseAccess); 1406 // Bail out if the number of accesses to check exceeds the scan limit. 1407 if (ScanLimit < (WorkList.size() - I)) { 1408 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n"); 1409 return None; 1410 } 1411 --ScanLimit; 1412 NumDomMemDefChecks++; 1413 1414 if (isa<MemoryPhi>(UseAccess)) { 1415 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) { 1416 return DT.properlyDominates(KI->getParent(), 1417 UseAccess->getBlock()); 1418 })) { 1419 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n"); 1420 continue; 1421 } 1422 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n"); 1423 PushMemUses(UseAccess); 1424 continue; 1425 } 1426 1427 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); 1428 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n"); 1429 1430 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) { 1431 return DT.dominates(KI, UseInst); 1432 })) { 1433 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n"); 1434 continue; 1435 } 1436 1437 // A memory terminator kills all preceeding MemoryDefs and all succeeding 1438 // MemoryAccesses. We do not have to check it's users. 1439 if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) { 1440 LLVM_DEBUG( 1441 dbgs() 1442 << " ... skipping, memterminator invalidates following accesses\n"); 1443 continue; 1444 } 1445 1446 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) { 1447 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n"); 1448 PushMemUses(UseAccess); 1449 continue; 1450 } 1451 1452 if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) { 1453 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n"); 1454 return None; 1455 } 1456 1457 // Uses which may read the original MemoryDef mean we cannot eliminate the 1458 // original MD. Stop walk. 1459 if (isReadClobber(MaybeDeadLoc, UseInst)) { 1460 LLVM_DEBUG(dbgs() << " ... found read clobber\n"); 1461 return None; 1462 } 1463 1464 // If this worklist walks back to the original memory access (and the 1465 // pointer is not guarenteed loop invariant) then we cannot assume that a 1466 // store kills itself. 1467 if (MaybeDeadAccess == UseAccess && 1468 !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) { 1469 LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n"); 1470 return None; 1471 } 1472 // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check 1473 // if it reads the memory location. 1474 // TODO: It would probably be better to check for self-reads before 1475 // calling the function. 1476 if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) { 1477 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n"); 1478 continue; 1479 } 1480 1481 // Check all uses for MemoryDefs, except for defs completely overwriting 1482 // the original location. Otherwise we have to check uses of *all* 1483 // MemoryDefs we discover, including non-aliasing ones. Otherwise we might 1484 // miss cases like the following 1485 // 1 = Def(LoE) ; <----- DeadDef stores [0,1] 1486 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3] 1487 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3]. 1488 // (The Use points to the *first* Def it may alias) 1489 // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias, 1490 // stores [0,1] 1491 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) { 1492 if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) { 1493 BasicBlock *MaybeKillingBlock = UseInst->getParent(); 1494 if (PostOrderNumbers.find(MaybeKillingBlock)->second < 1495 PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) { 1496 if (!isInvisibleToCallerAfterRet(KillingUndObj)) { 1497 LLVM_DEBUG(dbgs() 1498 << " ... found killing def " << *UseInst << "\n"); 1499 KillingDefs.insert(UseInst); 1500 } 1501 } else { 1502 LLVM_DEBUG(dbgs() 1503 << " ... found preceeding def " << *UseInst << "\n"); 1504 return None; 1505 } 1506 } else 1507 PushMemUses(UseDef); 1508 } 1509 } 1510 1511 // For accesses to locations visible after the function returns, make sure 1512 // that the location is dead (=overwritten) along all paths from 1513 // MaybeDeadAccess to the exit. 1514 if (!isInvisibleToCallerAfterRet(KillingUndObj)) { 1515 SmallPtrSet<BasicBlock *, 16> KillingBlocks; 1516 for (Instruction *KD : KillingDefs) 1517 KillingBlocks.insert(KD->getParent()); 1518 assert(!KillingBlocks.empty() && 1519 "Expected at least a single killing block"); 1520 1521 // Find the common post-dominator of all killing blocks. 1522 BasicBlock *CommonPred = *KillingBlocks.begin(); 1523 for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) { 1524 if (!CommonPred) 1525 break; 1526 CommonPred = PDT.findNearestCommonDominator(CommonPred, BB); 1527 } 1528 1529 // If the common post-dominator does not post-dominate MaybeDeadAccess, 1530 // there is a path from MaybeDeadAccess to an exit not going through a 1531 // killing block. 1532 if (!PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) { 1533 if (!AnyUnreachableExit) 1534 return None; 1535 1536 // Fall back to CFG scan starting at all non-unreachable roots if not 1537 // all paths to the exit go through CommonPred. 1538 CommonPred = nullptr; 1539 } 1540 1541 // If CommonPred itself is in the set of killing blocks, we're done. 1542 if (KillingBlocks.count(CommonPred)) 1543 return {MaybeDeadAccess}; 1544 1545 SetVector<BasicBlock *> WorkList; 1546 // If CommonPred is null, there are multiple exits from the function. 1547 // They all have to be added to the worklist. 1548 if (CommonPred) 1549 WorkList.insert(CommonPred); 1550 else 1551 for (BasicBlock *R : PDT.roots()) { 1552 if (!isa<UnreachableInst>(R->getTerminator())) 1553 WorkList.insert(R); 1554 } 1555 1556 NumCFGTries++; 1557 // Check if all paths starting from an exit node go through one of the 1558 // killing blocks before reaching MaybeDeadAccess. 1559 for (unsigned I = 0; I < WorkList.size(); I++) { 1560 NumCFGChecks++; 1561 BasicBlock *Current = WorkList[I]; 1562 if (KillingBlocks.count(Current)) 1563 continue; 1564 if (Current == MaybeDeadAccess->getBlock()) 1565 return None; 1566 1567 // MaybeDeadAccess is reachable from the entry, so we don't have to 1568 // explore unreachable blocks further. 1569 if (!DT.isReachableFromEntry(Current)) 1570 continue; 1571 1572 for (BasicBlock *Pred : predecessors(Current)) 1573 WorkList.insert(Pred); 1574 1575 if (WorkList.size() >= MemorySSAPathCheckLimit) 1576 return None; 1577 } 1578 NumCFGSuccess++; 1579 } 1580 1581 // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is 1582 // potentially dead. 1583 return {MaybeDeadAccess}; 1584 } 1585 1586 // Delete dead memory defs 1587 void deleteDeadInstruction(Instruction *SI) { 1588 MemorySSAUpdater Updater(&MSSA); 1589 SmallVector<Instruction *, 32> NowDeadInsts; 1590 NowDeadInsts.push_back(SI); 1591 --NumFastOther; 1592 1593 while (!NowDeadInsts.empty()) { 1594 Instruction *DeadInst = NowDeadInsts.pop_back_val(); 1595 ++NumFastOther; 1596 1597 // Try to preserve debug information attached to the dead instruction. 1598 salvageDebugInfo(*DeadInst); 1599 salvageKnowledge(DeadInst); 1600 1601 // Remove the Instruction from MSSA. 1602 if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) { 1603 if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) { 1604 SkipStores.insert(MD); 1605 if (auto *SI = dyn_cast<StoreInst>(MD->getMemoryInst())) { 1606 if (SI->getValueOperand()->getType()->isPointerTy()) { 1607 const Value *UO = getUnderlyingObject(SI->getValueOperand()); 1608 if (CapturedBeforeReturn.erase(UO)) 1609 ShouldIterateEndOfFunctionDSE = true; 1610 InvisibleToCallerAfterRet.erase(UO); 1611 } 1612 } 1613 } 1614 1615 Updater.removeMemoryAccess(MA); 1616 } 1617 1618 auto I = IOLs.find(DeadInst->getParent()); 1619 if (I != IOLs.end()) 1620 I->second.erase(DeadInst); 1621 // Remove its operands 1622 for (Use &O : DeadInst->operands()) 1623 if (Instruction *OpI = dyn_cast<Instruction>(O)) { 1624 O = nullptr; 1625 if (isInstructionTriviallyDead(OpI, &TLI)) 1626 NowDeadInsts.push_back(OpI); 1627 } 1628 1629 EI.removeInstruction(DeadInst); 1630 DeadInst->eraseFromParent(); 1631 } 1632 } 1633 1634 // Check for any extra throws between \p KillingI and \p DeadI that block 1635 // DSE. This only checks extra maythrows (those that aren't MemoryDef's). 1636 // MemoryDef that may throw are handled during the walk from one def to the 1637 // next. 1638 bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI, 1639 const Value *KillingUndObj) { 1640 // First see if we can ignore it by using the fact that KillingI is an 1641 // alloca/alloca like object that is not visible to the caller during 1642 // execution of the function. 1643 if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj)) 1644 return false; 1645 1646 if (KillingI->getParent() == DeadI->getParent()) 1647 return ThrowingBlocks.count(KillingI->getParent()); 1648 return !ThrowingBlocks.empty(); 1649 } 1650 1651 // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following 1652 // instructions act as barriers: 1653 // * A memory instruction that may throw and \p KillingI accesses a non-stack 1654 // object. 1655 // * Atomic stores stronger that monotonic. 1656 bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) { 1657 // If DeadI may throw it acts as a barrier, unless we are to an 1658 // alloca/alloca like object that does not escape. 1659 if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) 1660 return true; 1661 1662 // If DeadI is an atomic load/store stronger than monotonic, do not try to 1663 // eliminate/reorder it. 1664 if (DeadI->isAtomic()) { 1665 if (auto *LI = dyn_cast<LoadInst>(DeadI)) 1666 return isStrongerThanMonotonic(LI->getOrdering()); 1667 if (auto *SI = dyn_cast<StoreInst>(DeadI)) 1668 return isStrongerThanMonotonic(SI->getOrdering()); 1669 if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI)) 1670 return isStrongerThanMonotonic(ARMW->getOrdering()); 1671 if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI)) 1672 return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) || 1673 isStrongerThanMonotonic(CmpXchg->getFailureOrdering()); 1674 llvm_unreachable("other instructions should be skipped in MemorySSA"); 1675 } 1676 return false; 1677 } 1678 1679 /// Eliminate writes to objects that are not visible in the caller and are not 1680 /// accessed before returning from the function. 1681 bool eliminateDeadWritesAtEndOfFunction() { 1682 bool MadeChange = false; 1683 LLVM_DEBUG( 1684 dbgs() 1685 << "Trying to eliminate MemoryDefs at the end of the function\n"); 1686 do { 1687 ShouldIterateEndOfFunctionDSE = false; 1688 for (MemoryDef *Def : llvm::reverse(MemDefs)) { 1689 if (SkipStores.contains(Def)) 1690 continue; 1691 1692 Instruction *DefI = Def->getMemoryInst(); 1693 auto DefLoc = getLocForWrite(DefI); 1694 if (!DefLoc || !isRemovable(DefI)) 1695 continue; 1696 1697 // NOTE: Currently eliminating writes at the end of a function is 1698 // limited to MemoryDefs with a single underlying object, to save 1699 // compile-time. In practice it appears the case with multiple 1700 // underlying objects is very uncommon. If it turns out to be important, 1701 // we can use getUnderlyingObjects here instead. 1702 const Value *UO = getUnderlyingObject(DefLoc->Ptr); 1703 if (!isInvisibleToCallerAfterRet(UO)) 1704 continue; 1705 1706 if (isWriteAtEndOfFunction(Def)) { 1707 // See through pointer-to-pointer bitcasts 1708 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end " 1709 "of the function\n"); 1710 deleteDeadInstruction(DefI); 1711 ++NumFastStores; 1712 MadeChange = true; 1713 } 1714 } 1715 } while (ShouldIterateEndOfFunctionDSE); 1716 return MadeChange; 1717 } 1718 1719 /// If we have a zero initializing memset following a call to malloc, 1720 /// try folding it into a call to calloc. 1721 bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) { 1722 Instruction *DefI = Def->getMemoryInst(); 1723 MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI); 1724 if (!MemSet) 1725 // TODO: Could handle zero store to small allocation as well. 1726 return false; 1727 Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue()); 1728 if (!StoredConstant || !StoredConstant->isNullValue()) 1729 return false; 1730 1731 if (!isRemovable(DefI)) 1732 // The memset might be volatile.. 1733 return false; 1734 1735 if (F.hasFnAttribute(Attribute::SanitizeMemory) || 1736 F.hasFnAttribute(Attribute::SanitizeAddress) || 1737 F.hasFnAttribute(Attribute::SanitizeHWAddress) || 1738 F.getName() == "calloc") 1739 return false; 1740 auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO)); 1741 if (!Malloc) 1742 return false; 1743 auto *InnerCallee = Malloc->getCalledFunction(); 1744 if (!InnerCallee) 1745 return false; 1746 LibFunc Func; 1747 if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) || 1748 Func != LibFunc_malloc) 1749 return false; 1750 1751 auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) { 1752 // Check for br(icmp ptr, null), truebb, falsebb) pattern at the end 1753 // of malloc block 1754 auto *MallocBB = Malloc->getParent(), 1755 *MemsetBB = Memset->getParent(); 1756 if (MallocBB == MemsetBB) 1757 return true; 1758 auto *Ptr = Memset->getArgOperand(0); 1759 auto *TI = MallocBB->getTerminator(); 1760 ICmpInst::Predicate Pred; 1761 BasicBlock *TrueBB, *FalseBB; 1762 if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB, 1763 FalseBB))) 1764 return false; 1765 if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB) 1766 return false; 1767 return true; 1768 }; 1769 1770 if (Malloc->getOperand(0) != MemSet->getLength()) 1771 return false; 1772 if (!shouldCreateCalloc(Malloc, MemSet) || 1773 !DT.dominates(Malloc, MemSet) || 1774 !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT)) 1775 return false; 1776 IRBuilder<> IRB(Malloc); 1777 const auto &DL = Malloc->getModule()->getDataLayout(); 1778 auto *Calloc = 1779 emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1), 1780 Malloc->getArgOperand(0), IRB, TLI); 1781 if (!Calloc) 1782 return false; 1783 MemorySSAUpdater Updater(&MSSA); 1784 auto *LastDef = 1785 cast<MemoryDef>(Updater.getMemorySSA()->getMemoryAccess(Malloc)); 1786 auto *NewAccess = 1787 Updater.createMemoryAccessAfter(cast<Instruction>(Calloc), LastDef, 1788 LastDef); 1789 auto *NewAccessMD = cast<MemoryDef>(NewAccess); 1790 Updater.insertDef(NewAccessMD, /*RenameUses=*/true); 1791 Updater.removeMemoryAccess(Malloc); 1792 Malloc->replaceAllUsesWith(Calloc); 1793 Malloc->eraseFromParent(); 1794 return true; 1795 } 1796 1797 /// \returns true if \p Def is a no-op store, either because it 1798 /// directly stores back a loaded value or stores zero to a calloced object. 1799 bool storeIsNoop(MemoryDef *Def, const Value *DefUO) { 1800 Instruction *DefI = Def->getMemoryInst(); 1801 StoreInst *Store = dyn_cast<StoreInst>(DefI); 1802 MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI); 1803 Constant *StoredConstant = nullptr; 1804 if (Store) 1805 StoredConstant = dyn_cast<Constant>(Store->getOperand(0)); 1806 else if (MemSet) 1807 StoredConstant = dyn_cast<Constant>(MemSet->getValue()); 1808 else 1809 return false; 1810 1811 if (!isRemovable(DefI)) 1812 return false; 1813 1814 if (StoredConstant) { 1815 Constant *InitC = 1816 getInitialValueOfAllocation(DefUO, &TLI, StoredConstant->getType()); 1817 // If the clobbering access is LiveOnEntry, no instructions between them 1818 // can modify the memory location. 1819 if (InitC && InitC == StoredConstant) 1820 return MSSA.isLiveOnEntryDef( 1821 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def)); 1822 } 1823 1824 if (!Store) 1825 return false; 1826 1827 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) { 1828 if (LoadI->getPointerOperand() == Store->getOperand(1)) { 1829 // Get the defining access for the load. 1830 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess(); 1831 // Fast path: the defining accesses are the same. 1832 if (LoadAccess == Def->getDefiningAccess()) 1833 return true; 1834 1835 // Look through phi accesses. Recursively scan all phi accesses by 1836 // adding them to a worklist. Bail when we run into a memory def that 1837 // does not match LoadAccess. 1838 SetVector<MemoryAccess *> ToCheck; 1839 MemoryAccess *Current = 1840 MSSA.getWalker()->getClobberingMemoryAccess(Def); 1841 // We don't want to bail when we run into the store memory def. But, 1842 // the phi access may point to it. So, pretend like we've already 1843 // checked it. 1844 ToCheck.insert(Def); 1845 ToCheck.insert(Current); 1846 // Start at current (1) to simulate already having checked Def. 1847 for (unsigned I = 1; I < ToCheck.size(); ++I) { 1848 Current = ToCheck[I]; 1849 if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) { 1850 // Check all the operands. 1851 for (auto &Use : PhiAccess->incoming_values()) 1852 ToCheck.insert(cast<MemoryAccess>(&Use)); 1853 continue; 1854 } 1855 1856 // If we found a memory def, bail. This happens when we have an 1857 // unrelated write in between an otherwise noop store. 1858 assert(isa<MemoryDef>(Current) && 1859 "Only MemoryDefs should reach here."); 1860 // TODO: Skip no alias MemoryDefs that have no aliasing reads. 1861 // We are searching for the definition of the store's destination. 1862 // So, if that is the same definition as the load, then this is a 1863 // noop. Otherwise, fail. 1864 if (LoadAccess != Current) 1865 return false; 1866 } 1867 return true; 1868 } 1869 } 1870 1871 return false; 1872 } 1873 1874 bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) { 1875 bool Changed = false; 1876 for (auto OI : IOL) { 1877 Instruction *DeadI = OI.first; 1878 MemoryLocation Loc = *getLocForWrite(DeadI); 1879 assert(isRemovable(DeadI) && "Expect only removable instruction"); 1880 1881 const Value *Ptr = Loc.Ptr->stripPointerCasts(); 1882 int64_t DeadStart = 0; 1883 uint64_t DeadSize = Loc.Size.getValue(); 1884 GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL); 1885 OverlapIntervalsTy &IntervalMap = OI.second; 1886 Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize); 1887 if (IntervalMap.empty()) 1888 continue; 1889 Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize); 1890 } 1891 return Changed; 1892 } 1893 1894 /// Eliminates writes to locations where the value that is being written 1895 /// is already stored at the same location. 1896 bool eliminateRedundantStoresOfExistingValues() { 1897 bool MadeChange = false; 1898 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the " 1899 "already existing value\n"); 1900 for (auto *Def : MemDefs) { 1901 if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def)) 1902 continue; 1903 1904 Instruction *DefInst = Def->getMemoryInst(); 1905 auto MaybeDefLoc = getLocForWrite(DefInst); 1906 if (!MaybeDefLoc || !isRemovable(DefInst)) 1907 continue; 1908 1909 MemoryDef *UpperDef; 1910 // To conserve compile-time, we avoid walking to the next clobbering def. 1911 // Instead, we just try to get the optimized access, if it exists. DSE 1912 // will try to optimize defs during the earlier traversal. 1913 if (Def->isOptimized()) 1914 UpperDef = dyn_cast<MemoryDef>(Def->getOptimized()); 1915 else 1916 UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess()); 1917 if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef)) 1918 continue; 1919 1920 Instruction *UpperInst = UpperDef->getMemoryInst(); 1921 auto IsRedundantStore = [&]() { 1922 if (DefInst->isIdenticalTo(UpperInst)) 1923 return true; 1924 if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) { 1925 if (auto *SI = dyn_cast<StoreInst>(DefInst)) { 1926 // MemSetInst must have a write location. 1927 MemoryLocation UpperLoc = *getLocForWrite(UpperInst); 1928 int64_t InstWriteOffset = 0; 1929 int64_t DepWriteOffset = 0; 1930 auto OR = isOverwrite(UpperInst, DefInst, UpperLoc, *MaybeDefLoc, 1931 InstWriteOffset, DepWriteOffset); 1932 Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL); 1933 return StoredByte && StoredByte == MemSetI->getOperand(1) && 1934 OR == OW_Complete; 1935 } 1936 } 1937 return false; 1938 }; 1939 1940 if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst)) 1941 continue; 1942 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *DefInst 1943 << '\n'); 1944 deleteDeadInstruction(DefInst); 1945 NumRedundantStores++; 1946 MadeChange = true; 1947 } 1948 return MadeChange; 1949 } 1950 }; 1951 1952 static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, 1953 DominatorTree &DT, PostDominatorTree &PDT, 1954 AssumptionCache &AC, 1955 const TargetLibraryInfo &TLI, 1956 const LoopInfo &LI) { 1957 bool MadeChange = false; 1958 1959 MSSA.ensureOptimizedUses(); 1960 DSEState State(F, AA, MSSA, DT, PDT, AC, TLI, LI); 1961 // For each store: 1962 for (unsigned I = 0; I < State.MemDefs.size(); I++) { 1963 MemoryDef *KillingDef = State.MemDefs[I]; 1964 if (State.SkipStores.count(KillingDef)) 1965 continue; 1966 Instruction *KillingI = KillingDef->getMemoryInst(); 1967 1968 Optional<MemoryLocation> MaybeKillingLoc; 1969 if (State.isMemTerminatorInst(KillingI)) 1970 MaybeKillingLoc = State.getLocForTerminator(KillingI).map( 1971 [](const std::pair<MemoryLocation, bool> &P) { return P.first; }); 1972 else 1973 MaybeKillingLoc = State.getLocForWrite(KillingI); 1974 1975 if (!MaybeKillingLoc) { 1976 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for " 1977 << *KillingI << "\n"); 1978 continue; 1979 } 1980 MemoryLocation KillingLoc = *MaybeKillingLoc; 1981 assert(KillingLoc.Ptr && "KillingLoc should not be null"); 1982 const Value *KillingUndObj = getUnderlyingObject(KillingLoc.Ptr); 1983 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by " 1984 << *KillingDef << " (" << *KillingI << ")\n"); 1985 1986 unsigned ScanLimit = MemorySSAScanLimit; 1987 unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit; 1988 unsigned PartialLimit = MemorySSAPartialStoreLimit; 1989 // Worklist of MemoryAccesses that may be killed by KillingDef. 1990 SetVector<MemoryAccess *> ToCheck; 1991 ToCheck.insert(KillingDef->getDefiningAccess()); 1992 1993 bool Shortend = false; 1994 bool IsMemTerm = State.isMemTerminatorInst(KillingI); 1995 // Check if MemoryAccesses in the worklist are killed by KillingDef. 1996 for (unsigned I = 0; I < ToCheck.size(); I++) { 1997 MemoryAccess *Current = ToCheck[I]; 1998 if (State.SkipStores.count(Current)) 1999 continue; 2000 2001 Optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef( 2002 KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit, 2003 WalkerStepLimit, IsMemTerm, PartialLimit); 2004 2005 if (!MaybeDeadAccess) { 2006 LLVM_DEBUG(dbgs() << " finished walk\n"); 2007 continue; 2008 } 2009 2010 MemoryAccess *DeadAccess = *MaybeDeadAccess; 2011 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess); 2012 if (isa<MemoryPhi>(DeadAccess)) { 2013 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n"); 2014 for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) { 2015 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V); 2016 BasicBlock *IncomingBlock = IncomingAccess->getBlock(); 2017 BasicBlock *PhiBlock = DeadAccess->getBlock(); 2018 2019 // We only consider incoming MemoryAccesses that come before the 2020 // MemoryPhi. Otherwise we could discover candidates that do not 2021 // strictly dominate our starting def. 2022 if (State.PostOrderNumbers[IncomingBlock] > 2023 State.PostOrderNumbers[PhiBlock]) 2024 ToCheck.insert(IncomingAccess); 2025 } 2026 continue; 2027 } 2028 auto *DeadDefAccess = cast<MemoryDef>(DeadAccess); 2029 Instruction *DeadI = DeadDefAccess->getMemoryInst(); 2030 LLVM_DEBUG(dbgs() << " (" << *DeadI << ")\n"); 2031 ToCheck.insert(DeadDefAccess->getDefiningAccess()); 2032 NumGetDomMemoryDefPassed++; 2033 2034 if (!DebugCounter::shouldExecute(MemorySSACounter)) 2035 continue; 2036 2037 MemoryLocation DeadLoc = *State.getLocForWrite(DeadI); 2038 2039 if (IsMemTerm) { 2040 const Value *DeadUndObj = getUnderlyingObject(DeadLoc.Ptr); 2041 if (KillingUndObj != DeadUndObj) 2042 continue; 2043 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI 2044 << "\n KILLER: " << *KillingI << '\n'); 2045 State.deleteDeadInstruction(DeadI); 2046 ++NumFastStores; 2047 MadeChange = true; 2048 } else { 2049 // Check if DeadI overwrites KillingI. 2050 int64_t KillingOffset = 0; 2051 int64_t DeadOffset = 0; 2052 OverwriteResult OR = State.isOverwrite( 2053 KillingI, DeadI, KillingLoc, DeadLoc, KillingOffset, DeadOffset); 2054 if (OR == OW_MaybePartial) { 2055 auto Iter = State.IOLs.insert( 2056 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>( 2057 DeadI->getParent(), InstOverlapIntervalsTy())); 2058 auto &IOL = Iter.first->second; 2059 OR = isPartialOverwrite(KillingLoc, DeadLoc, KillingOffset, 2060 DeadOffset, DeadI, IOL); 2061 } 2062 2063 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) { 2064 auto *DeadSI = dyn_cast<StoreInst>(DeadI); 2065 auto *KillingSI = dyn_cast<StoreInst>(KillingI); 2066 // We are re-using tryToMergePartialOverlappingStores, which requires 2067 // DeadSI to dominate DeadSI. 2068 // TODO: implement tryToMergeParialOverlappingStores using MemorySSA. 2069 if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) { 2070 if (Constant *Merged = tryToMergePartialOverlappingStores( 2071 KillingSI, DeadSI, KillingOffset, DeadOffset, State.DL, 2072 State.BatchAA, &DT)) { 2073 2074 // Update stored value of earlier store to merged constant. 2075 DeadSI->setOperand(0, Merged); 2076 ++NumModifiedStores; 2077 MadeChange = true; 2078 2079 Shortend = true; 2080 // Remove killing store and remove any outstanding overlap 2081 // intervals for the updated store. 2082 State.deleteDeadInstruction(KillingSI); 2083 auto I = State.IOLs.find(DeadSI->getParent()); 2084 if (I != State.IOLs.end()) 2085 I->second.erase(DeadSI); 2086 break; 2087 } 2088 } 2089 } 2090 2091 if (OR == OW_Complete) { 2092 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI 2093 << "\n KILLER: " << *KillingI << '\n'); 2094 State.deleteDeadInstruction(DeadI); 2095 ++NumFastStores; 2096 MadeChange = true; 2097 } 2098 } 2099 } 2100 2101 // Check if the store is a no-op. 2102 if (!Shortend && State.storeIsNoop(KillingDef, KillingUndObj)) { 2103 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *KillingI 2104 << '\n'); 2105 State.deleteDeadInstruction(KillingI); 2106 NumRedundantStores++; 2107 MadeChange = true; 2108 continue; 2109 } 2110 2111 // Can we form a calloc from a memset/malloc pair? 2112 if (!Shortend && State.tryFoldIntoCalloc(KillingDef, KillingUndObj)) { 2113 LLVM_DEBUG(dbgs() << "DSE: Remove memset after forming calloc:\n" 2114 << " DEAD: " << *KillingI << '\n'); 2115 State.deleteDeadInstruction(KillingI); 2116 MadeChange = true; 2117 continue; 2118 } 2119 } 2120 2121 if (EnablePartialOverwriteTracking) 2122 for (auto &KV : State.IOLs) 2123 MadeChange |= State.removePartiallyOverlappedStores(KV.second); 2124 2125 MadeChange |= State.eliminateRedundantStoresOfExistingValues(); 2126 MadeChange |= State.eliminateDeadWritesAtEndOfFunction(); 2127 return MadeChange; 2128 } 2129 } // end anonymous namespace 2130 2131 //===----------------------------------------------------------------------===// 2132 // DSE Pass 2133 //===----------------------------------------------------------------------===// 2134 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) { 2135 AliasAnalysis &AA = AM.getResult<AAManager>(F); 2136 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F); 2137 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F); 2138 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); 2139 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F); 2140 AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F); 2141 LoopInfo &LI = AM.getResult<LoopAnalysis>(F); 2142 2143 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, AC, TLI, LI); 2144 2145 #ifdef LLVM_ENABLE_STATS 2146 if (AreStatisticsEnabled()) 2147 for (auto &I : instructions(F)) 2148 NumRemainingStores += isa<StoreInst>(&I); 2149 #endif 2150 2151 if (!Changed) 2152 return PreservedAnalyses::all(); 2153 2154 PreservedAnalyses PA; 2155 PA.preserveSet<CFGAnalyses>(); 2156 PA.preserve<MemorySSAAnalysis>(); 2157 PA.preserve<LoopAnalysis>(); 2158 return PA; 2159 } 2160 2161 namespace { 2162 2163 /// A legacy pass for the legacy pass manager that wraps \c DSEPass. 2164 class DSELegacyPass : public FunctionPass { 2165 public: 2166 static char ID; // Pass identification, replacement for typeid 2167 2168 DSELegacyPass() : FunctionPass(ID) { 2169 initializeDSELegacyPassPass(*PassRegistry::getPassRegistry()); 2170 } 2171 2172 bool runOnFunction(Function &F) override { 2173 if (skipFunction(F)) 2174 return false; 2175 2176 AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); 2177 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2178 const TargetLibraryInfo &TLI = 2179 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 2180 MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); 2181 PostDominatorTree &PDT = 2182 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree(); 2183 AssumptionCache &AC = 2184 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2185 LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2186 2187 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, AC, TLI, LI); 2188 2189 #ifdef LLVM_ENABLE_STATS 2190 if (AreStatisticsEnabled()) 2191 for (auto &I : instructions(F)) 2192 NumRemainingStores += isa<StoreInst>(&I); 2193 #endif 2194 2195 return Changed; 2196 } 2197 2198 void getAnalysisUsage(AnalysisUsage &AU) const override { 2199 AU.setPreservesCFG(); 2200 AU.addRequired<AAResultsWrapperPass>(); 2201 AU.addRequired<TargetLibraryInfoWrapperPass>(); 2202 AU.addPreserved<GlobalsAAWrapperPass>(); 2203 AU.addRequired<DominatorTreeWrapperPass>(); 2204 AU.addPreserved<DominatorTreeWrapperPass>(); 2205 AU.addRequired<PostDominatorTreeWrapperPass>(); 2206 AU.addRequired<MemorySSAWrapperPass>(); 2207 AU.addPreserved<PostDominatorTreeWrapperPass>(); 2208 AU.addPreserved<MemorySSAWrapperPass>(); 2209 AU.addRequired<LoopInfoWrapperPass>(); 2210 AU.addPreserved<LoopInfoWrapperPass>(); 2211 AU.addRequired<AssumptionCacheTracker>(); 2212 } 2213 }; 2214 2215 } // end anonymous namespace 2216 2217 char DSELegacyPass::ID = 0; 2218 2219 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false, 2220 false) 2221 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2222 INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass) 2223 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2224 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 2225 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 2226 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) 2227 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 2228 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 2229 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 2230 INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false, 2231 false) 2232 2233 FunctionPass *llvm::createDeadStoreEliminationPass() { 2234 return new DSELegacyPass(); 2235 } 2236