//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

static cl::opt<bool>
    EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(false), cl::Hidden,
                    cl::desc("Use MemorySSA-backed MemCpyOpt."));

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
STATISTIC(NumCallSlot, "Number of call slot optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A semi-open range that describes the span that this range
  // covers. The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found more than 4 stores to merge or 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator and postdominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (!EnableMemorySSA)
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    if (EnableMemorySSA)
      AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

// Check that V is either not accessible by the caller, or unwinding cannot
// occur between Start and End.
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
                                         Instruction *End) {
  assert(Start->getParent() == End->getParent() && "Must be in same block");
  if (!Start->getFunction()->doesNotThrow() &&
      !isa<AllocaInst>(getUnderlyingObject(V))) {
    for (const Instruction &I :
         make_range(Start->getIterator(), End->getIterator())) {
      if (I.mayThrow())
        return true;
    }
  }
  return false;
}

void MemCpyOptPass::eraseInstruction(Instruction *I) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(I);
  if (MD)
    MD->removeInstruction(I);
  I->eraseFromParent();
}

// Check for mod or ref of Loc between Start and End, excluding both boundaries.
// Start and End must be in the same block.
static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc,
                            const MemoryUseOrDef *Start,
                            const MemoryUseOrDef *End) {
  assert(Start->getBlock() == End->getBlock() && "Only local supported");
  for (const MemoryAccess &MA :
       make_range(++Start->getIterator(), End->getIterator())) {
    if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(),
                                       Loc)))
      return true;
  }
  return false;
}

// Check for mod of Loc between Start and End, excluding both boundaries.
// Start and End can be in different blocks.
static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc,
                           const MemoryUseOrDef *Start,
                           const MemoryUseOrDef *End) {
  // TODO: Only walk until we hit Start.
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      End->getDefiningAccess(), Loc);
  return !MSSA->dominates(Clobber, Start);
}

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splatted. Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be inserted
  // after MemInsertPoint. It points to either LastMemDef or to the last user
  // before the insertion point of the memset, if there are any such users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion point
  // for the new memset. This will become the defining access of the inserted
  // memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    if (MSSAU) {
      auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
      if (CurrentAcc) {
        MemInsertPoint = CurrentAcc;
        if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
          LastMemDef = CurrentDef;
      }
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // store integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // Check to see if this stored value is of the same byte-splattable value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
                                                   : Range.TheStores) dbgs()
                                              << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    if (MSSAU) {
      assert(LastMemDef && MemInsertPoint &&
             "Both LastMemDef and MemInsertPoint need to be set");
      auto *NewDef =
          cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                              ? MSSAU->createMemoryAccessBefore(
                                    AMemSet, LastMemDef, MemInsertPoint)
                              : MSSAU->createMemoryAccessAfter(
                                    AMemSet, LastMemDef, MemInsertPoint));
      MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      LastMemDef = NewDef;
      MemInsertPoint = NewDef;
    }

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything else that
// may alias with these.
// The method returns true if it was successful.
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift{SI};

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    // Make sure hoisting does not perform a store that was not guaranteed to
    // happen.
    if (!isGuaranteedToTransferExecutionToSuccessor(C))
      return false;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist a user of P above P.
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // Find MSSA insertion point. Normally P will always have a corresponding
  // memory access before which we can insert. However, with non-standard AA
  // pipelines, there may be a mismatch between AA and MSSA, in which case we
  // will scan for a memory access before P. In either case, we know for sure
  // that at least the load will have a memory access.
  // TODO: Simplify this once P will be determined by MSSA, in which case the
  // discrepancy can no longer occur.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  if (MSSAU) {
    if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
      MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
    } else {
      const Instruction *ConstP = P;
      for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
                                             ++LI->getReverseIterator())) {
        if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
          MemInsertPoint = MA;
          break;
        }
      }
    }
  }

  // We made it, we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
    if (MSSAU) {
      assert(MemInsertPoint && "Must have found insert point");
      if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
        MSSAU->moveAfter(MA, MemInsertPoint);
        MemInsertPoint = MA;
      }
    }
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach this pass how to propagate the !nontemporal
  // metadata to memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers, bail
  // until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        // TODO: Can use MSSA for this.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          if (MSSAU) {
            auto *LastDef =
                cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
            auto *NewAccess =
                MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
            MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
          }

          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      CallInst *C = nullptr;
      if (EnableMemorySSA) {
        if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
                MSSA->getWalker()->getClobberingMemoryAccess(LI))) {
          // The load must post-dominate the call. Limit to the same block for
          // now.
          // TODO: Support non-local call-slot optimization?
          if (LoadClobber->getBlock() == SI->getParent())
            C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
        }
      } else {
        MemDepResult ldep = MD->getDependency(LI);
        if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
          C = dyn_cast<CallInst>(ldep.getInst());
      }

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        if (EnableMemorySSA) {
          if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C),
                              MSSA->getMemoryAccess(SI)))
            C = nullptr;
        } else {
          for (BasicBlock::iterator I = --SI->getIterator(),
                                    E = C->getIterator();
               I != E; --I) {
            if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
              C = nullptr;
              break;
            }
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      if (MSSAU) {
        assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)));
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
        auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
      }

      eraseInstruction(SI);
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen),
                                          DL, C, DT))
    return false;

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore))
    return false;

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User *, 8> srcUseList(srcAlloca->users());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      append_range(srcUseList, U->users());
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      append_range(srcUseList, U->users());
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpyLoad)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  if (!DT->dominates(cpyDest, C)) {
    // Support moving a constant index GEP before the call.
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    if (GEP && GEP->hasAllConstantIndices() &&
        DT->dominates(GEP->getPointerOperand(), C))
      GEP->moveBefore(C);
    else
      return false;
  }

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
          : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                        cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  if (MD)
    MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpyLoad, KnownIDs, true);

  ++NumCallSlot;
  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction. Just
  // ignore the input and let someone else zap MDep. This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the two memcpys must be equal, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  if (EnableMemorySSA) {
    // TODO: It would be sufficient to check the MDep source up to the memcpy
    // size of M, rather than MDep.
    if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
                       MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
      return false;
  } else {
    // NOTE: This is conservative; it will stop on any read from the source loc,
    // not just the defining memcpy.
    MemDepResult SourceDep =
        MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                     M->getIterator(), M->getParent());
    if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
      return false;
  }

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                                 MDep->getRawSource(), MDep->getSourceAlign(),
                                 M->getLength(), M->isVolatile());
  else
    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                                MDep->getRawSource(), MDep->getSourceAlign(),
                                M->getLength(), M->isVolatile());

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
    auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  // Remove the instruction we're replacing.
  eraseInstruction(M);
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that src and dst of the memcpy aren't the same. While memcpy
  // operands cannot partially overlap, exact equality is allowed.
  if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(),
                                    LocationSize::precise(1)),
                     MemoryLocation(MemCpy->getDest(),
                                    LocationSize::precise(1))))
    return false;

  if (EnableMemorySSA) {
    // We know that dst up to src_size is not written. We now need to make sure
    // that dst up to dst_size is not accessed. (If we did not move the memset,
    // checking for reads would be sufficient.)
    if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet),
                        MSSA->getMemoryAccess(MemSet),
                        MSSA->getMemoryAccess(MemCpy))) {
      return false;
    }
  } else {
    // We have already checked that dst up to src_size is not accessed. We
    // need to make sure that there are no accesses up to dst_size either.
    MemDepResult DstDepInfo = MD->getPointerDependencyFrom(
        MemoryLocation::getForDest(MemSet), false, MemCpy->getIterator(),
        MemCpy->getParent());
    if (DstDepInfo.getInst() != MemSet)
      return false;
  }

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
    return false;

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Instruction *NewMemSet = Builder.CreateMemSet(
      Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
           "MemCpy must be a MemoryDef");
    // The new memset is inserted after the memcpy, but it is known that its
    // defining access is the memset about to be removed which immediately
    // precedes the memcpy.
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessBefore(
        NewMemSet, LastDef->getDefiningAccess(), LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  eraseInstruction(MemSet);
  return true;
}

/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
  if (isa<AllocaInst>(I))
    return true;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        if (LTSize->getZExtValue() >= Size->getZExtValue())
          return true;

  return false;
}

static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
                                 MemoryDef *Def, ConstantInt *Size) {
  if (MSSA->isLiveOnEntryDef(Def))
    return isa<AllocaInst>(getUnderlyingObject(V));

  if (IntrinsicInst *II =
          dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));
      if (AA->isMustAlias(V, II->getArgOperand(1)) &&
          LTSize->getZExtValue() >= Size->getZExtValue())
        return true;
    }
  }

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  // Make sure we have memcpy(..., memset(...), ...); that is, we are memsetting
  // and memcpying from the same address. Otherwise it is hard to reason about.
  if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  // A known memset size is required.
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  if (!MemSetSize)
    return false;

  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
    // If the memcpy is larger than the memset, but the memory was undef prior
    // to the memset, we can just ignore the tail. Technically we're only
    // interested in the bytes from MemSetSize..CopySize here, but as we can't
    // easily represent this location, we use the full 0..CopySize range.
    MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
    bool CanReduceSize = false;
    if (EnableMemorySSA) {
      MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet);
      MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
          MemSetAccess->getDefiningAccess(), MemCpyLoc);
      if (auto *MD = dyn_cast<MemoryDef>(Clobber))
        if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize))
          CanReduceSize = true;
    } else {
      MemDepResult DepInfo = MD->getPointerDependencyFrom(
          MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
      if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
        CanReduceSize = true;
    }

    if (!CanReduceSize)
      return false;
    CopySize = MemSetSize;
  }

  IRBuilder<> Builder(MemCpy);
  Instruction *NewM =
      Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                           CopySize, MaybeAlign(MemCpy->getDestAlignment()));
  if (MSSAU) {
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  return true;
}

/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    ++BBI;
    eraseInstruction(M);
    return true;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM =
            Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                                 MaybeAlign(M->getDestAlignment()), false);
        if (MSSAU) {
          auto *LastDef =
              cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
          auto *NewAccess =
              MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
          MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
        }

        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  if (EnableMemorySSA) {
    MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
    MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA);
    MemoryLocation DestLoc = MemoryLocation::getForDest(M);
    const MemoryAccess *DestClobber =
        MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc);

    // Try to turn a partially redundant memset + memcpy into
    // memcpy + smaller memset. We don't need the memcpy size for this.
    // The memcpy must post-dominate the memset, so limit this to the same basic
    // block. A non-local generalization is likely not worthwhile.
    if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
      if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
        if (DestClobber->getBlock() == M->getParent())
          if (processMemSetMemCpyDependence(M, MDep))
            return true;

    // The optimizations after this point require the memcpy size.
    ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
    if (!CopySize) return false;

    MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
        AnyClobber, MemoryLocation::getForSource(M));

    // There are four possible optimizations we can do for memcpy:
    //   a) memcpy-memcpy xform which exposes redundance for DSE.
    //   b) call-memcpy xform for return slot optimization.
    //   c) memcpy from freshly alloca'd space or space that has just started
    //      its lifetime copies undefined data, and we can therefore eliminate
    //      the memcpy in favor of the data that was already at the destination.
    //   d) memcpy from a just-memset'd source can be turned into memset.
    if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
      if (Instruction *MI = MD->getMemoryInst()) {
        if (auto *C = dyn_cast<CallInst>(MI)) {
          // The memcpy must post-dominate the call. Limit to the same block for
          // now. Additionally, we need to ensure that there are no accesses to
          // dest between the call and the memcpy. Accesses to src will be
          // checked by performCallSlotOptzn().
          // TODO: Support non-local call-slot optimization?
          if (C->getParent() == M->getParent() &&
              !accessedBetween(*AA, DestLoc, MD, MA)) {
            // FIXME: Can we pass in either of dest/src alignment here instead
            // of conservatively taking the minimum?
            Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                       M->getSourceAlign().valueOrOne());
            if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                     CopySize->getZExtValue(), Alignment, C)) {
              LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
                                << "    call: " << *C << "\n"
                                << "    memcpy: " << *M << "\n");
              eraseInstruction(M);
              ++NumMemCpyInstr;
              return true;
            }
          }
        }
        if (auto *MDep = dyn_cast<MemCpyInst>(MI))
          return processMemCpyMemCpyDependence(M, MDep);
        if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
          if (performMemCpyToMemSetOptzn(M, MDep)) {
            LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
            eraseInstruction(M);
            ++NumCpyToSet;
            return true;
          }
        }
      }

      if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, CopySize)) {
        LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }
  } else {
    MemDepResult DepInfo = MD->getDependency(M);

    // Try to turn a partially redundant memset + memcpy into
    // memcpy + smaller memset. We don't need the memcpy size for this.
    if (DepInfo.isClobber())
      if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
        if (processMemSetMemCpyDependence(M, MDep))
          return true;

    // The optimizations after this point require the memcpy size.
    ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
    if (!CopySize) return false;

    // There are four possible optimizations we can do for memcpy:
    //   a) memcpy-memcpy xform which exposes redundance for DSE.
    //   b) call-memcpy xform for return slot optimization.
    //   c) memcpy from freshly alloca'd space or space that has just started
    //      its lifetime copies undefined data, and we can therefore eliminate
    //      the memcpy in favor of the data that was already at the destination.
    //   d) memcpy from a just-memset'd source can be turned into memset.
    if (DepInfo.isClobber()) {
      if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
        // FIXME: Can we pass in either of dest/src alignment here instead
        // of conservatively taking the minimum?
        Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                   M->getSourceAlign().valueOrOne());
        if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                 CopySize->getZExtValue(), Alignment, C)) {
          eraseInstruction(M);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }

    MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
    MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
        SrcLoc, true, M->getIterator(), M->getParent());

    if (SrcDepInfo.isClobber()) {
      if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
        return processMemCpyMemCpyDependence(M, MDep);
    } else if (SrcDepInfo.isDef()) {
      if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }

    if (SrcDepInfo.isClobber())
      if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
        if (performMemCpyToMemSetOptzn(M, MDep)) {
          eraseInstruction(M);
          ++NumCpyToSet;
          return true;
        }
  }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply stricter
  // aliasing guarantees).

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  if (MD)
    MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // The pointers provably don't alias, so the memmove can be rewritten in
  // place as a memcpy.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply
  // stricter aliasing guarantees).

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  if (MD)
    MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
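/// If the argument is fed by a memcpy, we may be able to pass the memcpy's
/// source to the callee instead. As an illustrative sketch with hypothetical
/// names (bitcasts omitted for brevity):
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %agg.tmp, i8* %src, i64 32,
///                                        i1 false)
///   call void @use(%struct.S* byval(%struct.S) align 4 %agg.tmp)
/// can become
///   call void @use(%struct.S* byval(%struct.S) align 4 %src)
/// because the byval attribute already guarantees the callee operates on its
/// own copy.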
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
  MemCpyInst *MDep = nullptr;
  if (EnableMemorySSA) {
    MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
    MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
        CallAccess->getDefiningAccess(), Loc);
    if (auto *MD = dyn_cast<MemoryDef>(Clobber))
      MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());
  } else {
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        Loc, true, CB.getIterator(), CB.getParent());
    if (!DepInfo.isClobber())
      return false;
    MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  }

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of
  // the result.
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the
  // alignment, then it is some target-specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If it is greater than the memcpy's, then we check to see if we can force
  // the source of the memcpy to the alignment we need. If we fail, we bail
  // out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call to foo(*a) into foo(*b).
  if (EnableMemorySSA) {
    if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
                       MSSA->getMemoryAccess(MDep),
                       MSSA->getMemoryAccess(&CB)))
      return false;
  } else {
    // NOTE: This is conservative; it will stop on any read from the source
    // location, not just on the defining memcpy.
    MemDepResult SourceDep = MD->getPointerDependencyFrom(
        MemoryLocation::getForSource(MDep), false,
        CB.getIterator(), MDep->getParent());
    if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
      return false;
  }

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast = new BitCastInst(
        MDep->getSource(), ByValArg->getType(), "tmpcast", &CB);
    // Set the tmpcast's DebugLoc to MDep's.
    TmpBitCast->setDebugLoc(MDep->getDebugLoc());
    TmpCast = TmpBitCast;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good! Update the byval argument.
  CB.setArgOperand(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

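// New pass manager entry point. As a usage sketch (not part of the original
// documentation): the pass can be exercised on its own with something like
//   opt -passes=memcpyopt -S input.ll
// and the -enable-memcpyopt-memoryssa flag declared above toggles the
// MemorySSA-backed path.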
PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto *MD = !EnableMemorySSA ? &AM.getResult<MemoryDependenceAnalysis>(F)
                              : AM.getCachedResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *MSSA = EnableMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F)
                               : AM.getCachedResult<MemorySSAAnalysis>(F);

  bool MadeChange =
      runImpl(F, MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (MD)
    PA.preserve<MemoryDependenceAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_,
                            TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                            AssumptionCache *AC_, DominatorTree *DT_,
                            MemorySSA *MSSA_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  AA = AA_;
  AC = AC_;
  DT = DT_;
  MSSA = MSSA_;
  MemorySSAUpdater MSSAU_(MSSA_);
  MSSAU = MSSA_ ? &MSSAU_ : nullptr;
  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here. These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  if (MSSA_ && VerifyMemorySSA)
    MSSA_->verifyMemorySSA();

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MDWP = !EnableMemorySSA
                   ? &getAnalysis<MemoryDependenceWrapperPass>()
                   : getAnalysisIfAvailable<MemoryDependenceWrapperPass>();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *MSSAWP = EnableMemorySSA
                     ? &getAnalysis<MemorySSAWrapperPass>()
                     : getAnalysisIfAvailable<MemorySSAWrapperPass>();

  return Impl.runImpl(F, MDWP ? &MDWP->getMemDep() : nullptr, TLI, AA, AC, DT,
                      MSSAWP ? &MSSAWP->getMSSA() : nullptr);
}