1 //===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass performs various transformations related to eliminating memcpy 10 // calls, or transforming sets of stores into memset's. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Scalar/MemCpyOptimizer.h" 15 #include "llvm/ADT/DenseSet.h" 16 #include "llvm/ADT/STLExtras.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/ADT/Statistic.h" 19 #include "llvm/ADT/iterator_range.h" 20 #include "llvm/Analysis/AliasAnalysis.h" 21 #include "llvm/Analysis/AssumptionCache.h" 22 #include "llvm/Analysis/CaptureTracking.h" 23 #include "llvm/Analysis/GlobalsModRef.h" 24 #include "llvm/Analysis/Loads.h" 25 #include "llvm/Analysis/MemoryLocation.h" 26 #include "llvm/Analysis/MemorySSA.h" 27 #include "llvm/Analysis/MemorySSAUpdater.h" 28 #include "llvm/Analysis/TargetLibraryInfo.h" 29 #include "llvm/Analysis/ValueTracking.h" 30 #include "llvm/IR/BasicBlock.h" 31 #include "llvm/IR/Constants.h" 32 #include "llvm/IR/DataLayout.h" 33 #include "llvm/IR/DerivedTypes.h" 34 #include "llvm/IR/Dominators.h" 35 #include "llvm/IR/Function.h" 36 #include "llvm/IR/GlobalVariable.h" 37 #include "llvm/IR/IRBuilder.h" 38 #include "llvm/IR/InstrTypes.h" 39 #include "llvm/IR/Instruction.h" 40 #include "llvm/IR/Instructions.h" 41 #include "llvm/IR/IntrinsicInst.h" 42 #include "llvm/IR/Intrinsics.h" 43 #include "llvm/IR/LLVMContext.h" 44 #include "llvm/IR/Module.h" 45 #include "llvm/IR/PassManager.h" 46 #include "llvm/IR/Type.h" 47 #include "llvm/IR/User.h" 48 #include "llvm/IR/Value.h" 49 #include "llvm/Support/Casting.h" 50 #include "llvm/Support/Debug.h" 51 #include "llvm/Support/MathExtras.h" 52 #include "llvm/Support/raw_ostream.h" 53 #include "llvm/Transforms/Utils/Local.h" 54 #include <algorithm> 55 #include <cassert> 56 #include <cstdint> 57 #include <optional> 58 59 using namespace llvm; 60 61 #define DEBUG_TYPE "memcpyopt" 62 63 static cl::opt<bool> EnableMemCpyOptWithoutLibcalls( 64 "enable-memcpyopt-without-libcalls", cl::Hidden, 65 cl::desc("Enable memcpyopt even when libcalls are disabled")); 66 67 STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted"); 68 STATISTIC(NumMemSetInfer, "Number of memsets inferred"); 69 STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy"); 70 STATISTIC(NumCpyToSet, "Number of memcpys converted to memset"); 71 STATISTIC(NumCallSlot, "Number of call slot optimizations performed"); 72 73 namespace { 74 75 /// Represents a range of memset'd bytes with the ByteVal value. 76 /// This allows us to analyze stores like: 77 /// store 0 -> P+1 78 /// store 0 -> P+0 79 /// store 0 -> P+3 80 /// store 0 -> P+2 81 /// which sometimes happens with stores to arrays of structs etc. When we see 82 /// the first store, we make a range [1, 2). The second store extends the range 83 /// to [0, 2). The third makes a new range [2, 3). The fourth store joins the 84 /// two ranges into [0, 3) which is memset'able. 85 struct MemsetRange { 86 // Start/End - A semi range that describes the span that this range covers. 87 // The range is closed at the start and open at the end: [Start, End). 
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  MaybeAlign Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found more than 4 stores to merge, or the range covers at least 16
  // bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
153 SmallVector<MemsetRange, 8> Ranges; 154 155 const DataLayout &DL; 156 157 public: 158 MemsetRanges(const DataLayout &DL) : DL(DL) {} 159 160 using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator; 161 162 const_iterator begin() const { return Ranges.begin(); } 163 const_iterator end() const { return Ranges.end(); } 164 bool empty() const { return Ranges.empty(); } 165 166 void addInst(int64_t OffsetFromFirst, Instruction *Inst) { 167 if (auto *SI = dyn_cast<StoreInst>(Inst)) 168 addStore(OffsetFromFirst, SI); 169 else 170 addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst)); 171 } 172 173 void addStore(int64_t OffsetFromFirst, StoreInst *SI) { 174 TypeSize StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType()); 175 assert(!StoreSize.isScalable() && "Can't track scalable-typed stores"); 176 addRange(OffsetFromFirst, StoreSize.getFixedValue(), 177 SI->getPointerOperand(), SI->getAlign(), SI); 178 } 179 180 void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) { 181 int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue(); 182 addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlign(), MSI); 183 } 184 185 void addRange(int64_t Start, int64_t Size, Value *Ptr, MaybeAlign Alignment, 186 Instruction *Inst); 187 }; 188 189 } // end anonymous namespace 190 191 /// Add a new store to the MemsetRanges data structure. This adds a 192 /// new range for the specified store at the specified offset, merging into 193 /// existing ranges as appropriate. 194 void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr, 195 MaybeAlign Alignment, Instruction *Inst) { 196 int64_t End = Start+Size; 197 198 range_iterator I = partition_point( 199 Ranges, [=](const MemsetRange &O) { return O.End < Start; }); 200 201 // We now know that I == E, in which case we didn't find anything to merge 202 // with, or that Start <= I->End. If End < I->Start or I == E, then we need 203 // to insert a new range. Handle this now. 204 if (I == Ranges.end() || End < I->Start) { 205 MemsetRange &R = *Ranges.insert(I, MemsetRange()); 206 R.Start = Start; 207 R.End = End; 208 R.StartPtr = Ptr; 209 R.Alignment = Alignment; 210 R.TheStores.push_back(Inst); 211 return; 212 } 213 214 // This store overlaps with I, add it. 215 I->TheStores.push_back(Inst); 216 217 // At this point, we may have an interval that completely contains our store. 218 // If so, just add it to the interval and return. 219 if (I->Start <= Start && I->End >= End) 220 return; 221 222 // Now we know that Start <= I->End and End >= I->Start so the range overlaps 223 // but is not entirely contained within the range. 224 225 // See if the range extends the start of the range. In this case, it couldn't 226 // possibly cause it to join the prior range, because otherwise we would have 227 // stopped on *it*. 228 if (Start < I->Start) { 229 I->Start = Start; 230 I->StartPtr = Ptr; 231 I->Alignment = Alignment; 232 } 233 234 // Now we know that Start <= I->End and Start >= I->Start (so the startpoint 235 // is in or right at the end of I), and that End >= I->Start. Extend I out to 236 // End. 237 if (End > I->End) { 238 I->End = End; 239 range_iterator NextI = I; 240 while (++NextI != Ranges.end() && End >= NextI->Start) { 241 // Merge the range in. 
242 I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end()); 243 if (NextI->End > I->End) 244 I->End = NextI->End; 245 Ranges.erase(NextI); 246 NextI = I; 247 } 248 } 249 } 250 251 //===----------------------------------------------------------------------===// 252 // MemCpyOptLegacyPass Pass 253 //===----------------------------------------------------------------------===// 254 255 // Check that V is either not accessible by the caller, or unwinding cannot 256 // occur between Start and End. 257 static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start, 258 Instruction *End) { 259 assert(Start->getParent() == End->getParent() && "Must be in same block"); 260 // Function can't unwind, so it also can't be visible through unwinding. 261 if (Start->getFunction()->doesNotThrow()) 262 return false; 263 264 // Object is not visible on unwind. 265 // TODO: Support RequiresNoCaptureBeforeUnwind case. 266 bool RequiresNoCaptureBeforeUnwind; 267 if (isNotVisibleOnUnwind(getUnderlyingObject(V), 268 RequiresNoCaptureBeforeUnwind) && 269 !RequiresNoCaptureBeforeUnwind) 270 return false; 271 272 // Check whether there are any unwinding instructions in the range. 273 return any_of(make_range(Start->getIterator(), End->getIterator()), 274 [](const Instruction &I) { return I.mayThrow(); }); 275 } 276 277 void MemCpyOptPass::eraseInstruction(Instruction *I) { 278 MSSAU->removeMemoryAccess(I); 279 I->eraseFromParent(); 280 } 281 282 // Check for mod or ref of Loc between Start and End, excluding both boundaries. 283 // Start and End must be in the same block. 284 // If SkippedLifetimeStart is provided, skip over one clobbering lifetime.start 285 // intrinsic and store it inside SkippedLifetimeStart. 286 static bool accessedBetween(BatchAAResults &AA, MemoryLocation Loc, 287 const MemoryUseOrDef *Start, 288 const MemoryUseOrDef *End, 289 Instruction **SkippedLifetimeStart = nullptr) { 290 assert(Start->getBlock() == End->getBlock() && "Only local supported"); 291 for (const MemoryAccess &MA : 292 make_range(++Start->getIterator(), End->getIterator())) { 293 Instruction *I = cast<MemoryUseOrDef>(MA).getMemoryInst(); 294 if (isModOrRefSet(AA.getModRefInfo(I, Loc))) { 295 auto *II = dyn_cast<IntrinsicInst>(I); 296 if (II && II->getIntrinsicID() == Intrinsic::lifetime_start && 297 SkippedLifetimeStart && !*SkippedLifetimeStart) { 298 *SkippedLifetimeStart = I; 299 continue; 300 } 301 302 return true; 303 } 304 } 305 return false; 306 } 307 308 // Check for mod of Loc between Start and End, excluding both boundaries. 309 // Start and End can be in different blocks. 310 static bool writtenBetween(MemorySSA *MSSA, BatchAAResults &AA, 311 MemoryLocation Loc, const MemoryUseOrDef *Start, 312 const MemoryUseOrDef *End) { 313 if (isa<MemoryUse>(End)) { 314 // For MemoryUses, getClobberingMemoryAccess may skip non-clobbering writes. 315 // Manually check read accesses between Start and End, if they are in the 316 // same block, for clobbers. Otherwise assume Loc is clobbered. 317 return Start->getBlock() != End->getBlock() || 318 any_of( 319 make_range(std::next(Start->getIterator()), End->getIterator()), 320 [&AA, Loc](const MemoryAccess &Acc) { 321 if (isa<MemoryUse>(&Acc)) 322 return false; 323 Instruction *AccInst = 324 cast<MemoryUseOrDef>(&Acc)->getMemoryInst(); 325 return isModSet(AA.getModRefInfo(AccInst, Loc)); 326 }); 327 } 328 329 // TODO: Only walk until we hit Start. 
330 MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( 331 End->getDefiningAccess(), Loc, AA); 332 return !MSSA->dominates(Clobber, Start); 333 } 334 335 // Update AA metadata 336 static void combineAAMetadata(Instruction *ReplInst, Instruction *I) { 337 // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be 338 // handled here, but combineMetadata doesn't support them yet 339 unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, 340 LLVMContext::MD_noalias, 341 LLVMContext::MD_invariant_group, 342 LLVMContext::MD_access_group}; 343 combineMetadata(ReplInst, I, KnownIDs, true); 344 } 345 346 /// When scanning forward over instructions, we look for some other patterns to 347 /// fold away. In particular, this looks for stores to neighboring locations of 348 /// memory. If it sees enough consecutive ones, it attempts to merge them 349 /// together into a memcpy/memset. 350 Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst, 351 Value *StartPtr, 352 Value *ByteVal) { 353 const DataLayout &DL = StartInst->getModule()->getDataLayout(); 354 355 // We can't track scalable types 356 if (auto *SI = dyn_cast<StoreInst>(StartInst)) 357 if (DL.getTypeStoreSize(SI->getOperand(0)->getType()).isScalable()) 358 return nullptr; 359 360 // Okay, so we now have a single store that can be splatable. Scan to find 361 // all subsequent stores of the same value to offset from the same pointer. 362 // Join these together into ranges, so we can decide whether contiguous blocks 363 // are stored. 364 MemsetRanges Ranges(DL); 365 366 BasicBlock::iterator BI(StartInst); 367 368 // Keeps track of the last memory use or def before the insertion point for 369 // the new memset. The new MemoryDef for the inserted memsets will be inserted 370 // after MemInsertPoint. It points to either LastMemDef or to the last user 371 // before the insertion point of the memset, if there are any such users. 372 MemoryUseOrDef *MemInsertPoint = nullptr; 373 // Keeps track of the last MemoryDef between StartInst and the insertion point 374 // for the new memset. This will become the defining access of the inserted 375 // memsets. 376 MemoryDef *LastMemDef = nullptr; 377 for (++BI; !BI->isTerminator(); ++BI) { 378 auto *CurrentAcc = cast_or_null<MemoryUseOrDef>( 379 MSSAU->getMemorySSA()->getMemoryAccess(&*BI)); 380 if (CurrentAcc) { 381 MemInsertPoint = CurrentAcc; 382 if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc)) 383 LastMemDef = CurrentDef; 384 } 385 386 // Calls that only access inaccessible memory do not block merging 387 // accessible stores. 388 if (auto *CB = dyn_cast<CallBase>(BI)) { 389 if (CB->onlyAccessesInaccessibleMemory()) 390 continue; 391 } 392 393 if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) { 394 // If the instruction is readnone, ignore it, otherwise bail out. We 395 // don't even allow readonly here because we don't want something like: 396 // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A). 397 if (BI->mayWriteToMemory() || BI->mayReadFromMemory()) 398 break; 399 continue; 400 } 401 402 if (auto *NextStore = dyn_cast<StoreInst>(BI)) { 403 // If this is a store, see if we can merge it in. 404 if (!NextStore->isSimple()) break; 405 406 Value *StoredVal = NextStore->getValueOperand(); 407 408 // Don't convert stores of non-integral pointer types to memsets (which 409 // stores integers). 
410 if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType())) 411 break; 412 413 // We can't track ranges involving scalable types. 414 if (DL.getTypeStoreSize(StoredVal->getType()).isScalable()) 415 break; 416 417 // Check to see if this stored value is of the same byte-splattable value. 418 Value *StoredByte = isBytewiseValue(StoredVal, DL); 419 if (isa<UndefValue>(ByteVal) && StoredByte) 420 ByteVal = StoredByte; 421 if (ByteVal != StoredByte) 422 break; 423 424 // Check to see if this store is to a constant offset from the start ptr. 425 std::optional<int64_t> Offset = 426 NextStore->getPointerOperand()->getPointerOffsetFrom(StartPtr, DL); 427 if (!Offset) 428 break; 429 430 Ranges.addStore(*Offset, NextStore); 431 } else { 432 auto *MSI = cast<MemSetInst>(BI); 433 434 if (MSI->isVolatile() || ByteVal != MSI->getValue() || 435 !isa<ConstantInt>(MSI->getLength())) 436 break; 437 438 // Check to see if this store is to a constant offset from the start ptr. 439 std::optional<int64_t> Offset = 440 MSI->getDest()->getPointerOffsetFrom(StartPtr, DL); 441 if (!Offset) 442 break; 443 444 Ranges.addMemSet(*Offset, MSI); 445 } 446 } 447 448 // If we have no ranges, then we just had a single store with nothing that 449 // could be merged in. This is a very common case of course. 450 if (Ranges.empty()) 451 return nullptr; 452 453 // If we had at least one store that could be merged in, add the starting 454 // store as well. We try to avoid this unless there is at least something 455 // interesting as a small compile-time optimization. 456 Ranges.addInst(0, StartInst); 457 458 // If we create any memsets, we put it right before the first instruction that 459 // isn't part of the memset block. This ensure that the memset is dominated 460 // by any addressing instruction needed by the start of the block. 461 IRBuilder<> Builder(&*BI); 462 463 // Now that we have full information about ranges, loop over the ranges and 464 // emit memset's for anything big enough to be worthwhile. 465 Instruction *AMemSet = nullptr; 466 for (const MemsetRange &Range : Ranges) { 467 if (Range.TheStores.size() == 1) continue; 468 469 // If it is profitable to lower this range to memset, do so now. 470 if (!Range.isProfitableToUseMemset(DL)) 471 continue; 472 473 // Otherwise, we do want to transform this! Create a new memset. 474 // Get the starting pointer of the block. 475 StartPtr = Range.StartPtr; 476 477 AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start, 478 Range.Alignment); 479 AMemSet->mergeDIAssignID(Range.TheStores); 480 481 LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI 482 : Range.TheStores) dbgs() 483 << *SI << '\n'; 484 dbgs() << "With: " << *AMemSet << '\n'); 485 if (!Range.TheStores.empty()) 486 AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc()); 487 488 assert(LastMemDef && MemInsertPoint && 489 "Both LastMemDef and MemInsertPoint need to be set"); 490 auto *NewDef = 491 cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI 492 ? MSSAU->createMemoryAccessBefore( 493 AMemSet, LastMemDef, MemInsertPoint) 494 : MSSAU->createMemoryAccessAfter( 495 AMemSet, LastMemDef, MemInsertPoint)); 496 MSSAU->insertDef(NewDef, /*RenameUses=*/true); 497 LastMemDef = NewDef; 498 MemInsertPoint = NewDef; 499 500 // Zap all the stores. 501 for (Instruction *SI : Range.TheStores) 502 eraseInstruction(SI); 503 504 ++NumMemSetInfer; 505 } 506 507 return AMemSet; 508 } 509 510 // This method try to lift a store instruction before position P. 
511 // It will lift the store and its argument + that anything that 512 // may alias with these. 513 // The method returns true if it was successful. 514 bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) { 515 // If the store alias this position, early bail out. 516 MemoryLocation StoreLoc = MemoryLocation::get(SI); 517 if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc))) 518 return false; 519 520 // Keep track of the arguments of all instruction we plan to lift 521 // so we can make sure to lift them as well if appropriate. 522 DenseSet<Instruction*> Args; 523 auto AddArg = [&](Value *Arg) { 524 auto *I = dyn_cast<Instruction>(Arg); 525 if (I && I->getParent() == SI->getParent()) { 526 // Cannot hoist user of P above P 527 if (I == P) return false; 528 Args.insert(I); 529 } 530 return true; 531 }; 532 if (!AddArg(SI->getPointerOperand())) 533 return false; 534 535 // Instruction to lift before P. 536 SmallVector<Instruction *, 8> ToLift{SI}; 537 538 // Memory locations of lifted instructions. 539 SmallVector<MemoryLocation, 8> MemLocs{StoreLoc}; 540 541 // Lifted calls. 542 SmallVector<const CallBase *, 8> Calls; 543 544 const MemoryLocation LoadLoc = MemoryLocation::get(LI); 545 546 for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) { 547 auto *C = &*I; 548 549 // Make sure hoisting does not perform a store that was not guaranteed to 550 // happen. 551 if (!isGuaranteedToTransferExecutionToSuccessor(C)) 552 return false; 553 554 bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, std::nullopt)); 555 556 bool NeedLift = false; 557 if (Args.erase(C)) 558 NeedLift = true; 559 else if (MayAlias) { 560 NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) { 561 return isModOrRefSet(AA->getModRefInfo(C, ML)); 562 }); 563 564 if (!NeedLift) 565 NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) { 566 return isModOrRefSet(AA->getModRefInfo(C, Call)); 567 }); 568 } 569 570 if (!NeedLift) 571 continue; 572 573 if (MayAlias) { 574 // Since LI is implicitly moved downwards past the lifted instructions, 575 // none of them may modify its source. 576 if (isModSet(AA->getModRefInfo(C, LoadLoc))) 577 return false; 578 else if (const auto *Call = dyn_cast<CallBase>(C)) { 579 // If we can't lift this before P, it's game over. 580 if (isModOrRefSet(AA->getModRefInfo(P, Call))) 581 return false; 582 583 Calls.push_back(Call); 584 } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) { 585 // If we can't lift this before P, it's game over. 586 auto ML = MemoryLocation::get(C); 587 if (isModOrRefSet(AA->getModRefInfo(P, ML))) 588 return false; 589 590 MemLocs.push_back(ML); 591 } else 592 // We don't know how to lift this instruction. 593 return false; 594 } 595 596 ToLift.push_back(C); 597 for (Value *Op : C->operands()) 598 if (!AddArg(Op)) 599 return false; 600 } 601 602 // Find MSSA insertion point. Normally P will always have a corresponding 603 // memory access before which we can insert. However, with non-standard AA 604 // pipelines, there may be a mismatch between AA and MSSA, in which case we 605 // will scan for a memory access before P. In either case, we know for sure 606 // that at least the load will have a memory access. 607 // TODO: Simplify this once P will be determined by MSSA, in which case the 608 // discrepancy can no longer occur. 
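  // For illustration, the reordering being performed here looks roughly like
  // (hypothetical IR, assuming the call does not touch %dst):
  //   %v = load i32, ptr %src
  //   call void @clobber(ptr %src)   ; P: may write to %src
  //   store i32 %v, ptr %dst
  // The store (and any instructions computing its operands) is lifted above
  // the call, and the MemorySSA accesses of the lifted instructions are moved
  // after the insertion point found below.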
609 MemoryUseOrDef *MemInsertPoint = nullptr; 610 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) { 611 MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator()); 612 } else { 613 const Instruction *ConstP = P; 614 for (const Instruction &I : make_range(++ConstP->getReverseIterator(), 615 ++LI->getReverseIterator())) { 616 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) { 617 MemInsertPoint = MA; 618 break; 619 } 620 } 621 } 622 623 // We made it, we need to lift. 624 for (auto *I : llvm::reverse(ToLift)) { 625 LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n"); 626 I->moveBefore(P); 627 assert(MemInsertPoint && "Must have found insert point"); 628 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) { 629 MSSAU->moveAfter(MA, MemInsertPoint); 630 MemInsertPoint = MA; 631 } 632 } 633 634 return true; 635 } 636 637 bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI, 638 const DataLayout &DL, 639 BasicBlock::iterator &BBI) { 640 if (!LI->isSimple() || !LI->hasOneUse() || 641 LI->getParent() != SI->getParent()) 642 return false; 643 644 auto *T = LI->getType(); 645 // Don't introduce calls to memcpy/memmove intrinsics out of thin air if 646 // the corresponding libcalls are not available. 647 // TODO: We should really distinguish between libcall availability and 648 // our ability to introduce intrinsics. 649 if (T->isAggregateType() && 650 (EnableMemCpyOptWithoutLibcalls || 651 (TLI->has(LibFunc_memcpy) && TLI->has(LibFunc_memmove)))) { 652 MemoryLocation LoadLoc = MemoryLocation::get(LI); 653 654 // We use alias analysis to check if an instruction may store to 655 // the memory we load from in between the load and the store. If 656 // such an instruction is found, we try to promote there instead 657 // of at the store position. 658 // TODO: Can use MSSA for this. 659 Instruction *P = SI; 660 for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) { 661 if (isModSet(AA->getModRefInfo(&I, LoadLoc))) { 662 P = &I; 663 break; 664 } 665 } 666 667 // We found an instruction that may write to the loaded memory. 668 // We can try to promote at this position instead of the store 669 // position if nothing aliases the store memory after this and the store 670 // destination is not in the range. 671 if (P && P != SI) { 672 if (!moveUp(SI, P, LI)) 673 P = nullptr; 674 } 675 676 // If a valid insertion position is found, then we can promote 677 // the load/store pair to a memcpy. 678 if (P) { 679 // If we load from memory that may alias the memory we store to, 680 // memmove must be used to preserve semantic. If not, memcpy can 681 // be used. Also, if we load from constant memory, memcpy can be used 682 // as the constant memory won't be modified. 
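      // For illustration (hypothetical IR): promoting
      //   %v = load %T, ptr %p
      //   store %T %v, ptr %q        ; %q may alias %p
      // must use memmove rather than memcpy, because memcpy requires that the
      // source and destination do not overlap.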
683 bool UseMemMove = false; 684 if (isModSet(AA->getModRefInfo(SI, LoadLoc))) 685 UseMemMove = true; 686 687 uint64_t Size = DL.getTypeStoreSize(T); 688 689 IRBuilder<> Builder(P); 690 Instruction *M; 691 if (UseMemMove) 692 M = Builder.CreateMemMove( 693 SI->getPointerOperand(), SI->getAlign(), 694 LI->getPointerOperand(), LI->getAlign(), Size); 695 else 696 M = Builder.CreateMemCpy( 697 SI->getPointerOperand(), SI->getAlign(), 698 LI->getPointerOperand(), LI->getAlign(), Size); 699 M->copyMetadata(*SI, LLVMContext::MD_DIAssignID); 700 701 LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => " 702 << *M << "\n"); 703 704 auto *LastDef = 705 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)); 706 auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef); 707 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); 708 709 eraseInstruction(SI); 710 eraseInstruction(LI); 711 ++NumMemCpyInstr; 712 713 // Make sure we do not invalidate the iterator. 714 BBI = M->getIterator(); 715 return true; 716 } 717 } 718 719 // Detect cases where we're performing call slot forwarding, but 720 // happen to be using a load-store pair to implement it, rather than 721 // a memcpy. 722 BatchAAResults BAA(*AA); 723 auto GetCall = [&]() -> CallInst * { 724 // We defer this expensive clobber walk until the cheap checks 725 // have been done on the source inside performCallSlotOptzn. 726 if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>( 727 MSSA->getWalker()->getClobberingMemoryAccess(LI, BAA))) 728 return dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst()); 729 return nullptr; 730 }; 731 732 bool Changed = performCallSlotOptzn( 733 LI, SI, SI->getPointerOperand()->stripPointerCasts(), 734 LI->getPointerOperand()->stripPointerCasts(), 735 DL.getTypeStoreSize(SI->getOperand(0)->getType()), 736 std::min(SI->getAlign(), LI->getAlign()), BAA, GetCall); 737 if (Changed) { 738 eraseInstruction(SI); 739 eraseInstruction(LI); 740 ++NumMemCpyInstr; 741 return true; 742 } 743 744 return false; 745 } 746 747 bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) { 748 if (!SI->isSimple()) return false; 749 750 // Avoid merging nontemporal stores since the resulting 751 // memcpy/memset would not be able to preserve the nontemporal hint. 752 // In theory we could teach how to propagate the !nontemporal metadata to 753 // memset calls. However, that change would force the backend to 754 // conservatively expand !nontemporal memset calls back to sequences of 755 // store instructions (effectively undoing the merging). 756 if (SI->getMetadata(LLVMContext::MD_nontemporal)) 757 return false; 758 759 const DataLayout &DL = SI->getModule()->getDataLayout(); 760 761 Value *StoredVal = SI->getValueOperand(); 762 763 // Not all the transforms below are correct for non-integral pointers, bail 764 // until we've audited the individual pieces. 765 if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType())) 766 return false; 767 768 // Load to store forwarding can be interpreted as memcpy. 769 if (auto *LI = dyn_cast<LoadInst>(StoredVal)) 770 return processStoreOfLoad(SI, LI, DL, BBI); 771 772 // The following code creates memset intrinsics out of thin air. Don't do 773 // this if the corresponding libfunc is not available. 774 // TODO: We should really distinguish between libcall availability and 775 // our ability to introduce intrinsics. 
776 if (!(TLI->has(LibFunc_memset) || EnableMemCpyOptWithoutLibcalls)) 777 return false; 778 779 // There are two cases that are interesting for this code to handle: memcpy 780 // and memset. Right now we only handle memset. 781 782 // Ensure that the value being stored is something that can be memset'able a 783 // byte at a time like "0" or "-1" or any width, as well as things like 784 // 0xA0A0A0A0 and 0.0. 785 auto *V = SI->getOperand(0); 786 if (Value *ByteVal = isBytewiseValue(V, DL)) { 787 if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(), 788 ByteVal)) { 789 BBI = I->getIterator(); // Don't invalidate iterator. 790 return true; 791 } 792 793 // If we have an aggregate, we try to promote it to memset regardless 794 // of opportunity for merging as it can expose optimization opportunities 795 // in subsequent passes. 796 auto *T = V->getType(); 797 if (T->isAggregateType()) { 798 uint64_t Size = DL.getTypeStoreSize(T); 799 IRBuilder<> Builder(SI); 800 auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, 801 SI->getAlign()); 802 M->copyMetadata(*SI, LLVMContext::MD_DIAssignID); 803 804 LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n"); 805 806 // The newly inserted memset is immediately overwritten by the original 807 // store, so we do not need to rename uses. 808 auto *StoreDef = cast<MemoryDef>(MSSA->getMemoryAccess(SI)); 809 auto *NewAccess = MSSAU->createMemoryAccessBefore( 810 M, StoreDef->getDefiningAccess(), StoreDef); 811 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/false); 812 813 eraseInstruction(SI); 814 NumMemSetInfer++; 815 816 // Make sure we do not invalidate the iterator. 817 BBI = M->getIterator(); 818 return true; 819 } 820 } 821 822 return false; 823 } 824 825 bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) { 826 // See if there is another memset or store neighboring this memset which 827 // allows us to widen out the memset to do a single larger store. 828 if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile()) 829 if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(), 830 MSI->getValue())) { 831 BBI = I->getIterator(); // Don't invalidate iterator. 832 return true; 833 } 834 return false; 835 } 836 837 /// Takes a memcpy and a call that it depends on, 838 /// and checks for the possibility of a call slot optimization by having 839 /// the call write its result directly into the destination of the memcpy. 840 bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad, 841 Instruction *cpyStore, Value *cpyDest, 842 Value *cpySrc, TypeSize cpySize, 843 Align cpyDestAlign, BatchAAResults &BAA, 844 std::function<CallInst *()> GetC) { 845 // The general transformation to keep in mind is 846 // 847 // call @func(..., src, ...) 848 // memcpy(dest, src, ...) 849 // 850 // -> 851 // 852 // memcpy(dest, src, ...) 853 // call @func(..., dest, ...) 854 // 855 // Since moving the memcpy is technically awkward, we additionally check that 856 // src only holds uninitialized values at the moment of the call, meaning that 857 // the memcpy can be discarded rather than moved. 858 859 // We can't optimize scalable types. 860 if (cpySize.isScalable()) 861 return false; 862 863 // Require that src be an alloca. This simplifies the reasoning considerably. 
864 auto *srcAlloca = dyn_cast<AllocaInst>(cpySrc); 865 if (!srcAlloca) 866 return false; 867 868 ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize()); 869 if (!srcArraySize) 870 return false; 871 872 const DataLayout &DL = cpyLoad->getModule()->getDataLayout(); 873 uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) * 874 srcArraySize->getZExtValue(); 875 876 if (cpySize < srcSize) 877 return false; 878 879 CallInst *C = GetC(); 880 if (!C) 881 return false; 882 883 // Lifetime marks shouldn't be operated on. 884 if (Function *F = C->getCalledFunction()) 885 if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start) 886 return false; 887 888 889 if (C->getParent() != cpyStore->getParent()) { 890 LLVM_DEBUG(dbgs() << "Call Slot: block local restriction\n"); 891 return false; 892 } 893 894 MemoryLocation DestLoc = isa<StoreInst>(cpyStore) ? 895 MemoryLocation::get(cpyStore) : 896 MemoryLocation::getForDest(cast<MemCpyInst>(cpyStore)); 897 898 // Check that nothing touches the dest of the copy between 899 // the call and the store/memcpy. 900 Instruction *SkippedLifetimeStart = nullptr; 901 if (accessedBetween(BAA, DestLoc, MSSA->getMemoryAccess(C), 902 MSSA->getMemoryAccess(cpyStore), &SkippedLifetimeStart)) { 903 LLVM_DEBUG(dbgs() << "Call Slot: Dest pointer modified after call\n"); 904 return false; 905 } 906 907 // If we need to move a lifetime.start above the call, make sure that we can 908 // actually do so. If the argument is bitcasted for example, we would have to 909 // move the bitcast as well, which we don't handle. 910 if (SkippedLifetimeStart) { 911 auto *LifetimeArg = 912 dyn_cast<Instruction>(SkippedLifetimeStart->getOperand(1)); 913 if (LifetimeArg && LifetimeArg->getParent() == C->getParent() && 914 C->comesBefore(LifetimeArg)) 915 return false; 916 } 917 918 // Check that accessing the first srcSize bytes of dest will not cause a 919 // trap. Otherwise the transform is invalid since it might cause a trap 920 // to occur earlier than it otherwise would. 921 if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpySize), 922 DL, C, AC, DT)) { 923 LLVM_DEBUG(dbgs() << "Call Slot: Dest pointer not dereferenceable\n"); 924 return false; 925 } 926 927 // Make sure that nothing can observe cpyDest being written early. There are 928 // a number of cases to consider: 929 // 1. cpyDest cannot be accessed between C and cpyStore as a precondition of 930 // the transform. 931 // 2. C itself may not access cpyDest (prior to the transform). This is 932 // checked further below. 933 // 3. If cpyDest is accessible to the caller of this function (potentially 934 // captured and not based on an alloca), we need to ensure that we cannot 935 // unwind between C and cpyStore. This is checked here. 936 // 4. If cpyDest is potentially captured, there may be accesses to it from 937 // another thread. In this case, we need to check that cpyStore is 938 // guaranteed to be executed if C is. As it is a non-atomic access, it 939 // renders accesses from other threads undefined. 940 // TODO: This is currently not checked. 941 if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore)) { 942 LLVM_DEBUG(dbgs() << "Call Slot: Dest may be visible through unwinding\n"); 943 return false; 944 } 945 946 // Check that dest points to memory that is at least as aligned as src. 
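  // For example (illustrative): if the source alloca is align 16 but cpyDest
  // is only known to be align 8, the forwarding is only legal if cpyDest is
  // itself an alloca whose alignment we can raise to 16 below; otherwise we
  // bail out.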
947 Align srcAlign = srcAlloca->getAlign(); 948 bool isDestSufficientlyAligned = srcAlign <= cpyDestAlign; 949 // If dest is not aligned enough and we can't increase its alignment then 950 // bail out. 951 if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest)) { 952 LLVM_DEBUG(dbgs() << "Call Slot: Dest not sufficiently aligned\n"); 953 return false; 954 } 955 956 // Check that src is not accessed except via the call and the memcpy. This 957 // guarantees that it holds only undefined values when passed in (so the final 958 // memcpy can be dropped), that it is not read or written between the call and 959 // the memcpy, and that writing beyond the end of it is undefined. 960 SmallVector<User *, 8> srcUseList(srcAlloca->users()); 961 while (!srcUseList.empty()) { 962 User *U = srcUseList.pop_back_val(); 963 964 if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) { 965 append_range(srcUseList, U->users()); 966 continue; 967 } 968 if (const auto *G = dyn_cast<GetElementPtrInst>(U)) { 969 if (!G->hasAllZeroIndices()) 970 return false; 971 972 append_range(srcUseList, U->users()); 973 continue; 974 } 975 if (const auto *IT = dyn_cast<IntrinsicInst>(U)) 976 if (IT->isLifetimeStartOrEnd()) 977 continue; 978 979 if (U != C && U != cpyLoad) 980 return false; 981 } 982 983 // Check whether src is captured by the called function, in which case there 984 // may be further indirect uses of src. 985 bool SrcIsCaptured = any_of(C->args(), [&](Use &U) { 986 return U->stripPointerCasts() == cpySrc && 987 !C->doesNotCapture(C->getArgOperandNo(&U)); 988 }); 989 990 // If src is captured, then check whether there are any potential uses of 991 // src through the captured pointer before the lifetime of src ends, either 992 // due to a lifetime.end or a return from the function. 993 if (SrcIsCaptured) { 994 // Check that dest is not captured before/at the call. We have already 995 // checked that src is not captured before it. If either had been captured, 996 // then the call might be comparing the argument against the captured dest 997 // or src pointer. 998 Value *DestObj = getUnderlyingObject(cpyDest); 999 if (!isIdentifiedFunctionLocal(DestObj) || 1000 PointerMayBeCapturedBefore(DestObj, /* ReturnCaptures */ true, 1001 /* StoreCaptures */ true, C, DT, 1002 /* IncludeI */ true)) 1003 return false; 1004 1005 MemoryLocation SrcLoc = 1006 MemoryLocation(srcAlloca, LocationSize::precise(srcSize)); 1007 for (Instruction &I : 1008 make_range(++C->getIterator(), C->getParent()->end())) { 1009 // Lifetime of srcAlloca ends at lifetime.end. 1010 if (auto *II = dyn_cast<IntrinsicInst>(&I)) { 1011 if (II->getIntrinsicID() == Intrinsic::lifetime_end && 1012 II->getArgOperand(1)->stripPointerCasts() == srcAlloca && 1013 cast<ConstantInt>(II->getArgOperand(0))->uge(srcSize)) 1014 break; 1015 } 1016 1017 // Lifetime of srcAlloca ends at return. 1018 if (isa<ReturnInst>(&I)) 1019 break; 1020 1021 // Ignore the direct read of src in the load. 1022 if (&I == cpyLoad) 1023 continue; 1024 1025 // Check whether this instruction may mod/ref src through the captured 1026 // pointer (we have already any direct mod/refs in the loop above). 1027 // Also bail if we hit a terminator, as we don't want to scan into other 1028 // blocks. 1029 if (isModOrRefSet(BAA.getModRefInfo(&I, SrcLoc)) || I.isTerminator()) 1030 return false; 1031 } 1032 } 1033 1034 // Since we're changing the parameter to the callsite, we need to make sure 1035 // that what would be the new parameter dominates the callsite. 
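  // For illustration (hypothetical IR, assuming %base dominates the call):
  //   call void @func(ptr %src)
  //   %dest = getelementptr inbounds i8, ptr %base, i64 16
  //   memcpy(%dest <- %src)
  // Here %dest does not dominate the call, but a constant-index GEP like this
  // can simply be moved above the call, which is what the check below does.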
1036 if (!DT->dominates(cpyDest, C)) { 1037 // Support moving a constant index GEP before the call. 1038 auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest); 1039 if (GEP && GEP->hasAllConstantIndices() && 1040 DT->dominates(GEP->getPointerOperand(), C)) 1041 GEP->moveBefore(C); 1042 else 1043 return false; 1044 } 1045 1046 // In addition to knowing that the call does not access src in some 1047 // unexpected manner, for example via a global, which we deduce from 1048 // the use analysis, we also need to know that it does not sneakily 1049 // access dest. We rely on AA to figure this out for us. 1050 MemoryLocation DestWithSrcSize(cpyDest, LocationSize::precise(srcSize)); 1051 ModRefInfo MR = BAA.getModRefInfo(C, DestWithSrcSize); 1052 // If necessary, perform additional analysis. 1053 if (isModOrRefSet(MR)) 1054 MR = BAA.callCapturesBefore(C, DestWithSrcSize, DT); 1055 if (isModOrRefSet(MR)) 1056 return false; 1057 1058 // We can't create address space casts here because we don't know if they're 1059 // safe for the target. 1060 if (cpySrc->getType()->getPointerAddressSpace() != 1061 cpyDest->getType()->getPointerAddressSpace()) 1062 return false; 1063 for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) 1064 if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc && 1065 cpySrc->getType()->getPointerAddressSpace() != 1066 C->getArgOperand(ArgI)->getType()->getPointerAddressSpace()) 1067 return false; 1068 1069 // All the checks have passed, so do the transformation. 1070 bool changedArgument = false; 1071 for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) 1072 if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) { 1073 Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest 1074 : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), 1075 cpyDest->getName(), C); 1076 changedArgument = true; 1077 if (C->getArgOperand(ArgI)->getType() == Dest->getType()) 1078 C->setArgOperand(ArgI, Dest); 1079 else 1080 C->setArgOperand(ArgI, CastInst::CreatePointerCast( 1081 Dest, C->getArgOperand(ArgI)->getType(), 1082 Dest->getName(), C)); 1083 } 1084 1085 if (!changedArgument) 1086 return false; 1087 1088 // If the destination wasn't sufficiently aligned then increase its alignment. 1089 if (!isDestSufficientlyAligned) { 1090 assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!"); 1091 cast<AllocaInst>(cpyDest)->setAlignment(srcAlign); 1092 } 1093 1094 if (SkippedLifetimeStart) { 1095 SkippedLifetimeStart->moveBefore(C); 1096 MSSAU->moveBefore(MSSA->getMemoryAccess(SkippedLifetimeStart), 1097 MSSA->getMemoryAccess(C)); 1098 } 1099 1100 combineAAMetadata(C, cpyLoad); 1101 if (cpyLoad != cpyStore) 1102 combineAAMetadata(C, cpyStore); 1103 1104 ++NumCallSlot; 1105 return true; 1106 } 1107 1108 /// We've found that the (upward scanning) memory dependence of memcpy 'M' is 1109 /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can. 1110 bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M, 1111 MemCpyInst *MDep, 1112 BatchAAResults &BAA) { 1113 // We can only transforms memcpy's where the dest of one is the source of the 1114 // other. 1115 if (M->getSource() != MDep->getDest() || MDep->isVolatile()) 1116 return false; 1117 1118 // If dep instruction is reading from our current input, then it is a noop 1119 // transfer and substituting the input won't change this instruction. Just 1120 // ignore the input and let someone else zap MDep. 
This handles cases like: 1121 // memcpy(a <- a) 1122 // memcpy(b <- a) 1123 if (M->getSource() == MDep->getSource()) 1124 return false; 1125 1126 // Second, the length of the memcpy's must be the same, or the preceding one 1127 // must be larger than the following one. 1128 if (MDep->getLength() != M->getLength()) { 1129 auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength()); 1130 auto *MLen = dyn_cast<ConstantInt>(M->getLength()); 1131 if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue()) 1132 return false; 1133 } 1134 1135 // Verify that the copied-from memory doesn't change in between the two 1136 // transfers. For example, in: 1137 // memcpy(a <- b) 1138 // *b = 42; 1139 // memcpy(c <- a) 1140 // It would be invalid to transform the second memcpy into memcpy(c <- b). 1141 // 1142 // TODO: If the code between M and MDep is transparent to the destination "c", 1143 // then we could still perform the xform by moving M up to the first memcpy. 1144 // TODO: It would be sufficient to check the MDep source up to the memcpy 1145 // size of M, rather than MDep. 1146 if (writtenBetween(MSSA, BAA, MemoryLocation::getForSource(MDep), 1147 MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M))) 1148 return false; 1149 1150 // If the dest of the second might alias the source of the first, then the 1151 // source and dest might overlap. In addition, if the source of the first 1152 // points to constant memory, they won't overlap by definition. Otherwise, we 1153 // still want to eliminate the intermediate value, but we have to generate a 1154 // memmove instead of memcpy. 1155 bool UseMemMove = false; 1156 if (isModSet(BAA.getModRefInfo(M, MemoryLocation::getForSource(MDep)))) { 1157 // Don't convert llvm.memcpy.inline into memmove because memmove can be 1158 // lowered as a call, and that is not allowed for llvm.memcpy.inline (and 1159 // there is no inline version of llvm.memmove) 1160 if (isa<MemCpyInlineInst>(M)) 1161 return false; 1162 UseMemMove = true; 1163 } 1164 1165 // If all checks passed, then we can transform M. 1166 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n" 1167 << *MDep << '\n' << *M << '\n'); 1168 1169 // TODO: Is this worth it if we're creating a less aligned memcpy? For 1170 // example we could be moving from movaps -> movq on x86. 1171 IRBuilder<> Builder(M); 1172 Instruction *NewM; 1173 if (UseMemMove) 1174 NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(), 1175 MDep->getRawSource(), MDep->getSourceAlign(), 1176 M->getLength(), M->isVolatile()); 1177 else if (isa<MemCpyInlineInst>(M)) { 1178 // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is 1179 // never allowed since that would allow the latter to be lowered as a call 1180 // to an external function. 1181 NewM = Builder.CreateMemCpyInline( 1182 M->getRawDest(), M->getDestAlign(), MDep->getRawSource(), 1183 MDep->getSourceAlign(), M->getLength(), M->isVolatile()); 1184 } else 1185 NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(), 1186 MDep->getRawSource(), MDep->getSourceAlign(), 1187 M->getLength(), M->isVolatile()); 1188 NewM->copyMetadata(*M, LLVMContext::MD_DIAssignID); 1189 1190 assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M))); 1191 auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)); 1192 auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); 1193 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); 1194 1195 // Remove the instruction we're replacing. 
1196 eraseInstruction(M); 1197 ++NumMemCpyInstr; 1198 return true; 1199 } 1200 1201 /// We've found that the (upward scanning) memory dependence of \p MemCpy is 1202 /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that 1203 /// weren't copied over by \p MemCpy. 1204 /// 1205 /// In other words, transform: 1206 /// \code 1207 /// memset(dst, c, dst_size); 1208 /// ... 1209 /// memcpy(dst, src, src_size); 1210 /// \endcode 1211 /// into: 1212 /// \code 1213 /// ... 1214 /// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size); 1215 /// memcpy(dst, src, src_size); 1216 /// \endcode 1217 /// 1218 /// The memset is sunk to just before the memcpy to ensure that src_size is 1219 /// present when emitting the simplified memset. 1220 bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy, 1221 MemSetInst *MemSet, 1222 BatchAAResults &BAA) { 1223 // We can only transform memset/memcpy with the same destination. 1224 if (!BAA.isMustAlias(MemSet->getDest(), MemCpy->getDest())) 1225 return false; 1226 1227 // Check that src and dst of the memcpy aren't the same. While memcpy 1228 // operands cannot partially overlap, exact equality is allowed. 1229 if (isModSet(BAA.getModRefInfo(MemCpy, MemoryLocation::getForSource(MemCpy)))) 1230 return false; 1231 1232 // We know that dst up to src_size is not written. We now need to make sure 1233 // that dst up to dst_size is not accessed. (If we did not move the memset, 1234 // checking for reads would be sufficient.) 1235 if (accessedBetween(BAA, MemoryLocation::getForDest(MemSet), 1236 MSSA->getMemoryAccess(MemSet), 1237 MSSA->getMemoryAccess(MemCpy))) 1238 return false; 1239 1240 // Use the same i8* dest as the memcpy, killing the memset dest if different. 1241 Value *Dest = MemCpy->getRawDest(); 1242 Value *DestSize = MemSet->getLength(); 1243 Value *SrcSize = MemCpy->getLength(); 1244 1245 if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy)) 1246 return false; 1247 1248 // If the sizes are the same, simply drop the memset instead of generating 1249 // a replacement with zero size. 1250 if (DestSize == SrcSize) { 1251 eraseInstruction(MemSet); 1252 return true; 1253 } 1254 1255 // By default, create an unaligned memset. 1256 Align Alignment = Align(1); 1257 // If Dest is aligned, and SrcSize is constant, use the minimum alignment 1258 // of the sum. 1259 const Align DestAlign = std::max(MemSet->getDestAlign().valueOrOne(), 1260 MemCpy->getDestAlign().valueOrOne()); 1261 if (DestAlign > 1) 1262 if (auto *SrcSizeC = dyn_cast<ConstantInt>(SrcSize)) 1263 Alignment = commonAlignment(DestAlign, SrcSizeC->getZExtValue()); 1264 1265 IRBuilder<> Builder(MemCpy); 1266 1267 // Preserve the debug location of the old memset for the code emitted here 1268 // related to the new memset. This is correct according to the rules in 1269 // https://llvm.org/docs/HowToUpdateDebugInfo.html about "when to preserve an 1270 // instruction location", given that we move the memset within the basic 1271 // block. 1272 assert(MemSet->getParent() == MemCpy->getParent() && 1273 "Preserving debug location based on moving memset within BB."); 1274 Builder.SetCurrentDebugLocation(MemSet->getDebugLoc()); 1275 1276 // If the sizes have different types, zext the smaller one. 
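  // For example (illustrative): if dst_size is an i32 and src_size is an i64,
  // dst_size is zero-extended to i64 so that the compare, subtract and select
  // below all operate on a single type.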
1277 if (DestSize->getType() != SrcSize->getType()) { 1278 if (DestSize->getType()->getIntegerBitWidth() > 1279 SrcSize->getType()->getIntegerBitWidth()) 1280 SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType()); 1281 else 1282 DestSize = Builder.CreateZExt(DestSize, SrcSize->getType()); 1283 } 1284 1285 Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize); 1286 Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize); 1287 Value *MemsetLen = Builder.CreateSelect( 1288 Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff); 1289 unsigned DestAS = Dest->getType()->getPointerAddressSpace(); 1290 Instruction *NewMemSet = Builder.CreateMemSet( 1291 Builder.CreateGEP( 1292 Builder.getInt8Ty(), 1293 Builder.CreatePointerCast(Dest, Builder.getInt8PtrTy(DestAS)), 1294 SrcSize), 1295 MemSet->getOperand(1), MemsetLen, Alignment); 1296 1297 assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) && 1298 "MemCpy must be a MemoryDef"); 1299 // The new memset is inserted before the memcpy, and it is known that the 1300 // memcpy's defining access is the memset about to be removed. 1301 auto *LastDef = 1302 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); 1303 auto *NewAccess = MSSAU->createMemoryAccessBefore( 1304 NewMemSet, LastDef->getDefiningAccess(), LastDef); 1305 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); 1306 1307 eraseInstruction(MemSet); 1308 return true; 1309 } 1310 1311 /// Determine whether the instruction has undefined content for the given Size, 1312 /// either because it was freshly alloca'd or started its lifetime. 1313 static bool hasUndefContents(MemorySSA *MSSA, BatchAAResults &AA, Value *V, 1314 MemoryDef *Def, Value *Size) { 1315 if (MSSA->isLiveOnEntryDef(Def)) 1316 return isa<AllocaInst>(getUnderlyingObject(V)); 1317 1318 if (auto *II = dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) { 1319 if (II->getIntrinsicID() == Intrinsic::lifetime_start) { 1320 auto *LTSize = cast<ConstantInt>(II->getArgOperand(0)); 1321 1322 if (auto *CSize = dyn_cast<ConstantInt>(Size)) { 1323 if (AA.isMustAlias(V, II->getArgOperand(1)) && 1324 LTSize->getZExtValue() >= CSize->getZExtValue()) 1325 return true; 1326 } 1327 1328 // If the lifetime.start covers a whole alloca (as it almost always 1329 // does) and we're querying a pointer based on that alloca, then we know 1330 // the memory is definitely undef, regardless of how exactly we alias. 1331 // The size also doesn't matter, as an out-of-bounds access would be UB. 1332 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V))) { 1333 if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) { 1334 const DataLayout &DL = Alloca->getModule()->getDataLayout(); 1335 if (std::optional<TypeSize> AllocaSize = 1336 Alloca->getAllocationSize(DL)) 1337 if (*AllocaSize == LTSize->getValue()) 1338 return true; 1339 } 1340 } 1341 } 1342 } 1343 1344 return false; 1345 } 1346 1347 /// Transform memcpy to memset when its source was just memset. 1348 /// In other words, turn: 1349 /// \code 1350 /// memset(dst1, c, dst1_size); 1351 /// memcpy(dst2, dst1, dst2_size); 1352 /// \endcode 1353 /// into: 1354 /// \code 1355 /// memset(dst1, c, dst1_size); 1356 /// memset(dst2, c, dst2_size); 1357 /// \endcode 1358 /// When dst2_size <= dst1_size. 1359 bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, 1360 MemSetInst *MemSet, 1361 BatchAAResults &BAA) { 1362 // Make sure that memcpy(..., memset(...), ...), that is we are memsetting and 1363 // memcpying from the same address. 
Otherwise it is hard to reason about. 1364 if (!BAA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource())) 1365 return false; 1366 1367 Value *MemSetSize = MemSet->getLength(); 1368 Value *CopySize = MemCpy->getLength(); 1369 1370 if (MemSetSize != CopySize) { 1371 // Make sure the memcpy doesn't read any more than what the memset wrote. 1372 // Don't worry about sizes larger than i64. 1373 1374 // A known memset size is required. 1375 auto *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize); 1376 if (!CMemSetSize) 1377 return false; 1378 1379 // A known memcpy size is also required. 1380 auto *CCopySize = dyn_cast<ConstantInt>(CopySize); 1381 if (!CCopySize) 1382 return false; 1383 if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) { 1384 // If the memcpy is larger than the memset, but the memory was undef prior 1385 // to the memset, we can just ignore the tail. Technically we're only 1386 // interested in the bytes from MemSetSize..CopySize here, but as we can't 1387 // easily represent this location, we use the full 0..CopySize range. 1388 MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy); 1389 bool CanReduceSize = false; 1390 MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet); 1391 MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( 1392 MemSetAccess->getDefiningAccess(), MemCpyLoc, BAA); 1393 if (auto *MD = dyn_cast<MemoryDef>(Clobber)) 1394 if (hasUndefContents(MSSA, BAA, MemCpy->getSource(), MD, CopySize)) 1395 CanReduceSize = true; 1396 1397 if (!CanReduceSize) 1398 return false; 1399 CopySize = MemSetSize; 1400 } 1401 } 1402 1403 IRBuilder<> Builder(MemCpy); 1404 Instruction *NewM = 1405 Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1), 1406 CopySize, MemCpy->getDestAlign()); 1407 auto *LastDef = 1408 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); 1409 auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); 1410 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); 1411 1412 return true; 1413 } 1414 1415 /// Perform simplification of memcpy's. If we have memcpy A 1416 /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite 1417 /// B to be a memcpy from X to Z (or potentially a memmove, depending on 1418 /// circumstances). This allows later passes to remove the first memcpy 1419 /// altogether. 1420 bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) { 1421 // We can only optimize non-volatile memcpy's. 1422 if (M->isVolatile()) return false; 1423 1424 // If the source and destination of the memcpy are the same, then zap it. 1425 if (M->getSource() == M->getDest()) { 1426 ++BBI; 1427 eraseInstruction(M); 1428 return true; 1429 } 1430 1431 // If copying from a constant, try to turn the memcpy into a memset. 
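  // For illustration: copying from a constant global whose initializer is a
  // repeated byte, e.g.
  //   @g = constant [8 x i8] zeroinitializer
  //   memcpy(dst <- @g, 8)
  // can instead be emitted as memset(dst, 0, 8).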
  if (auto *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM = Builder.CreateMemSet(
            M->getRawDest(), ByteVal, M->getLength(), M->getDestAlign(), false);
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
        auto *NewAccess =
            MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  BatchAAResults BAA(*AA);
  MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
  // FIXME: Not using getClobberingMemoryAccess() here due to PR54682.
  MemoryAccess *AnyClobber = MA->getDefiningAccess();
  MemoryLocation DestLoc = MemoryLocation::getForDest(M);
  const MemoryAccess *DestClobber =
      MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc, BAA);

  // Try to turn a partially redundant memset + memcpy into
  // smaller memset + memcpy. We don't need the memcpy size for this.
  // The memcpy must post-dom the memset, so limit this to the same basic
  // block. A non-local generalization is likely not worthwhile.
  if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
    if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
      if (DestClobber->getBlock() == M->getParent())
        if (processMemSetMemCpyDependence(M, MDep, BAA))
          return true;

  MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
      AnyClobber, MemoryLocation::getForSource(M), BAA);

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started
  //      its lifetime copies undefined data, and we can therefore eliminate
  //      the memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
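  // For illustration, case (d) above turns:
  //   memset(%buf, 42, 16)
  //   memcpy(%dst <- %buf, 16)
  // into
  //   memset(%buf, 42, 16)
  //   memset(%dst, 42, 16)
  // leaving the first memset for DSE to remove if %buf has no other uses.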
  if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
    if (Instruction *MI = MD->getMemoryInst()) {
      if (auto *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
        if (auto *C = dyn_cast<CallInst>(MI)) {
          if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                   TypeSize::getFixed(CopySize->getZExtValue()),
                                   M->getDestAlign().valueOrOne(), BAA,
                                   [C]() -> CallInst * { return C; })) {
            LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
                              << "    call: " << *C << "\n"
                              << "    memcpy: " << *M << "\n");
            eraseInstruction(M);
            ++NumMemCpyInstr;
            return true;
          }
        }
      }
      if (auto *MDep = dyn_cast<MemCpyInst>(MI))
        return processMemCpyMemCpyDependence(M, MDep, BAA);
      if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
        if (performMemCpyToMemSetOptzn(M, MDep, BAA)) {
          LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
          eraseInstruction(M);
          ++NumCpyToSet;
          return true;
        }
      }
    }

    if (hasUndefContents(MSSA, BAA, M->getSource(), MD, M->getLength())) {
      LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
      eraseInstruction(M);
      ++NumMemCpyInstr;
      return true;
    }
  }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  // See if the source could potentially be modified by this memmove.
  if (isModSet(AA->getModRefInfo(M, MemoryLocation::getForSource(M))))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply
  // stricter aliasing guarantees).

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = CB.getParamByValType(ArgNo);
  TypeSize ByValSize = DL.getTypeAllocSize(ByValTy);
  MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
  MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
  if (!CallAccess)
    return false;
  MemCpyInst *MDep = nullptr;
  BatchAAResults BAA(*AA);
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      CallAccess->getDefiningAccess(), Loc, BAA);
  if (auto *MD = dyn_cast<MemoryDef>(Clobber))
    MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can forward the source of the memcpy instead of the
  // copied result.
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
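  // For example (illustrative only):
  //   memcpy(tmp <- src, 16)
  //   call foo(byval tmp)      ; the byval type is 16 bytes
  // can pass src directly, provided the copy covers the whole byval object and
  // nothing writes to src in between; a 12-byte copy would leave the tail of
  // the byval object unspecified, so we require length >= byval size.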
  auto *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || !TypeSize::isKnownGE(
                 TypeSize::getFixed(C1->getValue().getZExtValue()), ByValSize))
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the
  // alignment, then it is some target specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If it is greater than the memcpy's source alignment, then we check to see
  // if we can force the source of the memcpy to the alignment we need.  If we
  // fail, we bail out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to rewrite the call to take b instead of a.
  if (writtenBetween(MSSA, BAA, MemoryLocation::getForSource(MDep),
                     MSSA->getMemoryAccess(MDep), CallAccess))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good!  Update the byval argument.
  combineAAMetadata(&CB, MDep);
  CB.setArgOperand(ArgNo, MDep->getSource());
  ++NumMemCpyInstr;
  return true;
}

/// This is called on memcpy dest pointer arguments attributed as immutable
/// during call. Try to use the memcpy source directly if all of the following
/// conditions are satisfied.
/// 1. The memcpy dst is neither modified during the call nor captured by the
///    call. (if readonly, noalias, nocapture attributes on call-site.)
/// 2. The memcpy dst is an alloca with known alignment & size.
///    2-1. The memcpy length == the alloca size, which ensures that the new
///         pointer is dereferenceable for the required range.
///    2-2. The src pointer has alignment >= the alloca alignment or can be
///         enforced so.
/// 3. The memcpy dst and src are not modified between the memcpy and the call.
///    (if MSSA clobber check is safe.)
/// 4. The memcpy src is not modified during the call. (ModRef check shows no
///    Mod.)
bool MemCpyOptPass::processImmutArgument(CallBase &CB, unsigned ArgNo) {
  // 1. Ensure the passed argument is immutable during the call.
  if (!(CB.paramHasAttr(ArgNo, Attribute::NoAlias) &&
        CB.paramHasAttr(ArgNo, Attribute::NoCapture)))
    return false;
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  Value *ImmutArg = CB.getArgOperand(ArgNo);

  // 2. Check that the arg is an alloca.
  // TODO: Even if the arg gets back to branches, we can remove memcpy if all
  // the alloca alignments can be enforced to source alignment.
  auto *AI = dyn_cast<AllocaInst>(ImmutArg->stripPointerCasts());
  if (!AI)
    return false;

  std::optional<TypeSize> AllocaSize = AI->getAllocationSize(DL);
  // Can't handle unknown size alloca.
  // (e.g. Variable Length Array, Scalable Vector)
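  // In the supported case the pattern looks like (illustrative only):
  //   buf = alloca [32 x i8]
  //   memcpy(buf <- src, 32)
  //   call foo(noalias nocapture readonly buf)
  // and the call can take src directly once the checks below succeed; a VLA or
  // scalable alloca has no compile-time size, so we give up here.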
  if (!AllocaSize || AllocaSize->isScalable())
    return false;
  MemoryLocation Loc(ImmutArg, LocationSize::precise(*AllocaSize));
  MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
  if (!CallAccess)
    return false;

  MemCpyInst *MDep = nullptr;
  BatchAAResults BAA(*AA);
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      CallAccess->getDefiningAccess(), Loc, BAA);
  if (auto *MD = dyn_cast<MemoryDef>(Clobber))
    MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());

  // If the immut argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, check that the arg equals the memcpy dest.
  if (!MDep || MDep->isVolatile() || AI != MDep->getDest())
    return false;

  // The address space of the memcpy source must match the immut argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ImmutArg->getType()->getPointerAddressSpace())
    return false;

  // 2-1. The length of the memcpy must be equal to the size of the alloca.
  auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  if (!MDepLen || AllocaSize != MDepLen->getValue())
    return false;

  // 2-2. The memcpy source alignment must be larger than or equal to the
  // alloca's alignment. If not, we check to see if we can force the source of
  // the memcpy to the alignment we need.  If we fail, we bail out.
  Align MemDepAlign = MDep->getSourceAlign().valueOrOne();
  Align AllocaAlign = AI->getAlign();
  if (MemDepAlign < AllocaAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), AllocaAlign, DL, &CB, AC,
                                 DT) < AllocaAlign)
    return false;

  // 3. Verify that the source doesn't change in between the memcpy and
  // the call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to rewrite the call to take b instead of a.
  if (writtenBetween(MSSA, BAA, MemoryLocation::getForSource(MDep),
                     MSSA->getMemoryAccess(MDep), CallAccess))
    return false;

  // 4. The memcpy src must not be modified during the call.
  if (isModSet(AA->getModRefInfo(&CB, MemoryLocation::getForSource(MDep))))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to Immut src:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good!  Update the immut argument.
  combineAAMetadata(&CB, MDep);
  CB.setArgOperand(ArgNo, MDep->getSource());
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
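      // BI is advanced before I is processed, so the handlers below may erase
      // I without invalidating the iterator; RepeatInstruction later steps BI
      // back when the (possibly rewritten) position should be visited again.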
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (auto *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (auto *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (auto *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (auto *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i) {
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
          else if (CB->onlyReadsMemory(i))
            MadeChange |= processImmutArgument(*CB, i);
        }
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *MSSA = &AM.getResult<MemorySSAAnalysis>(F);

  bool MadeChange = runImpl(F, &TLI, AA, AC, DT, &MSSA->getMSSA());
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
                            AliasAnalysis *AA_, AssumptionCache *AC_,
                            DominatorTree *DT_, MemorySSA *MSSA_) {
  bool MadeChange = false;
  TLI = TLI_;
  AA = AA_;
  AC = AC_;
  DT = DT_;
  MSSA = MSSA_;
  MemorySSAUpdater MSSAU_(MSSA_);
  MSSAU = &MSSAU_;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  if (VerifyMemorySSA)
    MSSA_->verifyMemorySSA();

  return MadeChange;
}