1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This transformation implements the well known scalar replacement of 10 /// aggregates transformation. It tries to identify promotable elements of an 11 /// aggregate alloca, and promote them to registers. It will also try to 12 /// convert uses of an element (or set of elements) of an alloca into a vector 13 /// or bitfield-style integer scalar if appropriate. 14 /// 15 /// It works to do this with minimal slicing of the alloca so that regions 16 /// which are merely transferred in and out of external memory remain unchanged 17 /// and are not decomposed to scalar code. 18 /// 19 /// Because this also performs alloca promotion, it can be thought of as also 20 /// serving the purpose of SSA formation. The algorithm iterates on the 21 /// function until all opportunities for promotion have been realized. 22 /// 23 //===----------------------------------------------------------------------===// 24 25 #include "llvm/Transforms/Scalar/SROA.h" 26 #include "llvm/ADT/APInt.h" 27 #include "llvm/ADT/ArrayRef.h" 28 #include "llvm/ADT/DenseMap.h" 29 #include "llvm/ADT/PointerIntPair.h" 30 #include "llvm/ADT/STLExtras.h" 31 #include "llvm/ADT/SetVector.h" 32 #include "llvm/ADT/SmallBitVector.h" 33 #include "llvm/ADT/SmallPtrSet.h" 34 #include "llvm/ADT/SmallVector.h" 35 #include "llvm/ADT/Statistic.h" 36 #include "llvm/ADT/StringRef.h" 37 #include "llvm/ADT/Twine.h" 38 #include "llvm/ADT/iterator.h" 39 #include "llvm/ADT/iterator_range.h" 40 #include "llvm/Analysis/AssumptionCache.h" 41 #include "llvm/Analysis/GlobalsModRef.h" 42 #include "llvm/Analysis/Loads.h" 43 #include "llvm/Analysis/PtrUseVisitor.h" 44 #include "llvm/Config/llvm-config.h" 45 #include "llvm/IR/BasicBlock.h" 46 #include "llvm/IR/Constant.h" 47 #include "llvm/IR/ConstantFolder.h" 48 #include "llvm/IR/Constants.h" 49 #include "llvm/IR/DIBuilder.h" 50 #include "llvm/IR/DataLayout.h" 51 #include "llvm/IR/DebugInfoMetadata.h" 52 #include "llvm/IR/DerivedTypes.h" 53 #include "llvm/IR/Dominators.h" 54 #include "llvm/IR/Function.h" 55 #include "llvm/IR/GetElementPtrTypeIterator.h" 56 #include "llvm/IR/GlobalAlias.h" 57 #include "llvm/IR/IRBuilder.h" 58 #include "llvm/IR/InstVisitor.h" 59 #include "llvm/IR/InstrTypes.h" 60 #include "llvm/IR/Instruction.h" 61 #include "llvm/IR/Instructions.h" 62 #include "llvm/IR/IntrinsicInst.h" 63 #include "llvm/IR/Intrinsics.h" 64 #include "llvm/IR/LLVMContext.h" 65 #include "llvm/IR/Metadata.h" 66 #include "llvm/IR/Module.h" 67 #include "llvm/IR/Operator.h" 68 #include "llvm/IR/PassManager.h" 69 #include "llvm/IR/Type.h" 70 #include "llvm/IR/Use.h" 71 #include "llvm/IR/User.h" 72 #include "llvm/IR/Value.h" 73 #include "llvm/InitializePasses.h" 74 #include "llvm/Pass.h" 75 #include "llvm/Support/Casting.h" 76 #include "llvm/Support/CommandLine.h" 77 #include "llvm/Support/Compiler.h" 78 #include "llvm/Support/Debug.h" 79 #include "llvm/Support/ErrorHandling.h" 80 #include "llvm/Support/MathExtras.h" 81 #include "llvm/Support/raw_ostream.h" 82 #include "llvm/Transforms/Scalar.h" 83 #include "llvm/Transforms/Utils/Local.h" 84 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 85 #include <algorithm> 86 #include 
<cassert> 87 #include <chrono> 88 #include <cstddef> 89 #include <cstdint> 90 #include <cstring> 91 #include <iterator> 92 #include <string> 93 #include <tuple> 94 #include <utility> 95 #include <vector> 96 97 using namespace llvm; 98 using namespace llvm::sroa; 99 100 #define DEBUG_TYPE "sroa" 101 102 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); 103 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); 104 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca"); 105 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten"); 106 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition"); 107 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); 108 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); 109 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); 110 STATISTIC(NumDeleted, "Number of instructions deleted"); 111 STATISTIC(NumVectorized, "Number of vectorized aggregates"); 112 113 /// Hidden option to experiment with completely strict handling of inbounds 114 /// GEPs. 115 static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false), 116 cl::Hidden); 117 118 namespace { 119 120 /// A custom IRBuilder inserter which prefixes all names, but only in 121 /// Assert builds. 122 class IRBuilderPrefixedInserter final : public IRBuilderDefaultInserter { 123 std::string Prefix; 124 125 const Twine getNameWithPrefix(const Twine &Name) const { 126 return Name.isTriviallyEmpty() ? Name : Prefix + Name; 127 } 128 129 public: 130 void SetNamePrefix(const Twine &P) { Prefix = P.str(); } 131 132 void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, 133 BasicBlock::iterator InsertPt) const override { 134 IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB, 135 InsertPt); 136 } 137 }; 138 139 /// Provide a type for IRBuilder that drops names in release builds. 140 using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>; 141 142 /// A used slice of an alloca. 143 /// 144 /// This structure represents a slice of an alloca used by some instruction. It 145 /// stores both the begin and end offsets of this use, a pointer to the use 146 /// itself, and a flag indicating whether we can classify the use as splittable 147 /// or not when forming partitions of the alloca. 148 class Slice { 149 /// The beginning offset of the range. 150 uint64_t BeginOffset = 0; 151 152 /// The ending offset, not included in the range. 153 uint64_t EndOffset = 0; 154 155 /// Storage for both the use of this slice and whether it can be 156 /// split. 157 PointerIntPair<Use *, 1, bool> UseAndIsSplittable; 158 159 public: 160 Slice() = default; 161 162 Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable) 163 : BeginOffset(BeginOffset), EndOffset(EndOffset), 164 UseAndIsSplittable(U, IsSplittable) {} 165 166 uint64_t beginOffset() const { return BeginOffset; } 167 uint64_t endOffset() const { return EndOffset; } 168 169 bool isSplittable() const { return UseAndIsSplittable.getInt(); } 170 void makeUnsplittable() { UseAndIsSplittable.setInt(false); } 171 172 Use *getUse() const { return UseAndIsSplittable.getPointer(); } 173 174 bool isDead() const { return getUse() == nullptr; } 175 void kill() { UseAndIsSplittable.setPointer(nullptr); } 176 177 /// Support for ordering ranges. 
178 /// 179 /// This provides an ordering over ranges such that start offsets are 180 /// always increasing, and within equal start offsets, the end offsets are 181 /// decreasing. Thus the spanning range comes first in a cluster with the 182 /// same start position. 183 bool operator<(const Slice &RHS) const { 184 if (beginOffset() < RHS.beginOffset()) 185 return true; 186 if (beginOffset() > RHS.beginOffset()) 187 return false; 188 if (isSplittable() != RHS.isSplittable()) 189 return !isSplittable(); 190 if (endOffset() > RHS.endOffset()) 191 return true; 192 return false; 193 } 194 195 /// Support comparison with a single offset to allow binary searches. 196 friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS, 197 uint64_t RHSOffset) { 198 return LHS.beginOffset() < RHSOffset; 199 } 200 friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, 201 const Slice &RHS) { 202 return LHSOffset < RHS.beginOffset(); 203 } 204 205 bool operator==(const Slice &RHS) const { 206 return isSplittable() == RHS.isSplittable() && 207 beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset(); 208 } 209 bool operator!=(const Slice &RHS) const { return !operator==(RHS); } 210 }; 211 212 } // end anonymous namespace 213 214 /// Representation of the alloca slices. 215 /// 216 /// This class represents the slices of an alloca which are formed by its 217 /// various uses. If a pointer escapes, we can't fully build a representation 218 /// for the slices used and we reflect that in this structure. The uses are 219 /// stored, sorted by increasing beginning offset and with unsplittable slices 220 /// starting at a particular offset before splittable slices. 221 class llvm::sroa::AllocaSlices { 222 public: 223 /// Construct the slices of a particular alloca. 224 AllocaSlices(const DataLayout &DL, AllocaInst &AI); 225 226 /// Test whether a pointer to the allocation escapes our analysis. 227 /// 228 /// If this is true, the slices are never fully built and should be 229 /// ignored. 230 bool isEscaped() const { return PointerEscapingInstr; } 231 232 /// Support for iterating over the slices. 233 /// @{ 234 using iterator = SmallVectorImpl<Slice>::iterator; 235 using range = iterator_range<iterator>; 236 237 iterator begin() { return Slices.begin(); } 238 iterator end() { return Slices.end(); } 239 240 using const_iterator = SmallVectorImpl<Slice>::const_iterator; 241 using const_range = iterator_range<const_iterator>; 242 243 const_iterator begin() const { return Slices.begin(); } 244 const_iterator end() const { return Slices.end(); } 245 /// @} 246 247 /// Erase a range of slices. 248 void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); } 249 250 /// Insert new slices for this alloca. 251 /// 252 /// This moves the slices into the alloca's slices collection, and re-sorts 253 /// everything so that the usual ordering properties of the alloca's slices 254 /// hold. 255 void insert(ArrayRef<Slice> NewSlices) { 256 int OldSize = Slices.size(); 257 Slices.append(NewSlices.begin(), NewSlices.end()); 258 auto SliceI = Slices.begin() + OldSize; 259 llvm::sort(SliceI, Slices.end()); 260 std::inplace_merge(Slices.begin(), SliceI, Slices.end()); 261 } 262 263 // Forward declare the iterator and range accessor for walking the 264 // partitions. 265 class partition_iterator; 266 iterator_range<partition_iterator> partitions(); 267 268 /// Access the dead users for this alloca. 
  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }

  /// Access Uses that should be dropped if the alloca is promotable.
  ArrayRef<Use *> getDeadUsesIfPromotable() const {
    return DeadUseIfPromotable;
  }

  /// Access the dead operands referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;

  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// Uses which will become dead if we can promote the alloca.
  SmallVector<Use *, 8> DeadUseIfPromotable;

  /// Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};

/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
350 /// 351 /// Objects of this type are produced by traversing the alloca's slices, but 352 /// are only ephemeral and not persistent. 353 class llvm::sroa::Partition { 354 private: 355 friend class AllocaSlices; 356 friend class AllocaSlices::partition_iterator; 357 358 using iterator = AllocaSlices::iterator; 359 360 /// The beginning and ending offsets of the alloca for this 361 /// partition. 362 uint64_t BeginOffset = 0, EndOffset = 0; 363 364 /// The start and end iterators of this partition. 365 iterator SI, SJ; 366 367 /// A collection of split slice tails overlapping the partition. 368 SmallVector<Slice *, 4> SplitTails; 369 370 /// Raw constructor builds an empty partition starting and ending at 371 /// the given iterator. 372 Partition(iterator SI) : SI(SI), SJ(SI) {} 373 374 public: 375 /// The start offset of this partition. 376 /// 377 /// All of the contained slices start at or after this offset. 378 uint64_t beginOffset() const { return BeginOffset; } 379 380 /// The end offset of this partition. 381 /// 382 /// All of the contained slices end at or before this offset. 383 uint64_t endOffset() const { return EndOffset; } 384 385 /// The size of the partition. 386 /// 387 /// Note that this can never be zero. 388 uint64_t size() const { 389 assert(BeginOffset < EndOffset && "Partitions must span some bytes!"); 390 return EndOffset - BeginOffset; 391 } 392 393 /// Test whether this partition contains no slices, and merely spans 394 /// a region occupied by split slices. 395 bool empty() const { return SI == SJ; } 396 397 /// \name Iterate slices that start within the partition. 398 /// These may be splittable or unsplittable. They have a begin offset >= the 399 /// partition begin offset. 400 /// @{ 401 // FIXME: We should probably define a "concat_iterator" helper and use that 402 // to stitch together pointee_iterators over the split tails and the 403 // contiguous iterators of the partition. That would give a much nicer 404 // interface here. We could then additionally expose filtered iterators for 405 // split, unsplit, and unsplittable splices based on the usage patterns. 406 iterator begin() const { return SI; } 407 iterator end() const { return SJ; } 408 /// @} 409 410 /// Get the sequence of split slice tails. 411 /// 412 /// These tails are of slices which start before this partition but are 413 /// split and overlap into the partition. We accumulate these while forming 414 /// partitions. 415 ArrayRef<Slice *> splitSliceTails() const { return SplitTails; } 416 }; 417 418 /// An iterator over partitions of the alloca's slices. 419 /// 420 /// This iterator implements the core algorithm for partitioning the alloca's 421 /// slices. It is a forward iterator as we don't support backtracking for 422 /// efficiency reasons, and re-use a single storage area to maintain the 423 /// current set of split slices. 424 /// 425 /// It is templated on the slice iterator type to use so that it can operate 426 /// with either const or non-const slice iterators. 427 class AllocaSlices::partition_iterator 428 : public iterator_facade_base<partition_iterator, std::forward_iterator_tag, 429 Partition> { 430 friend class AllocaSlices; 431 432 /// Most of the state for walking the partitions is held in a class 433 /// with a nice interface for examining them. 434 Partition P; 435 436 /// We need to keep the end of the slices to know when to stop. 437 AllocaSlices::iterator SE; 438 439 /// We also need to keep track of the maximum split end offset seen. 440 /// FIXME: Do we really? 
  uint64_t MaxSplitSliceEndOffset = 0;

  /// Sets the partition to be empty at given iterator, and sets the
  /// end iterator.
  partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
      : P(SI), SE(SE) {
    // If not already at the end, advance our state to form the initial
    // partition.
    if (SI != SE)
      advance();
  }

  /// Advance the iterator to the next partition.
  ///
  /// Requires that the iterator not be at the end of the slices.
  void advance() {
    assert((P.SI != SE || !P.SplitTails.empty()) &&
           "Cannot advance past the end of the slices!");

    // Clear out any split uses which have ended.
    if (!P.SplitTails.empty()) {
      if (P.EndOffset >= MaxSplitSliceEndOffset) {
        // If we've finished all splits, this is easy.
        P.SplitTails.clear();
        MaxSplitSliceEndOffset = 0;
      } else {
        // Remove the uses which have ended in the prior partition. This
        // cannot change the max split slice end because we just checked that
        // the prior partition ended prior to that max.
        llvm::erase_if(P.SplitTails,
                       [&](Slice *S) { return S->endOffset() <= P.EndOffset; });
        assert(llvm::any_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() == MaxSplitSliceEndOffset;
                            }) &&
               "Could not find the current max split slice offset!");
        assert(llvm::all_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() <= MaxSplitSliceEndOffset;
                            }) &&
               "Max split slice end offset is not actually the max!");
      }
    }

    // If P.SI is already at the end, then we've cleared the split tail and
    // now have an end iterator.
    if (P.SI == SE) {
      assert(P.SplitTails.empty() && "Failed to clear the split slices!");
      return;
    }

    // If we had a non-empty partition previously, set up the state for
    // subsequent partitions.
    if (P.SI != P.SJ) {
      // Accumulate all the splittable slices which started in the old
      // partition into the split list.
      for (Slice &S : P)
        if (S.isSplittable() && S.endOffset() > P.EndOffset) {
          P.SplitTails.push_back(&S);
          MaxSplitSliceEndOffset =
              std::max(S.endOffset(), MaxSplitSliceEndOffset);
        }

      // Start from the end of the previous partition.
      P.SI = P.SJ;

      // If P.SI is now at the end, we at most have a tail of split slices.
      if (P.SI == SE) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = MaxSplitSliceEndOffset;
        return;
      }

      // If we have split slices and the next slice is after a gap and is
      // not splittable, immediately form an empty partition for the split
      // slices up until the next slice begins.
      if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
          !P.SI->isSplittable()) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = P.SI->beginOffset();
        return;
      }
    }

    // OK, we need to consume new slices. Set the end offset based on the
    // current slice, and step SJ past it. The beginning offset of the
    // partition is the beginning offset of the next slice unless we have
    // pre-existing split slices that are continuing, in which case we begin
    // at the prior end offset.
    P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
    P.EndOffset = P.SI->endOffset();
    ++P.SJ;

    // There are two strategies to form a partition based on whether the
    // partition starts with an unsplittable slice or a splittable slice.
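    // For example (illustrative): given an unsplittable slice [0,8) and a
    // splittable slice [4,16), the first partition formed is the unsplittable
    // span [0,8) covering both slices; the splittable remainder is carried
    // forward as a split tail and covered by a later, empty partition [8,16).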
    if (!P.SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at
      // the first slice and will extend through its end.
      assert(P.BeginOffset == P.SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
        if (!P.SJ->isSplittable())
          P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
        ++P.SJ;
      }

      // We have a partition across a set of overlapping unsplittable
      // slices.
      return;
    }

    // If we're starting with a splittable slice, then we need to form
    // a synthetic partition spanning it and any other overlapping splittable
    // slices.
    assert(P.SI->isSplittable() && "Forming a splittable partition!");

    // Collect all of the overlapping splittable slices.
    while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
           P.SJ->isSplittable()) {
      P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      ++P.SJ;
    }

    // Back up P.EndOffset if we ended the span early when encountering an
    // unsplittable slice. This synthesizes the early end offset of
    // a partition spanning only splittable slices.
    if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
      assert(!P.SJ->isSplittable());
      P.EndOffset = P.SJ->beginOffset();
    }
  }

public:
  bool operator==(const partition_iterator &RHS) const {
    assert(SE == RHS.SE &&
           "End iterators don't match between compared partition iterators!");

    // The observed positions of partitions are marked by the P.SI iterator and
    // the emptiness of the split slices. The latter is only relevant when
    // P.SI == SE, as the end iterator will additionally have an empty split
    // slices list, but the prior may have the same P.SI and a tail of split
    // slices.
    if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
      assert(P.SJ == RHS.P.SJ &&
             "Same set of slices formed two different sized partitions!");
      assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
             "Same slice position with differently sized non-empty split "
             "slice tails!");
      return true;
    }
    return false;
  }

  partition_iterator &operator++() {
    advance();
    return *this;
  }

  Partition &operator*() { return P; }
};

/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
  return make_range(partition_iterator(begin(), end()),
                    partition_iterator(end(), end()));
}

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}

/// A helper that folds a PHI node or a select.
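///
/// For example (illustrative): a PHI such as
///   %p = phi i32* [ %a, %bb1 ], [ %a, %bb2 ]
/// folds to %a, and a select with a constant condition folds to the selected
/// operand.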
629 static Value *foldPHINodeOrSelectInst(Instruction &I) { 630 if (PHINode *PN = dyn_cast<PHINode>(&I)) { 631 // If PN merges together the same value, return that value. 632 return PN->hasConstantValue(); 633 } 634 return foldSelectInst(cast<SelectInst>(I)); 635 } 636 637 /// Builder for the alloca slices. 638 /// 639 /// This class builds a set of alloca slices by recursively visiting the uses 640 /// of an alloca and making a slice for each load and store at each offset. 641 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> { 642 friend class PtrUseVisitor<SliceBuilder>; 643 friend class InstVisitor<SliceBuilder>; 644 645 using Base = PtrUseVisitor<SliceBuilder>; 646 647 const uint64_t AllocSize; 648 AllocaSlices &AS; 649 650 SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap; 651 SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes; 652 653 /// Set to de-duplicate dead instructions found in the use walk. 654 SmallPtrSet<Instruction *, 4> VisitedDeadInsts; 655 656 public: 657 SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS) 658 : PtrUseVisitor<SliceBuilder>(DL), 659 AllocSize(DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize()), 660 AS(AS) {} 661 662 private: 663 void markAsDead(Instruction &I) { 664 if (VisitedDeadInsts.insert(&I).second) 665 AS.DeadUsers.push_back(&I); 666 } 667 668 void insertUse(Instruction &I, const APInt &Offset, uint64_t Size, 669 bool IsSplittable = false) { 670 // Completely skip uses which have a zero size or start either before or 671 // past the end of the allocation. 672 if (Size == 0 || Offset.uge(AllocSize)) { 673 LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" 674 << Offset 675 << " which has zero size or starts outside of the " 676 << AllocSize << " byte alloca:\n" 677 << " alloca: " << AS.AI << "\n" 678 << " use: " << I << "\n"); 679 return markAsDead(I); 680 } 681 682 uint64_t BeginOffset = Offset.getZExtValue(); 683 uint64_t EndOffset = BeginOffset + Size; 684 685 // Clamp the end offset to the end of the allocation. Note that this is 686 // formulated to handle even the case where "BeginOffset + Size" overflows. 687 // This may appear superficially to be something we could ignore entirely, 688 // but that is not so! There may be widened loads or PHI-node uses where 689 // some instructions are dead but not others. We can't completely ignore 690 // them, and so have to record at least the information here. 691 assert(AllocSize >= BeginOffset); // Established above. 
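    // For instance (illustrative): a 16 byte use at offset 56 of a 64 byte
    // alloca is recorded as the clamped slice [56, 64) rather than dropped.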
692 if (Size > AllocSize - BeginOffset) { 693 LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" 694 << Offset << " to remain within the " << AllocSize 695 << " byte alloca:\n" 696 << " alloca: " << AS.AI << "\n" 697 << " use: " << I << "\n"); 698 EndOffset = AllocSize; 699 } 700 701 AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable)); 702 } 703 704 void visitBitCastInst(BitCastInst &BC) { 705 if (BC.use_empty()) 706 return markAsDead(BC); 707 708 return Base::visitBitCastInst(BC); 709 } 710 711 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 712 if (ASC.use_empty()) 713 return markAsDead(ASC); 714 715 return Base::visitAddrSpaceCastInst(ASC); 716 } 717 718 void visitGetElementPtrInst(GetElementPtrInst &GEPI) { 719 if (GEPI.use_empty()) 720 return markAsDead(GEPI); 721 722 if (SROAStrictInbounds && GEPI.isInBounds()) { 723 // FIXME: This is a manually un-factored variant of the basic code inside 724 // of GEPs with checking of the inbounds invariant specified in the 725 // langref in a very strict sense. If we ever want to enable 726 // SROAStrictInbounds, this code should be factored cleanly into 727 // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds 728 // by writing out the code here where we have the underlying allocation 729 // size readily available. 730 APInt GEPOffset = Offset; 731 const DataLayout &DL = GEPI.getModule()->getDataLayout(); 732 for (gep_type_iterator GTI = gep_type_begin(GEPI), 733 GTE = gep_type_end(GEPI); 734 GTI != GTE; ++GTI) { 735 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); 736 if (!OpC) 737 break; 738 739 // Handle a struct index, which adds its field offset to the pointer. 740 if (StructType *STy = GTI.getStructTypeOrNull()) { 741 unsigned ElementIdx = OpC->getZExtValue(); 742 const StructLayout *SL = DL.getStructLayout(STy); 743 GEPOffset += 744 APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)); 745 } else { 746 // For array or vector indices, scale the index by the size of the 747 // type. 748 APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth()); 749 GEPOffset += 750 Index * 751 APInt(Offset.getBitWidth(), 752 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize()); 753 } 754 755 // If this index has computed an intermediate pointer which is not 756 // inbounds, then the result of the GEP is a poison value and we can 757 // delete it and all uses. 758 if (GEPOffset.ugt(AllocSize)) 759 return markAsDead(GEPI); 760 } 761 } 762 763 return Base::visitGetElementPtrInst(GEPI); 764 } 765 766 void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset, 767 uint64_t Size, bool IsVolatile) { 768 // We allow splitting of non-volatile loads and stores where the type is an 769 // integer type. These may be used to implement 'memcpy' or other "transfer 770 // of bits" patterns. 
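    // For example (illustrative): a non-volatile i64 load or store copying
    // bytes of the alloca may later be split along partition boundaries,
    // whereas a volatile or floating-point access of the same bytes may not.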
    bool IsSplittable = Ty->isIntegerTy() && !IsVolatile;

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    if (LI.isVolatile() &&
        LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&LI);

    if (isa<ScalableVectorType>(LI.getType()))
      return PI.setAborted(&LI);

    uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedSize();
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    if (SI.isVolatile() &&
        SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&SI);

    if (isa<ScalableVectorType>(ValOp->getType()))
      return PI.setAborted(&SI);

    uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedSize();

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
                        << Offset << " which extends past the end of the "
                        << AllocSize << " byte alloca:\n"
                        << " alloca: " << AS.AI << "\n"
                        << " use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // Don't replace this with a store with a different address space. TODO:
    // Use a store with the casted new alloca?
    if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&II);

    insertUse(II, Offset, Length ? Length->getLimitedValue()
                                 : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
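    // (For example, illustrative: a memcpy whose source and destination are
    // both derived from this alloca reaches this visitor once per pointer
    // operand.)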
860 if (VisitedDeadInsts.count(&II)) 861 return; 862 863 if (!IsOffsetKnown) 864 return PI.setAborted(&II); 865 866 // Don't replace this with a load/store with a different address space. 867 // TODO: Use a store with the casted new alloca? 868 if (II.isVolatile() && 869 (II.getDestAddressSpace() != DL.getAllocaAddrSpace() || 870 II.getSourceAddressSpace() != DL.getAllocaAddrSpace())) 871 return PI.setAborted(&II); 872 873 // This side of the transfer is completely out-of-bounds, and so we can 874 // nuke the entire transfer. However, we also need to nuke the other side 875 // if already added to our partitions. 876 // FIXME: Yet another place we really should bypass this when 877 // instrumenting for ASan. 878 if (Offset.uge(AllocSize)) { 879 SmallDenseMap<Instruction *, unsigned>::iterator MTPI = 880 MemTransferSliceMap.find(&II); 881 if (MTPI != MemTransferSliceMap.end()) 882 AS.Slices[MTPI->second].kill(); 883 return markAsDead(II); 884 } 885 886 uint64_t RawOffset = Offset.getLimitedValue(); 887 uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset; 888 889 // Check for the special case where the same exact value is used for both 890 // source and dest. 891 if (*U == II.getRawDest() && *U == II.getRawSource()) { 892 // For non-volatile transfers this is a no-op. 893 if (!II.isVolatile()) 894 return markAsDead(II); 895 896 return insertUse(II, Offset, Size, /*IsSplittable=*/false); 897 } 898 899 // If we have seen both source and destination for a mem transfer, then 900 // they both point to the same alloca. 901 bool Inserted; 902 SmallDenseMap<Instruction *, unsigned>::iterator MTPI; 903 std::tie(MTPI, Inserted) = 904 MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size())); 905 unsigned PrevIdx = MTPI->second; 906 if (!Inserted) { 907 Slice &PrevP = AS.Slices[PrevIdx]; 908 909 // Check if the begin offsets match and this is a non-volatile transfer. 910 // In that case, we can completely elide the transfer. 911 if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) { 912 PrevP.kill(); 913 return markAsDead(II); 914 } 915 916 // Otherwise we have an offset transfer within the same alloca. We can't 917 // split those. 918 PrevP.makeUnsplittable(); 919 } 920 921 // Insert the use now that we've fixed up the splittable nature. 922 insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length); 923 924 // Check that we ended up with a valid index in the map. 925 assert(AS.Slices[PrevIdx].getUse()->getUser() == &II && 926 "Map index doesn't point back to a slice with this user."); 927 } 928 929 // Disable SRoA for any intrinsics except for lifetime invariants. 930 // FIXME: What about debug intrinsics? This matches old behavior, but 931 // doesn't make sense. 932 void visitIntrinsicInst(IntrinsicInst &II) { 933 if (II.isDroppable()) { 934 AS.DeadUseIfPromotable.push_back(U); 935 return; 936 } 937 938 if (!IsOffsetKnown) 939 return PI.setAborted(&II); 940 941 if (II.isLifetimeStartOrEnd()) { 942 ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0)); 943 uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(), 944 Length->getLimitedValue()); 945 insertUse(II, Offset, Size, true); 946 return; 947 } 948 949 Base::visitIntrinsicInst(II); 950 } 951 952 Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) { 953 // We consider any PHI or select that results in a direct load or store of 954 // the same offset to be a viable use for slicing purposes. 
These uses 955 // are considered unsplittable and the size is the maximum loaded or stored 956 // size. 957 SmallPtrSet<Instruction *, 4> Visited; 958 SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses; 959 Visited.insert(Root); 960 Uses.push_back(std::make_pair(cast<Instruction>(*U), Root)); 961 const DataLayout &DL = Root->getModule()->getDataLayout(); 962 // If there are no loads or stores, the access is dead. We mark that as 963 // a size zero access. 964 Size = 0; 965 do { 966 Instruction *I, *UsedI; 967 std::tie(UsedI, I) = Uses.pop_back_val(); 968 969 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 970 Size = std::max(Size, 971 DL.getTypeStoreSize(LI->getType()).getFixedSize()); 972 continue; 973 } 974 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 975 Value *Op = SI->getOperand(0); 976 if (Op == UsedI) 977 return SI; 978 Size = std::max(Size, 979 DL.getTypeStoreSize(Op->getType()).getFixedSize()); 980 continue; 981 } 982 983 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { 984 if (!GEP->hasAllZeroIndices()) 985 return GEP; 986 } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) && 987 !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) { 988 return I; 989 } 990 991 for (User *U : I->users()) 992 if (Visited.insert(cast<Instruction>(U)).second) 993 Uses.push_back(std::make_pair(I, cast<Instruction>(U))); 994 } while (!Uses.empty()); 995 996 return nullptr; 997 } 998 999 void visitPHINodeOrSelectInst(Instruction &I) { 1000 assert(isa<PHINode>(I) || isa<SelectInst>(I)); 1001 if (I.use_empty()) 1002 return markAsDead(I); 1003 1004 // TODO: We could use SimplifyInstruction here to fold PHINodes and 1005 // SelectInsts. However, doing so requires to change the current 1006 // dead-operand-tracking mechanism. For instance, suppose neither loading 1007 // from %U nor %other traps. Then "load (select undef, %U, %other)" does not 1008 // trap either. However, if we simply replace %U with undef using the 1009 // current dead-operand-tracking mechanism, "load (select undef, undef, 1010 // %other)" may trap because the select may return the first operand 1011 // "undef". 1012 if (Value *Result = foldPHINodeOrSelectInst(I)) { 1013 if (Result == *U) 1014 // If the result of the constant fold will be the pointer, recurse 1015 // through the PHI/select as if we had RAUW'ed it. 1016 enqueueUsers(I); 1017 else 1018 // Otherwise the operand to the PHI/select is dead, and we can replace 1019 // it with undef. 1020 AS.DeadOperands.push_back(U); 1021 1022 return; 1023 } 1024 1025 if (!IsOffsetKnown) 1026 return PI.setAborted(&I); 1027 1028 // See if we already have computed info on this node. 1029 uint64_t &Size = PHIOrSelectSizes[&I]; 1030 if (!Size) { 1031 // This is a new PHI/Select, check for an unsafe use of it. 1032 if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size)) 1033 return PI.setAborted(UnsafeI); 1034 } 1035 1036 // For PHI and select operands outside the alloca, we can't nuke the entire 1037 // phi or select -- the other side might still be relevant, so we special 1038 // case them here and use a separate structure to track the operands 1039 // themselves which should be replaced with undef. 1040 // FIXME: This should instead be escaped in the event we're instrumenting 1041 // for address sanitization. 
1042 if (Offset.uge(AllocSize)) { 1043 AS.DeadOperands.push_back(U); 1044 return; 1045 } 1046 1047 insertUse(I, Offset, Size); 1048 } 1049 1050 void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); } 1051 1052 void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); } 1053 1054 /// Disable SROA entirely if there are unhandled users of the alloca. 1055 void visitInstruction(Instruction &I) { PI.setAborted(&I); } 1056 }; 1057 1058 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) 1059 : 1060 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1061 AI(AI), 1062 #endif 1063 PointerEscapingInstr(nullptr) { 1064 SliceBuilder PB(DL, AI, *this); 1065 SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI); 1066 if (PtrI.isEscaped() || PtrI.isAborted()) { 1067 // FIXME: We should sink the escape vs. abort info into the caller nicely, 1068 // possibly by just storing the PtrInfo in the AllocaSlices. 1069 PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst() 1070 : PtrI.getAbortingInst(); 1071 assert(PointerEscapingInstr && "Did not track a bad instruction"); 1072 return; 1073 } 1074 1075 llvm::erase_if(Slices, [](const Slice &S) { return S.isDead(); }); 1076 1077 // Sort the uses. This arranges for the offsets to be in ascending order, 1078 // and the sizes to be in descending order. 1079 llvm::stable_sort(Slices); 1080 } 1081 1082 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1083 1084 void AllocaSlices::print(raw_ostream &OS, const_iterator I, 1085 StringRef Indent) const { 1086 printSlice(OS, I, Indent); 1087 OS << "\n"; 1088 printUse(OS, I, Indent); 1089 } 1090 1091 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I, 1092 StringRef Indent) const { 1093 OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")" 1094 << " slice #" << (I - begin()) 1095 << (I->isSplittable() ? " (splittable)" : ""); 1096 } 1097 1098 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I, 1099 StringRef Indent) const { 1100 OS << Indent << " used by: " << *I->getUse()->getUser() << "\n"; 1101 } 1102 1103 void AllocaSlices::print(raw_ostream &OS) const { 1104 if (PointerEscapingInstr) { 1105 OS << "Can't analyze slices for alloca: " << AI << "\n" 1106 << " A pointer to this alloca escaped by:\n" 1107 << " " << *PointerEscapingInstr << "\n"; 1108 return; 1109 } 1110 1111 OS << "Slices of alloca: " << AI << "\n"; 1112 for (const_iterator I = begin(), E = end(); I != E; ++I) 1113 print(OS, I); 1114 } 1115 1116 LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const { 1117 print(dbgs(), I); 1118 } 1119 LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); } 1120 1121 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1122 1123 /// Walk the range of a partitioning looking for a common type to cover this 1124 /// sequence of slices. 1125 static std::pair<Type *, IntegerType *> 1126 findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E, 1127 uint64_t EndOffset) { 1128 Type *Ty = nullptr; 1129 bool TyIsCommon = true; 1130 IntegerType *ITy = nullptr; 1131 1132 // Note that we need to look at *every* alloca slice's Use to ensure we 1133 // always get consistent results regardless of the order of slices. 
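  // For example (illustrative): if one slice loads the full partition as i32
  // and another loads it as float, no common type is found, but ITy still
  // records i32 as the widest integer type used.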
1134 for (AllocaSlices::const_iterator I = B; I != E; ++I) { 1135 Use *U = I->getUse(); 1136 if (isa<IntrinsicInst>(*U->getUser())) 1137 continue; 1138 if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset) 1139 continue; 1140 1141 Type *UserTy = nullptr; 1142 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1143 UserTy = LI->getType(); 1144 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1145 UserTy = SI->getValueOperand()->getType(); 1146 } 1147 1148 if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) { 1149 // If the type is larger than the partition, skip it. We only encounter 1150 // this for split integer operations where we want to use the type of the 1151 // entity causing the split. Also skip if the type is not a byte width 1152 // multiple. 1153 if (UserITy->getBitWidth() % 8 != 0 || 1154 UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) 1155 continue; 1156 1157 // Track the largest bitwidth integer type used in this way in case there 1158 // is no common type. 1159 if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth()) 1160 ITy = UserITy; 1161 } 1162 1163 // To avoid depending on the order of slices, Ty and TyIsCommon must not 1164 // depend on types skipped above. 1165 if (!UserTy || (Ty && Ty != UserTy)) 1166 TyIsCommon = false; // Give up on anything but an iN type. 1167 else 1168 Ty = UserTy; 1169 } 1170 1171 return {TyIsCommon ? Ty : nullptr, ITy}; 1172 } 1173 1174 /// PHI instructions that use an alloca and are subsequently loaded can be 1175 /// rewritten to load both input pointers in the pred blocks and then PHI the 1176 /// results, allowing the load of the alloca to be promoted. 1177 /// From this: 1178 /// %P2 = phi [i32* %Alloca, i32* %Other] 1179 /// %V = load i32* %P2 1180 /// to: 1181 /// %V1 = load i32* %Alloca -> will be mem2reg'd 1182 /// ... 1183 /// %V2 = load i32* %Other 1184 /// ... 1185 /// %V = phi [i32 %V1, i32 %V2] 1186 /// 1187 /// We can do this to a select if its only uses are loads and if the operands 1188 /// to the select can be loaded unconditionally. 1189 /// 1190 /// FIXME: This should be hoisted into a generic utility, likely in 1191 /// Transforms/Util/Local.h 1192 static bool isSafePHIToSpeculate(PHINode &PN) { 1193 const DataLayout &DL = PN.getModule()->getDataLayout(); 1194 1195 // For now, we can only do this promotion if the load is in the same block 1196 // as the PHI, and if there are no stores between the phi and load. 1197 // TODO: Allow recursive phi users. 1198 // TODO: Allow stores. 1199 BasicBlock *BB = PN.getParent(); 1200 Align MaxAlign; 1201 uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType()); 1202 APInt MaxSize(APWidth, 0); 1203 bool HaveLoad = false; 1204 for (User *U : PN.users()) { 1205 LoadInst *LI = dyn_cast<LoadInst>(U); 1206 if (!LI || !LI->isSimple()) 1207 return false; 1208 1209 // For now we only allow loads in the same block as the PHI. This is 1210 // a common case that happens when instcombine merges two loads through 1211 // a PHI. 1212 if (LI->getParent() != BB) 1213 return false; 1214 1215 // Ensure that there are no instructions between the PHI and the load that 1216 // could store. 1217 for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI) 1218 if (BBI->mayWriteToMemory()) 1219 return false; 1220 1221 uint64_t Size = DL.getTypeStoreSize(LI->getType()).getFixedSize(); 1222 MaxAlign = std::max(MaxAlign, LI->getAlign()); 1223 MaxSize = MaxSize.ult(Size) ? 
APInt(APWidth, Size) : MaxSize; 1224 HaveLoad = true; 1225 } 1226 1227 if (!HaveLoad) 1228 return false; 1229 1230 // We can only transform this if it is safe to push the loads into the 1231 // predecessor blocks. The only thing to watch out for is that we can't put 1232 // a possibly trapping load in the predecessor if it is a critical edge. 1233 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { 1234 Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator(); 1235 Value *InVal = PN.getIncomingValue(Idx); 1236 1237 // If the value is produced by the terminator of the predecessor (an 1238 // invoke) or it has side-effects, there is no valid place to put a load 1239 // in the predecessor. 1240 if (TI == InVal || TI->mayHaveSideEffects()) 1241 return false; 1242 1243 // If the predecessor has a single successor, then the edge isn't 1244 // critical. 1245 if (TI->getNumSuccessors() == 1) 1246 continue; 1247 1248 // If this pointer is always safe to load, or if we can prove that there 1249 // is already a load in the block, then we can move the load to the pred 1250 // block. 1251 if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI)) 1252 continue; 1253 1254 return false; 1255 } 1256 1257 return true; 1258 } 1259 1260 static void speculatePHINodeLoads(PHINode &PN) { 1261 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 1262 1263 LoadInst *SomeLoad = cast<LoadInst>(PN.user_back()); 1264 Type *LoadTy = SomeLoad->getType(); 1265 IRBuilderTy PHIBuilder(&PN); 1266 PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), 1267 PN.getName() + ".sroa.speculated"); 1268 1269 // Get the AA tags and alignment to use from one of the loads. It does not 1270 // matter which one we get and if any differ. 1271 AAMDNodes AATags; 1272 SomeLoad->getAAMetadata(AATags); 1273 Align Alignment = SomeLoad->getAlign(); 1274 1275 // Rewrite all loads of the PN to use the new PHI. 1276 while (!PN.use_empty()) { 1277 LoadInst *LI = cast<LoadInst>(PN.user_back()); 1278 LI->replaceAllUsesWith(NewPN); 1279 LI->eraseFromParent(); 1280 } 1281 1282 // Inject loads into all of the pred blocks. 1283 DenseMap<BasicBlock*, Value*> InjectedLoads; 1284 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { 1285 BasicBlock *Pred = PN.getIncomingBlock(Idx); 1286 Value *InVal = PN.getIncomingValue(Idx); 1287 1288 // A PHI node is allowed to have multiple (duplicated) entries for the same 1289 // basic block, as long as the value is the same. So if we already injected 1290 // a load in the predecessor, then we should reuse the same load for all 1291 // duplicated entries. 1292 if (Value* V = InjectedLoads.lookup(Pred)) { 1293 NewPN->addIncoming(V, Pred); 1294 continue; 1295 } 1296 1297 Instruction *TI = Pred->getTerminator(); 1298 IRBuilderTy PredBuilder(TI); 1299 1300 LoadInst *Load = PredBuilder.CreateAlignedLoad( 1301 LoadTy, InVal, Alignment, 1302 (PN.getName() + ".sroa.speculate.load." + Pred->getName())); 1303 ++NumLoadsSpeculated; 1304 if (AATags) 1305 Load->setAAMetadata(AATags); 1306 NewPN->addIncoming(Load, Pred); 1307 InjectedLoads[Pred] = Load; 1308 } 1309 1310 LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); 1311 PN.eraseFromParent(); 1312 } 1313 1314 /// Select instructions that use an alloca and are subsequently loaded can be 1315 /// rewritten to load both input pointers and then select between the result, 1316 /// allowing the load of the alloca to be promoted. 
1317 /// From this: 1318 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other 1319 /// %V = load i32* %P2 1320 /// to: 1321 /// %V1 = load i32* %Alloca -> will be mem2reg'd 1322 /// %V2 = load i32* %Other 1323 /// %V = select i1 %cond, i32 %V1, i32 %V2 1324 /// 1325 /// We can do this to a select if its only uses are loads and if the operand 1326 /// to the select can be loaded unconditionally. 1327 static bool isSafeSelectToSpeculate(SelectInst &SI) { 1328 Value *TValue = SI.getTrueValue(); 1329 Value *FValue = SI.getFalseValue(); 1330 const DataLayout &DL = SI.getModule()->getDataLayout(); 1331 1332 for (User *U : SI.users()) { 1333 LoadInst *LI = dyn_cast<LoadInst>(U); 1334 if (!LI || !LI->isSimple()) 1335 return false; 1336 1337 // Both operands to the select need to be dereferenceable, either 1338 // absolutely (e.g. allocas) or at this point because we can see other 1339 // accesses to it. 1340 if (!isSafeToLoadUnconditionally(TValue, LI->getType(), 1341 LI->getAlign(), DL, LI)) 1342 return false; 1343 if (!isSafeToLoadUnconditionally(FValue, LI->getType(), 1344 LI->getAlign(), DL, LI)) 1345 return false; 1346 } 1347 1348 return true; 1349 } 1350 1351 static void speculateSelectInstLoads(SelectInst &SI) { 1352 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 1353 1354 IRBuilderTy IRB(&SI); 1355 Value *TV = SI.getTrueValue(); 1356 Value *FV = SI.getFalseValue(); 1357 // Replace the loads of the select with a select of two loads. 1358 while (!SI.use_empty()) { 1359 LoadInst *LI = cast<LoadInst>(SI.user_back()); 1360 assert(LI->isSimple() && "We only speculate simple loads"); 1361 1362 IRB.SetInsertPoint(LI); 1363 LoadInst *TL = IRB.CreateLoad(LI->getType(), TV, 1364 LI->getName() + ".sroa.speculate.load.true"); 1365 LoadInst *FL = IRB.CreateLoad(LI->getType(), FV, 1366 LI->getName() + ".sroa.speculate.load.false"); 1367 NumLoadsSpeculated += 2; 1368 1369 // Transfer alignment and AA info if present. 1370 TL->setAlignment(LI->getAlign()); 1371 FL->setAlignment(LI->getAlign()); 1372 1373 AAMDNodes Tags; 1374 LI->getAAMetadata(Tags); 1375 if (Tags) { 1376 TL->setAAMetadata(Tags); 1377 FL->setAAMetadata(Tags); 1378 } 1379 1380 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, 1381 LI->getName() + ".sroa.speculated"); 1382 1383 LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n"); 1384 LI->replaceAllUsesWith(V); 1385 LI->eraseFromParent(); 1386 } 1387 SI.eraseFromParent(); 1388 } 1389 1390 /// Build a GEP out of a base pointer and indices. 1391 /// 1392 /// This will return the BasePtr if that is valid, or build a new GEP 1393 /// instruction using the IRBuilder if GEP-ing is needed. 1394 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, 1395 SmallVectorImpl<Value *> &Indices, 1396 const Twine &NamePrefix) { 1397 if (Indices.empty()) 1398 return BasePtr; 1399 1400 // A single zero index is a no-op, so check for this and avoid building a GEP 1401 // in that case. 1402 if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero()) 1403 return BasePtr; 1404 1405 return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(), 1406 BasePtr, Indices, NamePrefix + "sroa_idx"); 1407 } 1408 1409 /// Get a natural GEP off of the BasePtr walking through Ty toward 1410 /// TargetTy without changing the offset of the pointer. 1411 /// 1412 /// This routine assumes we've already established a properly offset GEP with 1413 /// Indices, and arrived at the Ty type. 
The goal is to continue to GEP with 1414 /// zero-indices down through type layers until we find one the same as 1415 /// TargetTy. If we can't find one with the same type, we at least try to use 1416 /// one with the same size. If none of that works, we just produce the GEP as 1417 /// indicated by Indices to have the correct offset. 1418 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, 1419 Value *BasePtr, Type *Ty, Type *TargetTy, 1420 SmallVectorImpl<Value *> &Indices, 1421 const Twine &NamePrefix) { 1422 if (Ty == TargetTy) 1423 return buildGEP(IRB, BasePtr, Indices, NamePrefix); 1424 1425 // Offset size to use for the indices. 1426 unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType()); 1427 1428 // See if we can descend into a struct and locate a field with the correct 1429 // type. 1430 unsigned NumLayers = 0; 1431 Type *ElementTy = Ty; 1432 do { 1433 if (ElementTy->isPointerTy()) 1434 break; 1435 1436 if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) { 1437 ElementTy = ArrayTy->getElementType(); 1438 Indices.push_back(IRB.getIntN(OffsetSize, 0)); 1439 } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) { 1440 ElementTy = VectorTy->getElementType(); 1441 Indices.push_back(IRB.getInt32(0)); 1442 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) { 1443 if (STy->element_begin() == STy->element_end()) 1444 break; // Nothing left to descend into. 1445 ElementTy = *STy->element_begin(); 1446 Indices.push_back(IRB.getInt32(0)); 1447 } else { 1448 break; 1449 } 1450 ++NumLayers; 1451 } while (ElementTy != TargetTy); 1452 if (ElementTy != TargetTy) 1453 Indices.erase(Indices.end() - NumLayers, Indices.end()); 1454 1455 return buildGEP(IRB, BasePtr, Indices, NamePrefix); 1456 } 1457 1458 /// Recursively compute indices for a natural GEP. 1459 /// 1460 /// This is the recursive step for getNaturalGEPWithOffset that walks down the 1461 /// element types adding appropriate indices for the GEP. 1462 static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, 1463 Value *Ptr, Type *Ty, APInt &Offset, 1464 Type *TargetTy, 1465 SmallVectorImpl<Value *> &Indices, 1466 const Twine &NamePrefix) { 1467 if (Offset == 0) 1468 return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, 1469 NamePrefix); 1470 1471 // We can't recurse through pointer types. 1472 if (Ty->isPointerTy()) 1473 return nullptr; 1474 1475 // We try to analyze GEPs over vectors here, but note that these GEPs are 1476 // extremely poorly defined currently. The long-term goal is to remove GEPing 1477 // over a vector from the IR completely. 1478 if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) { 1479 unsigned ElementSizeInBits = 1480 DL.getTypeSizeInBits(VecTy->getScalarType()).getFixedSize(); 1481 if (ElementSizeInBits % 8 != 0) { 1482 // GEPs over non-multiple of 8 size vector elements are invalid. 
1483 return nullptr; 1484 } 1485 APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8); 1486 APInt NumSkippedElements = Offset.sdiv(ElementSize); 1487 if (NumSkippedElements.ugt(cast<FixedVectorType>(VecTy)->getNumElements())) 1488 return nullptr; 1489 Offset -= NumSkippedElements * ElementSize; 1490 Indices.push_back(IRB.getInt(NumSkippedElements)); 1491 return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(), 1492 Offset, TargetTy, Indices, NamePrefix); 1493 } 1494 1495 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 1496 Type *ElementTy = ArrTy->getElementType(); 1497 APInt ElementSize(Offset.getBitWidth(), 1498 DL.getTypeAllocSize(ElementTy).getFixedSize()); 1499 APInt NumSkippedElements = Offset.sdiv(ElementSize); 1500 if (NumSkippedElements.ugt(ArrTy->getNumElements())) 1501 return nullptr; 1502 1503 Offset -= NumSkippedElements * ElementSize; 1504 Indices.push_back(IRB.getInt(NumSkippedElements)); 1505 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, 1506 Indices, NamePrefix); 1507 } 1508 1509 StructType *STy = dyn_cast<StructType>(Ty); 1510 if (!STy) 1511 return nullptr; 1512 1513 const StructLayout *SL = DL.getStructLayout(STy); 1514 uint64_t StructOffset = Offset.getZExtValue(); 1515 if (StructOffset >= SL->getSizeInBytes()) 1516 return nullptr; 1517 unsigned Index = SL->getElementContainingOffset(StructOffset); 1518 Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index)); 1519 Type *ElementTy = STy->getElementType(Index); 1520 if (Offset.uge(DL.getTypeAllocSize(ElementTy).getFixedSize())) 1521 return nullptr; // The offset points into alignment padding. 1522 1523 Indices.push_back(IRB.getInt32(Index)); 1524 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, 1525 Indices, NamePrefix); 1526 } 1527 1528 /// Get a natural GEP from a base pointer to a particular offset and 1529 /// resulting in a particular type. 1530 /// 1531 /// The goal is to produce a "natural" looking GEP that works with the existing 1532 /// composite types to arrive at the appropriate offset and element type for 1533 /// a pointer. TargetTy is the element type the returned GEP should point-to if 1534 /// possible. We recurse by decreasing Offset, adding the appropriate index to 1535 /// Indices, and setting Ty to the result subtype. 1536 /// 1537 /// If no natural GEP can be constructed, this function returns null. 1538 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, 1539 Value *Ptr, APInt Offset, Type *TargetTy, 1540 SmallVectorImpl<Value *> &Indices, 1541 const Twine &NamePrefix) { 1542 PointerType *Ty = cast<PointerType>(Ptr->getType()); 1543 1544 // Don't consider any GEPs through an i8* as natural unless the TargetTy is 1545 // an i8. 1546 if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8)) 1547 return nullptr; 1548 1549 Type *ElementTy = Ty->getElementType(); 1550 if (!ElementTy->isSized()) 1551 return nullptr; // We can't GEP through an unsized element. 1552 if (isa<ScalableVectorType>(ElementTy)) 1553 return nullptr; 1554 APInt ElementSize(Offset.getBitWidth(), 1555 DL.getTypeAllocSize(ElementTy).getFixedSize()); 1556 if (ElementSize == 0) 1557 return nullptr; // Zero-length arrays can't help us build a natural GEP. 
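  // For instance (illustrative): with an i32 element type (ElementSize == 4)
  // and Offset == 12, three elements are skipped and the remaining Offset
  // becomes 0 for the recursive walk below.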
1558   APInt NumSkippedElements = Offset.sdiv(ElementSize);
1559
1560   Offset -= NumSkippedElements * ElementSize;
1561   Indices.push_back(IRB.getInt(NumSkippedElements));
1562   return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1563                                   Indices, NamePrefix);
1564 }
1565
1566 /// Compute an adjusted pointer from Ptr by Offset bytes where the
1567 /// resulting pointer has PointerTy.
1568 ///
1569 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1570 /// and produces the pointer type desired. Where it cannot, it will try to use
1571 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1572 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1573 /// bitcast to the type.
1574 ///
1575 /// The strategy for finding the more natural GEPs is to peel off layers of the
1576 /// pointer, walking back through bit casts and GEPs, searching for a base
1577 /// pointer from which we can compute a natural GEP with the desired
1578 /// properties. The algorithm tries to fold as many constant indices into
1579 /// a single GEP as possible, thus making each GEP more independent of the
1580 /// surrounding code.
1581 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1582                              APInt Offset, Type *PointerTy,
1583                              const Twine &NamePrefix) {
1584   // Even though we don't look through PHI nodes, we could be called on an
1585   // instruction in an unreachable block, which may be on a cycle.
1586   SmallPtrSet<Value *, 4> Visited;
1587   Visited.insert(Ptr);
1588   SmallVector<Value *, 4> Indices;
1589
1590   // We may end up computing an offset pointer that has the wrong type. If we
1591   // never are able to compute one directly that has the correct type, we'll
1592   // fall back to it, so keep it and the base it was computed from around here.
1593   Value *OffsetPtr = nullptr;
1594   Value *OffsetBasePtr;
1595
1596   // Remember any i8 pointer we come across to re-use if we need to do a raw
1597   // byte offset.
1598   Value *Int8Ptr = nullptr;
1599   APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1600
1601   PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
1602   Type *TargetTy = TargetPtrTy->getElementType();
1603
1604   // As `addrspacecast` is not always a no-op cast, `Ptr` (the storage pointer)
1605   // may have a different address space from the expected `PointerTy` (the
1606   // pointer to be used). Adjust the pointer type based on the original storage pointer.
1607   auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
1608   PointerTy = TargetTy->getPointerTo(AS);
1609
1610   do {
1611     // First fold any existing GEPs into the offset.
1612     while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1613       APInt GEPOffset(Offset.getBitWidth(), 0);
1614       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1615         break;
1616       Offset += GEPOffset;
1617       Ptr = GEP->getPointerOperand();
1618       if (!Visited.insert(Ptr).second)
1619         break;
1620     }
1621
1622     // See if we can perform a natural GEP here.
1623     Indices.clear();
1624     if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1625                                            Indices, NamePrefix)) {
1626       // If we have a new natural pointer at the offset, clear out any old
1627       // offset pointer we computed. Unless it is the base pointer or
1628       // a non-instruction, we built a GEP we don't need. Zap it.
1629 if (OffsetPtr && OffsetPtr != OffsetBasePtr) 1630 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) { 1631 assert(I->use_empty() && "Built a GEP with uses some how!"); 1632 I->eraseFromParent(); 1633 } 1634 OffsetPtr = P; 1635 OffsetBasePtr = Ptr; 1636 // If we also found a pointer of the right type, we're done. 1637 if (P->getType() == PointerTy) 1638 break; 1639 } 1640 1641 // Stash this pointer if we've found an i8*. 1642 if (Ptr->getType()->isIntegerTy(8)) { 1643 Int8Ptr = Ptr; 1644 Int8PtrOffset = Offset; 1645 } 1646 1647 // Peel off a layer of the pointer and update the offset appropriately. 1648 if (Operator::getOpcode(Ptr) == Instruction::BitCast) { 1649 Ptr = cast<Operator>(Ptr)->getOperand(0); 1650 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { 1651 if (GA->isInterposable()) 1652 break; 1653 Ptr = GA->getAliasee(); 1654 } else { 1655 break; 1656 } 1657 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!"); 1658 } while (Visited.insert(Ptr).second); 1659 1660 if (!OffsetPtr) { 1661 if (!Int8Ptr) { 1662 Int8Ptr = IRB.CreateBitCast( 1663 Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()), 1664 NamePrefix + "sroa_raw_cast"); 1665 Int8PtrOffset = Offset; 1666 } 1667 1668 OffsetPtr = Int8PtrOffset == 0 1669 ? Int8Ptr 1670 : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr, 1671 IRB.getInt(Int8PtrOffset), 1672 NamePrefix + "sroa_raw_idx"); 1673 } 1674 Ptr = OffsetPtr; 1675 1676 // On the off chance we were targeting i8*, guard the bitcast here. 1677 if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) { 1678 Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, 1679 TargetPtrTy, 1680 NamePrefix + "sroa_cast"); 1681 } 1682 1683 return Ptr; 1684 } 1685 1686 /// Compute the adjusted alignment for a load or store from an offset. 1687 static Align getAdjustedAlignment(Instruction *I, uint64_t Offset) { 1688 return commonAlignment(getLoadStoreAlignment(I), Offset); 1689 } 1690 1691 /// Test whether we can convert a value from the old to the new type. 1692 /// 1693 /// This predicate should be used to guard calls to convertValue in order to 1694 /// ensure that we only try to convert viable values. The strategy is that we 1695 /// will peel off single element struct and array wrappings to get to an 1696 /// underlying value, and convert that value. 1697 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { 1698 if (OldTy == NewTy) 1699 return true; 1700 1701 // For integer types, we can't handle any bit-width differences. This would 1702 // break both vector conversions with extension and introduce endianness 1703 // issues when in conjunction with loads and stores. 1704 if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) { 1705 assert(cast<IntegerType>(OldTy)->getBitWidth() != 1706 cast<IntegerType>(NewTy)->getBitWidth() && 1707 "We can't have the same bitwidth for different int types"); 1708 return false; 1709 } 1710 1711 if (DL.getTypeSizeInBits(NewTy).getFixedSize() != 1712 DL.getTypeSizeInBits(OldTy).getFixedSize()) 1713 return false; 1714 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) 1715 return false; 1716 1717 // We can convert pointers to integers and vice-versa. Same for vectors 1718 // of pointers and integers. 
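  // A few illustrative pairs (not from the original source), assuming a
  // DataLayout with 64-bit integral pointers: i64 <-> i8*,
  // <2 x i64> <-> <2 x i8*>, and float <-> i32 are all accepted by the checks
  // below, while a pointer in a non-integral address space can only be
  // converted to a pointer in that same address space.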
1719 OldTy = OldTy->getScalarType(); 1720 NewTy = NewTy->getScalarType(); 1721 if (NewTy->isPointerTy() || OldTy->isPointerTy()) { 1722 if (NewTy->isPointerTy() && OldTy->isPointerTy()) { 1723 unsigned OldAS = OldTy->getPointerAddressSpace(); 1724 unsigned NewAS = NewTy->getPointerAddressSpace(); 1725 // Convert pointers if they are pointers from the same address space or 1726 // different integral (not non-integral) address spaces with the same 1727 // pointer size. 1728 return OldAS == NewAS || 1729 (!DL.isNonIntegralAddressSpace(OldAS) && 1730 !DL.isNonIntegralAddressSpace(NewAS) && 1731 DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS)); 1732 } 1733 1734 // We can convert integers to integral pointers, but not to non-integral 1735 // pointers. 1736 if (OldTy->isIntegerTy()) 1737 return !DL.isNonIntegralPointerType(NewTy); 1738 1739 // We can convert integral pointers to integers, but non-integral pointers 1740 // need to remain pointers. 1741 if (!DL.isNonIntegralPointerType(OldTy)) 1742 return NewTy->isIntegerTy(); 1743 1744 return false; 1745 } 1746 1747 return true; 1748 } 1749 1750 /// Generic routine to convert an SSA value to a value of a different 1751 /// type. 1752 /// 1753 /// This will try various different casting techniques, such as bitcasts, 1754 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test 1755 /// two types for viability with this routine. 1756 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 1757 Type *NewTy) { 1758 Type *OldTy = V->getType(); 1759 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type"); 1760 1761 if (OldTy == NewTy) 1762 return V; 1763 1764 assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) && 1765 "Integer types must be the exact same to convert."); 1766 1767 // See if we need inttoptr for this type pair. May require additional bitcast. 1768 if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) { 1769 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8* 1770 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*> 1771 // Expand <4 x i32> to <2 x i8*> --> <4 x i32> to <2 x i64> to <2 x i8*> 1772 // Directly handle i64 to i8* 1773 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), 1774 NewTy); 1775 } 1776 1777 // See if we need ptrtoint for this type pair. May require additional bitcast. 1778 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) { 1779 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128 1780 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32> 1781 // Expand <2 x i8*> to <4 x i32> --> <2 x i8*> to <2 x i64> to <4 x i32> 1782 // Expand i8* to i64 --> i8* to i64 to i64 1783 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), 1784 NewTy); 1785 } 1786 1787 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isPtrOrPtrVectorTy()) { 1788 unsigned OldAS = OldTy->getPointerAddressSpace(); 1789 unsigned NewAS = NewTy->getPointerAddressSpace(); 1790 // To convert pointers with different address spaces (they are already 1791 // checked convertible, i.e. they have the same pointer size), so far we 1792 // cannot use `bitcast` (which has restrict on the same address space) or 1793 // `addrspacecast` (which is not always no-op casting). Instead, use a pair 1794 // of no-op `ptrtoint`/`inttoptr` casts through an integer with the same bit 1795 // size. 
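    // Illustrative example (not from the original source), assuming 64-bit
    // pointers in both address spaces: converting i8 addrspace(1)* to
    // i8 addrspace(2)* emits roughly
    //   %0 = ptrtoint i8 addrspace(1)* %v to i64
    //   %1 = inttoptr i64 %0 to i8 addrspace(2)*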
1796 if (OldAS != NewAS) { 1797 assert(DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS)); 1798 return IRB.CreateIntToPtr(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), 1799 NewTy); 1800 } 1801 } 1802 1803 return IRB.CreateBitCast(V, NewTy); 1804 } 1805 1806 /// Test whether the given slice use can be promoted to a vector. 1807 /// 1808 /// This function is called to test each entry in a partition which is slated 1809 /// for a single slice. 1810 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S, 1811 VectorType *Ty, 1812 uint64_t ElementSize, 1813 const DataLayout &DL) { 1814 // First validate the slice offsets. 1815 uint64_t BeginOffset = 1816 std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset(); 1817 uint64_t BeginIndex = BeginOffset / ElementSize; 1818 if (BeginIndex * ElementSize != BeginOffset || 1819 BeginIndex >= cast<FixedVectorType>(Ty)->getNumElements()) 1820 return false; 1821 uint64_t EndOffset = 1822 std::min(S.endOffset(), P.endOffset()) - P.beginOffset(); 1823 uint64_t EndIndex = EndOffset / ElementSize; 1824 if (EndIndex * ElementSize != EndOffset || 1825 EndIndex > cast<FixedVectorType>(Ty)->getNumElements()) 1826 return false; 1827 1828 assert(EndIndex > BeginIndex && "Empty vector!"); 1829 uint64_t NumElements = EndIndex - BeginIndex; 1830 Type *SliceTy = (NumElements == 1) 1831 ? Ty->getElementType() 1832 : FixedVectorType::get(Ty->getElementType(), NumElements); 1833 1834 Type *SplitIntTy = 1835 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); 1836 1837 Use *U = S.getUse(); 1838 1839 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1840 if (MI->isVolatile()) 1841 return false; 1842 if (!S.isSplittable()) 1843 return false; // Skip any unsplittable intrinsics. 1844 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 1845 if (!II->isLifetimeStartOrEnd() && !II->isDroppable()) 1846 return false; 1847 } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { 1848 // Disable vector promotion when there are loads or stores of an FCA. 1849 return false; 1850 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1851 if (LI->isVolatile()) 1852 return false; 1853 Type *LTy = LI->getType(); 1854 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1855 assert(LTy->isIntegerTy()); 1856 LTy = SplitIntTy; 1857 } 1858 if (!canConvertValue(DL, SliceTy, LTy)) 1859 return false; 1860 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1861 if (SI->isVolatile()) 1862 return false; 1863 Type *STy = SI->getValueOperand()->getType(); 1864 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1865 assert(STy->isIntegerTy()); 1866 STy = SplitIntTy; 1867 } 1868 if (!canConvertValue(DL, STy, SliceTy)) 1869 return false; 1870 } else { 1871 return false; 1872 } 1873 1874 return true; 1875 } 1876 1877 /// Test whether the given alloca partitioning and range of slices can be 1878 /// promoted to a vector. 1879 /// 1880 /// This is a quick test to check whether we can rewrite a particular alloca 1881 /// partition (and its newly formed alloca) into a vector alloca with only 1882 /// whole-vector loads and stores such that it could be promoted to a vector 1883 /// SSA value. We only can ensure this for a limited set of operations, and we 1884 /// don't want to do the rewrites unless we are confident that the result will 1885 /// be promotable, so we have an early test here. 
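/// For example (illustrative, not taken from the original source): a 16-byte
/// partition whose slices are whole-partition loads and stores of <4 x float>,
/// plus a float load at offset 8, fits a <4 x float> alloca (the float access
/// maps to element 2); a 4-byte access at offset 6 would not be
/// element-aligned and defeats this form of promotion.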
1886 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) { 1887 // Collect the candidate types for vector-based promotion. Also track whether 1888 // we have different element types. 1889 SmallVector<VectorType *, 4> CandidateTys; 1890 Type *CommonEltTy = nullptr; 1891 bool HaveCommonEltTy = true; 1892 auto CheckCandidateType = [&](Type *Ty) { 1893 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1894 // Return if bitcast to vectors is different for total size in bits. 1895 if (!CandidateTys.empty()) { 1896 VectorType *V = CandidateTys[0]; 1897 if (DL.getTypeSizeInBits(VTy).getFixedSize() != 1898 DL.getTypeSizeInBits(V).getFixedSize()) { 1899 CandidateTys.clear(); 1900 return; 1901 } 1902 } 1903 CandidateTys.push_back(VTy); 1904 if (!CommonEltTy) 1905 CommonEltTy = VTy->getElementType(); 1906 else if (CommonEltTy != VTy->getElementType()) 1907 HaveCommonEltTy = false; 1908 } 1909 }; 1910 // Consider any loads or stores that are the exact size of the slice. 1911 for (const Slice &S : P) 1912 if (S.beginOffset() == P.beginOffset() && 1913 S.endOffset() == P.endOffset()) { 1914 if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser())) 1915 CheckCandidateType(LI->getType()); 1916 else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser())) 1917 CheckCandidateType(SI->getValueOperand()->getType()); 1918 } 1919 1920 // If we didn't find a vector type, nothing to do here. 1921 if (CandidateTys.empty()) 1922 return nullptr; 1923 1924 // Remove non-integer vector types if we had multiple common element types. 1925 // FIXME: It'd be nice to replace them with integer vector types, but we can't 1926 // do that until all the backends are known to produce good code for all 1927 // integer vector types. 1928 if (!HaveCommonEltTy) { 1929 llvm::erase_if(CandidateTys, [](VectorType *VTy) { 1930 return !VTy->getElementType()->isIntegerTy(); 1931 }); 1932 1933 // If there were no integer vector types, give up. 1934 if (CandidateTys.empty()) 1935 return nullptr; 1936 1937 // Rank the remaining candidate vector types. This is easy because we know 1938 // they're all integer vectors. We sort by ascending number of elements. 1939 auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { 1940 (void)DL; 1941 assert(DL.getTypeSizeInBits(RHSTy).getFixedSize() == 1942 DL.getTypeSizeInBits(LHSTy).getFixedSize() && 1943 "Cannot have vector types of different sizes!"); 1944 assert(RHSTy->getElementType()->isIntegerTy() && 1945 "All non-integer types eliminated!"); 1946 assert(LHSTy->getElementType()->isIntegerTy() && 1947 "All non-integer types eliminated!"); 1948 return cast<FixedVectorType>(RHSTy)->getNumElements() < 1949 cast<FixedVectorType>(LHSTy)->getNumElements(); 1950 }; 1951 llvm::sort(CandidateTys, RankVectorTypes); 1952 CandidateTys.erase( 1953 std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), 1954 CandidateTys.end()); 1955 } else { 1956 // The only way to have the same element type in every vector type is to 1957 // have the same vector type. Check that and remove all but one. 1958 #ifndef NDEBUG 1959 for (VectorType *VTy : CandidateTys) { 1960 assert(VTy->getElementType() == CommonEltTy && 1961 "Unaccounted for element type!"); 1962 assert(VTy == CandidateTys[0] && 1963 "Different vector types with the same element type!"); 1964 } 1965 #endif 1966 CandidateTys.resize(1); 1967 } 1968 1969 // Try each vector type, and return the one which works. 
1970 auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { 1971 uint64_t ElementSize = 1972 DL.getTypeSizeInBits(VTy->getElementType()).getFixedSize(); 1973 1974 // While the definition of LLVM vectors is bitpacked, we don't support sizes 1975 // that aren't byte sized. 1976 if (ElementSize % 8) 1977 return false; 1978 assert((DL.getTypeSizeInBits(VTy).getFixedSize() % 8) == 0 && 1979 "vector size not a multiple of element size?"); 1980 ElementSize /= 8; 1981 1982 for (const Slice &S : P) 1983 if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL)) 1984 return false; 1985 1986 for (const Slice *S : P.splitSliceTails()) 1987 if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL)) 1988 return false; 1989 1990 return true; 1991 }; 1992 for (VectorType *VTy : CandidateTys) 1993 if (CheckVectorTypeForPromotion(VTy)) 1994 return VTy; 1995 1996 return nullptr; 1997 } 1998 1999 /// Test whether a slice of an alloca is valid for integer widening. 2000 /// 2001 /// This implements the necessary checking for the \c isIntegerWideningViable 2002 /// test below on a single slice of the alloca. 2003 static bool isIntegerWideningViableForSlice(const Slice &S, 2004 uint64_t AllocBeginOffset, 2005 Type *AllocaTy, 2006 const DataLayout &DL, 2007 bool &WholeAllocaOp) { 2008 uint64_t Size = DL.getTypeStoreSize(AllocaTy).getFixedSize(); 2009 2010 uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; 2011 uint64_t RelEnd = S.endOffset() - AllocBeginOffset; 2012 2013 // We can't reasonably handle cases where the load or store extends past 2014 // the end of the alloca's type and into its padding. 2015 if (RelEnd > Size) 2016 return false; 2017 2018 Use *U = S.getUse(); 2019 2020 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 2021 if (LI->isVolatile()) 2022 return false; 2023 // We can't handle loads that extend past the allocated memory. 2024 if (DL.getTypeStoreSize(LI->getType()).getFixedSize() > Size) 2025 return false; 2026 // So far, AllocaSliceRewriter does not support widening split slice tails 2027 // in rewriteIntegerLoad. 2028 if (S.beginOffset() < AllocBeginOffset) 2029 return false; 2030 // Note that we don't count vector loads or stores as whole-alloca 2031 // operations which enable integer widening because we would prefer to use 2032 // vector widening instead. 2033 if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size) 2034 WholeAllocaOp = true; 2035 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { 2036 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize()) 2037 return false; 2038 } else if (RelBegin != 0 || RelEnd != Size || 2039 !canConvertValue(DL, AllocaTy, LI->getType())) { 2040 // Non-integer loads need to be convertible from the alloca type so that 2041 // they are promotable. 2042 return false; 2043 } 2044 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 2045 Type *ValueTy = SI->getValueOperand()->getType(); 2046 if (SI->isVolatile()) 2047 return false; 2048 // We can't handle stores that extend past the allocated memory. 2049 if (DL.getTypeStoreSize(ValueTy).getFixedSize() > Size) 2050 return false; 2051 // So far, AllocaSliceRewriter does not support widening split slice tails 2052 // in rewriteIntegerStore. 2053 if (S.beginOffset() < AllocBeginOffset) 2054 return false; 2055 // Note that we don't count vector loads or stores as whole-alloca 2056 // operations which enable integer widening because we would prefer to use 2057 // vector widening instead. 
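    // (Illustrative, not from the original source: an i64 store covering an
    // 8-byte partition sets WholeAllocaOp below, while a <2 x i32> store of
    // the same bytes does not, since vector promotion is preferred for it.)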
2058 if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size) 2059 WholeAllocaOp = true; 2060 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) { 2061 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize()) 2062 return false; 2063 } else if (RelBegin != 0 || RelEnd != Size || 2064 !canConvertValue(DL, ValueTy, AllocaTy)) { 2065 // Non-integer stores need to be convertible to the alloca type so that 2066 // they are promotable. 2067 return false; 2068 } 2069 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 2070 if (MI->isVolatile() || !isa<Constant>(MI->getLength())) 2071 return false; 2072 if (!S.isSplittable()) 2073 return false; // Skip any unsplittable intrinsics. 2074 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 2075 if (!II->isLifetimeStartOrEnd() && !II->isDroppable()) 2076 return false; 2077 } else { 2078 return false; 2079 } 2080 2081 return true; 2082 } 2083 2084 /// Test whether the given alloca partition's integer operations can be 2085 /// widened to promotable ones. 2086 /// 2087 /// This is a quick test to check whether we can rewrite the integer loads and 2088 /// stores to a particular alloca into wider loads and stores and be able to 2089 /// promote the resulting alloca. 2090 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy, 2091 const DataLayout &DL) { 2092 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy).getFixedSize(); 2093 // Don't create integer types larger than the maximum bitwidth. 2094 if (SizeInBits > IntegerType::MAX_INT_BITS) 2095 return false; 2096 2097 // Don't try to handle allocas with bit-padding. 2098 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy).getFixedSize()) 2099 return false; 2100 2101 // We need to ensure that an integer type with the appropriate bitwidth can 2102 // be converted to the alloca type, whatever that is. We don't want to force 2103 // the alloca itself to have an integer type if there is a more suitable one. 2104 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); 2105 if (!canConvertValue(DL, AllocaTy, IntTy) || 2106 !canConvertValue(DL, IntTy, AllocaTy)) 2107 return false; 2108 2109 // While examining uses, we ensure that the alloca has a covering load or 2110 // store. We don't want to widen the integer operations only to fail to 2111 // promote due to some other unsplittable entry (which we may make splittable 2112 // later). However, if there are only splittable uses, go ahead and assume 2113 // that we cover the alloca. 2114 // FIXME: We shouldn't consider split slices that happen to start in the 2115 // partition here... 
2116 bool WholeAllocaOp = P.empty() && DL.isLegalInteger(SizeInBits); 2117 2118 for (const Slice &S : P) 2119 if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL, 2120 WholeAllocaOp)) 2121 return false; 2122 2123 for (const Slice *S : P.splitSliceTails()) 2124 if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL, 2125 WholeAllocaOp)) 2126 return false; 2127 2128 return WholeAllocaOp; 2129 } 2130 2131 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 2132 IntegerType *Ty, uint64_t Offset, 2133 const Twine &Name) { 2134 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2135 IntegerType *IntTy = cast<IntegerType>(V->getType()); 2136 assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <= 2137 DL.getTypeStoreSize(IntTy).getFixedSize() && 2138 "Element extends past full value"); 2139 uint64_t ShAmt = 8 * Offset; 2140 if (DL.isBigEndian()) 2141 ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() - 2142 DL.getTypeStoreSize(Ty).getFixedSize() - Offset); 2143 if (ShAmt) { 2144 V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); 2145 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2146 } 2147 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2148 "Cannot extract to a larger integer!"); 2149 if (Ty != IntTy) { 2150 V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); 2151 LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n"); 2152 } 2153 return V; 2154 } 2155 2156 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, 2157 Value *V, uint64_t Offset, const Twine &Name) { 2158 IntegerType *IntTy = cast<IntegerType>(Old->getType()); 2159 IntegerType *Ty = cast<IntegerType>(V->getType()); 2160 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2161 "Cannot insert a larger integer!"); 2162 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2163 if (Ty != IntTy) { 2164 V = IRB.CreateZExt(V, IntTy, Name + ".ext"); 2165 LLVM_DEBUG(dbgs() << " extended: " << *V << "\n"); 2166 } 2167 assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <= 2168 DL.getTypeStoreSize(IntTy).getFixedSize() && 2169 "Element store outside of alloca store"); 2170 uint64_t ShAmt = 8 * Offset; 2171 if (DL.isBigEndian()) 2172 ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() - 2173 DL.getTypeStoreSize(Ty).getFixedSize() - Offset); 2174 if (ShAmt) { 2175 V = IRB.CreateShl(V, ShAmt, Name + ".shift"); 2176 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2177 } 2178 2179 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { 2180 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); 2181 Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); 2182 LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n"); 2183 V = IRB.CreateOr(Old, V, Name + ".insert"); 2184 LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n"); 2185 } 2186 return V; 2187 } 2188 2189 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, 2190 unsigned EndIndex, const Twine &Name) { 2191 auto *VecTy = cast<FixedVectorType>(V->getType()); 2192 unsigned NumElements = EndIndex - BeginIndex; 2193 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2194 2195 if (NumElements == VecTy->getNumElements()) 2196 return V; 2197 2198 if (NumElements == 1) { 2199 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex), 2200 Name + ".extract"); 2201 LLVM_DEBUG(dbgs() << " extract: " << *V << "\n"); 2202 return V; 2203 } 2204 2205 SmallVector<int, 8> Mask; 2206 Mask.reserve(NumElements); 2207 for (unsigned i = BeginIndex; i != EndIndex; ++i) 2208 Mask.push_back(i); 2209 
V = IRB.CreateShuffleVector(V, Mask, Name + ".extract"); 2210 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n"); 2211 return V; 2212 } 2213 2214 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, 2215 unsigned BeginIndex, const Twine &Name) { 2216 VectorType *VecTy = cast<VectorType>(Old->getType()); 2217 assert(VecTy && "Can only insert a vector into a vector"); 2218 2219 VectorType *Ty = dyn_cast<VectorType>(V->getType()); 2220 if (!Ty) { 2221 // Single element to insert. 2222 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex), 2223 Name + ".insert"); 2224 LLVM_DEBUG(dbgs() << " insert: " << *V << "\n"); 2225 return V; 2226 } 2227 2228 assert(cast<FixedVectorType>(Ty)->getNumElements() <= 2229 cast<FixedVectorType>(VecTy)->getNumElements() && 2230 "Too many elements!"); 2231 if (cast<FixedVectorType>(Ty)->getNumElements() == 2232 cast<FixedVectorType>(VecTy)->getNumElements()) { 2233 assert(V->getType() == VecTy && "Vector type mismatch"); 2234 return V; 2235 } 2236 unsigned EndIndex = BeginIndex + cast<FixedVectorType>(Ty)->getNumElements(); 2237 2238 // When inserting a smaller vector into the larger to store, we first 2239 // use a shuffle vector to widen it with undef elements, and then 2240 // a second shuffle vector to select between the loaded vector and the 2241 // incoming vector. 2242 SmallVector<int, 8> Mask; 2243 Mask.reserve(cast<FixedVectorType>(VecTy)->getNumElements()); 2244 for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i) 2245 if (i >= BeginIndex && i < EndIndex) 2246 Mask.push_back(i - BeginIndex); 2247 else 2248 Mask.push_back(-1); 2249 V = IRB.CreateShuffleVector(V, Mask, Name + ".expand"); 2250 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n"); 2251 2252 SmallVector<Constant *, 8> Mask2; 2253 Mask2.reserve(cast<FixedVectorType>(VecTy)->getNumElements()); 2254 for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i) 2255 Mask2.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex)); 2256 2257 V = IRB.CreateSelect(ConstantVector::get(Mask2), V, Old, Name + "blend"); 2258 2259 LLVM_DEBUG(dbgs() << " blend: " << *V << "\n"); 2260 return V; 2261 } 2262 2263 /// Visitor to rewrite instructions using p particular slice of an alloca 2264 /// to use a new alloca. 2265 /// 2266 /// Also implements the rewriting to vector-based accesses when the partition 2267 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic 2268 /// lives here. 2269 class llvm::sroa::AllocaSliceRewriter 2270 : public InstVisitor<AllocaSliceRewriter, bool> { 2271 // Befriend the base class so it can delegate to private visit methods. 2272 friend class InstVisitor<AllocaSliceRewriter, bool>; 2273 2274 using Base = InstVisitor<AllocaSliceRewriter, bool>; 2275 2276 const DataLayout &DL; 2277 AllocaSlices &AS; 2278 SROA &Pass; 2279 AllocaInst &OldAI, &NewAI; 2280 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; 2281 Type *NewAllocaTy; 2282 2283 // This is a convenience and flag variable that will be null unless the new 2284 // alloca's integer operations should be widened to this integer type due to 2285 // passing isIntegerWideningViable above. If it is non-null, the desired 2286 // integer type will be stored here for easy access during rewriting. 2287 IntegerType *IntTy; 2288 2289 // If we are rewriting an alloca partition which can be written as pure 2290 // vector operations, we stash extra information here. 
When VecTy is 2291 // non-null, we have some strict guarantees about the rewritten alloca: 2292 // - The new alloca is exactly the size of the vector type here. 2293 // - The accesses all either map to the entire vector or to a single 2294 // element. 2295 // - The set of accessing instructions is only one of those handled above 2296 // in isVectorPromotionViable. Generally these are the same access kinds 2297 // which are promotable via mem2reg. 2298 VectorType *VecTy; 2299 Type *ElementTy; 2300 uint64_t ElementSize; 2301 2302 // The original offset of the slice currently being rewritten relative to 2303 // the original alloca. 2304 uint64_t BeginOffset = 0; 2305 uint64_t EndOffset = 0; 2306 2307 // The new offsets of the slice currently being rewritten relative to the 2308 // original alloca. 2309 uint64_t NewBeginOffset = 0, NewEndOffset = 0; 2310 2311 uint64_t SliceSize = 0; 2312 bool IsSplittable = false; 2313 bool IsSplit = false; 2314 Use *OldUse = nullptr; 2315 Instruction *OldPtr = nullptr; 2316 2317 // Track post-rewrite users which are PHI nodes and Selects. 2318 SmallSetVector<PHINode *, 8> &PHIUsers; 2319 SmallSetVector<SelectInst *, 8> &SelectUsers; 2320 2321 // Utility IR builder, whose name prefix is setup for each visited use, and 2322 // the insertion point is set to point to the user. 2323 IRBuilderTy IRB; 2324 2325 public: 2326 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass, 2327 AllocaInst &OldAI, AllocaInst &NewAI, 2328 uint64_t NewAllocaBeginOffset, 2329 uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, 2330 VectorType *PromotableVecTy, 2331 SmallSetVector<PHINode *, 8> &PHIUsers, 2332 SmallSetVector<SelectInst *, 8> &SelectUsers) 2333 : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), 2334 NewAllocaBeginOffset(NewAllocaBeginOffset), 2335 NewAllocaEndOffset(NewAllocaEndOffset), 2336 NewAllocaTy(NewAI.getAllocatedType()), 2337 IntTy( 2338 IsIntegerPromotable 2339 ? Type::getIntNTy(NewAI.getContext(), 2340 DL.getTypeSizeInBits(NewAI.getAllocatedType()) 2341 .getFixedSize()) 2342 : nullptr), 2343 VecTy(PromotableVecTy), 2344 ElementTy(VecTy ? VecTy->getElementType() : nullptr), 2345 ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8 2346 : 0), 2347 PHIUsers(PHIUsers), SelectUsers(SelectUsers), 2348 IRB(NewAI.getContext(), ConstantFolder()) { 2349 if (VecTy) { 2350 assert((DL.getTypeSizeInBits(ElementTy).getFixedSize() % 8) == 0 && 2351 "Only multiple-of-8 sized vector elements are viable"); 2352 ++NumVectorized; 2353 } 2354 assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); 2355 } 2356 2357 bool visit(AllocaSlices::const_iterator I) { 2358 bool CanSROA = true; 2359 BeginOffset = I->beginOffset(); 2360 EndOffset = I->endOffset(); 2361 IsSplittable = I->isSplittable(); 2362 IsSplit = 2363 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; 2364 LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); 2365 LLVM_DEBUG(AS.printSlice(dbgs(), I, "")); 2366 LLVM_DEBUG(dbgs() << "\n"); 2367 2368 // Compute the intersecting offset range. 
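    // Illustrative example (not from the original source): rewriting a split
    // slice covering [0, 16) of the original alloca against a new alloca that
    // spans [8, 24) clamps to NewBeginOffset == 8 and NewEndOffset == 16, so
    // only those 8 bytes (SliceSize below) are rewritten here.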
2369     assert(BeginOffset < NewAllocaEndOffset);
2370     assert(EndOffset > NewAllocaBeginOffset);
2371     NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2372     NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2373
2374     SliceSize = NewEndOffset - NewBeginOffset;
2375
2376     OldUse = I->getUse();
2377     OldPtr = cast<Instruction>(OldUse->get());
2378
2379     Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
2380     IRB.SetInsertPoint(OldUserI);
2381     IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
2382     IRB.getInserter().SetNamePrefix(
2383         Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
2384
2385     CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
2386     if (VecTy || IntTy)
2387       assert(CanSROA);
2388     return CanSROA;
2389   }
2390
2391 private:
2392   // Make sure the other visit overloads are visible.
2393   using Base::visit;
2394
2395   // Every instruction which can end up as a user must have a rewrite rule.
2396   bool visitInstruction(Instruction &I) {
2397     LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2398     llvm_unreachable("No rewrite rule for this instruction!");
2399   }
2400
2401   Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
2402     // Note that the offset computation can use BeginOffset or NewBeginOffset
2403     // interchangeably for unsplit slices.
2404     assert(IsSplit || BeginOffset == NewBeginOffset);
2405     uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2406
2407 #ifndef NDEBUG
2408     StringRef OldName = OldPtr->getName();
2409     // Skip through the last '.sroa.' component of the name.
2410     size_t LastSROAPrefix = OldName.rfind(".sroa.");
2411     if (LastSROAPrefix != StringRef::npos) {
2412       OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
2413       // Look for an SROA slice index.
2414       size_t IndexEnd = OldName.find_first_not_of("0123456789");
2415       if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
2416         // Strip the index and look for the offset.
2417         OldName = OldName.substr(IndexEnd + 1);
2418         size_t OffsetEnd = OldName.find_first_not_of("0123456789");
2419         if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
2420           // Strip the offset.
2421           OldName = OldName.substr(OffsetEnd + 1);
2422       }
2423     }
2424     // Strip any SROA suffixes as well.
2425     OldName = OldName.substr(0, OldName.find(".sroa_"));
2426 #endif
2427
2428     return getAdjustedPtr(IRB, DL, &NewAI,
2429                           APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
2430                           PointerTy,
2431 #ifndef NDEBUG
2432                           Twine(OldName) + "."
2433 #else
2434                           Twine()
2435 #endif
2436     );
2437   }
2438
2439   /// Compute suitable alignment to access this slice of the *new*
2440   /// alloca.
2441   ///
2442   /// The result is the new alloca's alignment reduced as needed for the
2443   /// slice's offset within it; it never exceeds the new alloca's alignment.
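  /// For example (illustrative): if the new alloca is 16-byte aligned and the
  /// slice being rewritten begins 8 bytes into it, the computed alignment is 8.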
2444 Align getSliceAlign() { 2445 return commonAlignment(NewAI.getAlign(), 2446 NewBeginOffset - NewAllocaBeginOffset); 2447 } 2448 2449 unsigned getIndex(uint64_t Offset) { 2450 assert(VecTy && "Can only call getIndex when rewriting a vector"); 2451 uint64_t RelOffset = Offset - NewAllocaBeginOffset; 2452 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); 2453 uint32_t Index = RelOffset / ElementSize; 2454 assert(Index * ElementSize == RelOffset); 2455 return Index; 2456 } 2457 2458 void deleteIfTriviallyDead(Value *V) { 2459 Instruction *I = cast<Instruction>(V); 2460 if (isInstructionTriviallyDead(I)) 2461 Pass.DeadInsts.push_back(I); 2462 } 2463 2464 Value *rewriteVectorizedLoadInst() { 2465 unsigned BeginIndex = getIndex(NewBeginOffset); 2466 unsigned EndIndex = getIndex(NewEndOffset); 2467 assert(EndIndex > BeginIndex && "Empty vector!"); 2468 2469 Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2470 NewAI.getAlign(), "load"); 2471 return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); 2472 } 2473 2474 Value *rewriteIntegerLoad(LoadInst &LI) { 2475 assert(IntTy && "We cannot insert an integer to the alloca"); 2476 assert(!LI.isVolatile()); 2477 Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2478 NewAI.getAlign(), "load"); 2479 V = convertValue(DL, IRB, V, IntTy); 2480 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2481 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2482 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) { 2483 IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8); 2484 V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract"); 2485 } 2486 // It is possible that the extracted type is not the load type. This 2487 // happens if there is a load past the end of the alloca, and as 2488 // a consequence the slice is narrower but still a candidate for integer 2489 // lowering. To handle this case, we just zero extend the extracted 2490 // integer. 2491 assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 && 2492 "Can only handle an extract for an overly wide load"); 2493 if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8) 2494 V = IRB.CreateZExt(V, LI.getType()); 2495 return V; 2496 } 2497 2498 bool visitLoadInst(LoadInst &LI) { 2499 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 2500 Value *OldOp = LI.getOperand(0); 2501 assert(OldOp == OldPtr); 2502 2503 AAMDNodes AATags; 2504 LI.getAAMetadata(AATags); 2505 2506 unsigned AS = LI.getPointerAddressSpace(); 2507 2508 Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), SliceSize * 8)
2509                                : LI.getType();
2510     const bool IsLoadPastEnd =
2511         DL.getTypeStoreSize(TargetTy).getFixedSize() > SliceSize;
2512     bool IsPtrAdjusted = false;
2513     Value *V;
2514     if (VecTy) {
2515       V = rewriteVectorizedLoadInst();
2516     } else if (IntTy && LI.getType()->isIntegerTy()) {
2517       V = rewriteIntegerLoad(LI);
2518     } else if (NewBeginOffset == NewAllocaBeginOffset &&
2519                NewEndOffset == NewAllocaEndOffset &&
2520                (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2521                 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2522                  TargetTy->isIntegerTy()))) {
2523       LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2524                                               NewAI.getAlign(), LI.isVolatile(),
2525                                               LI.getName());
2526       if (AATags)
2527         NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2528       if (LI.isVolatile())
2529         NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2530       if (NewLI->isAtomic())
2531         NewLI->setAlignment(LI.getAlign());
2532
2533       // Any !nonnull metadata or !range metadata on the old load is also valid
2534       // on the new load. This is true in some cases even when the loads
2535       // are different types, for example by mapping !nonnull metadata to
2536       // !range metadata by modeling the null pointer constant converted to the
2537       // integer type.
2538       // FIXME: Add support for range metadata here. Currently the utilities
2539       // for this don't propagate range metadata in trivial cases from one
2540       // integer load to another, don't handle non-addrspace-0 null pointers
2541       // correctly, and don't have any support for mapping ranges as the
2542       // integer type becomes wider or narrower.
2543       if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2544         copyNonnullMetadata(LI, N, *NewLI);
2545
2546       // Try to preserve nonnull metadata
2547       V = NewLI;
2548
2549       // If this is an integer load past the end of the slice (which means the
2550       // bytes outside the slice are undef or this load is dead) just forcibly
2551       // fix the integer size with correct handling of endianness.
2552       if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2553         if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2554           if (AITy->getBitWidth() < TITy->getBitWidth()) {
2555             V = IRB.CreateZExt(V, TITy, "load.ext");
2556             if (DL.isBigEndian())
2557               V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2558                                 "endian_shift");
2559           }
2560     } else {
2561       Type *LTy = TargetTy->getPointerTo(AS);
2562       LoadInst *NewLI =
2563           IRB.CreateAlignedLoad(TargetTy, getNewAllocaSlicePtr(IRB, LTy),
2564                                 getSliceAlign(), LI.isVolatile(), LI.getName());
2565       if (AATags)
2566         NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2567       if (LI.isVolatile())
2568         NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2569
2570       V = NewLI;
2571       IsPtrAdjusted = true;
2572     }
2573     V = convertValue(DL, IRB, V, TargetTy);
2574
2575     if (IsSplit) {
2576       assert(!LI.isVolatile());
2577       assert(LI.getType()->isIntegerTy() &&
2578              "Only integer type loads and stores are split");
2579       assert(SliceSize < DL.getTypeStoreSize(LI.getType()).getFixedSize() &&
2580              "Split load isn't smaller than original load");
2581       assert(DL.typeSizeEqualsStoreSize(LI.getType()) &&
2582              "Non-byte-multiple bit width");
2583       // Move the insertion point just past the load so that we can refer to it.
2584       IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
2585       // Create a placeholder value with the same type as LI to use as the
2586       // basis for the new value.
This allows us to replace the uses of LI with 2587 // the computed value, and then replace the placeholder with LI, leaving 2588 // LI only used for this computation. 2589 Value *Placeholder = new LoadInst( 2590 LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)), "", 2591 false, Align(1)); 2592 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset, 2593 "insert"); 2594 LI.replaceAllUsesWith(V); 2595 Placeholder->replaceAllUsesWith(&LI); 2596 Placeholder->deleteValue(); 2597 } else { 2598 LI.replaceAllUsesWith(V); 2599 } 2600 2601 Pass.DeadInsts.push_back(&LI); 2602 deleteIfTriviallyDead(OldOp); 2603 LLVM_DEBUG(dbgs() << " to: " << *V << "\n"); 2604 return !LI.isVolatile() && !IsPtrAdjusted; 2605 } 2606 2607 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp, 2608 AAMDNodes AATags) { 2609 if (V->getType() != VecTy) { 2610 unsigned BeginIndex = getIndex(NewBeginOffset); 2611 unsigned EndIndex = getIndex(NewEndOffset); 2612 assert(EndIndex > BeginIndex && "Empty vector!"); 2613 unsigned NumElements = EndIndex - BeginIndex; 2614 assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() && 2615 "Too many elements!"); 2616 Type *SliceTy = (NumElements == 1) 2617 ? ElementTy 2618 : FixedVectorType::get(ElementTy, NumElements); 2619 if (V->getType() != SliceTy) 2620 V = convertValue(DL, IRB, V, SliceTy); 2621 2622 // Mix in the existing elements. 2623 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2624 NewAI.getAlign(), "load"); 2625 V = insertVector(IRB, Old, V, BeginIndex, "vec"); 2626 } 2627 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); 2628 if (AATags) 2629 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2630 Pass.DeadInsts.push_back(&SI); 2631 2632 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2633 return true; 2634 } 2635 2636 bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) { 2637 assert(IntTy && "We cannot extract an integer from the alloca"); 2638 assert(!SI.isVolatile()); 2639 if (DL.getTypeSizeInBits(V->getType()).getFixedSize() != 2640 IntTy->getBitWidth()) { 2641 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2642 NewAI.getAlign(), "oldload"); 2643 Old = convertValue(DL, IRB, Old, IntTy); 2644 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2645 uint64_t Offset = BeginOffset - NewAllocaBeginOffset; 2646 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); 2647 } 2648 V = convertValue(DL, IRB, V, NewAllocaTy); 2649 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); 2650 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2651 LLVMContext::MD_access_group}); 2652 if (AATags) 2653 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2654 Pass.DeadInsts.push_back(&SI); 2655 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2656 return true; 2657 } 2658 2659 bool visitStoreInst(StoreInst &SI) { 2660 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 2661 Value *OldOp = SI.getOperand(1); 2662 assert(OldOp == OldPtr); 2663 2664 AAMDNodes AATags; 2665 SI.getAAMetadata(AATags); 2666 2667 Value *V = SI.getValueOperand(); 2668 2669 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2670 // alloca that should be re-examined after promoting this alloca. 
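    // (If the stored value is a pointer into another alloca, rewriting this
    // store may remove a use that previously blocked promoting that other
    // alloca, so it is queued below for another look.)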
2671 if (V->getType()->isPointerTy()) 2672 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) 2673 Pass.PostPromotionWorklist.insert(AI); 2674 2675 if (SliceSize < DL.getTypeStoreSize(V->getType()).getFixedSize()) { 2676 assert(!SI.isVolatile()); 2677 assert(V->getType()->isIntegerTy() && 2678 "Only integer type loads and stores are split"); 2679 assert(DL.typeSizeEqualsStoreSize(V->getType()) && 2680 "Non-byte-multiple bit width"); 2681 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); 2682 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, 2683 "extract"); 2684 } 2685 2686 if (VecTy) 2687 return rewriteVectorizedStoreInst(V, SI, OldOp, AATags); 2688 if (IntTy && V->getType()->isIntegerTy()) 2689 return rewriteIntegerStore(V, SI, AATags); 2690 2691 const bool IsStorePastEnd = 2692 DL.getTypeStoreSize(V->getType()).getFixedSize() > SliceSize; 2693 StoreInst *NewSI; 2694 if (NewBeginOffset == NewAllocaBeginOffset && 2695 NewEndOffset == NewAllocaEndOffset && 2696 (canConvertValue(DL, V->getType(), NewAllocaTy) || 2697 (IsStorePastEnd && NewAllocaTy->isIntegerTy() && 2698 V->getType()->isIntegerTy()))) { 2699 // If this is an integer store past the end of slice (and thus the bytes 2700 // past that point are irrelevant or this is unreachable), truncate the 2701 // value prior to storing. 2702 if (auto *VITy = dyn_cast<IntegerType>(V->getType())) 2703 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) 2704 if (VITy->getBitWidth() > AITy->getBitWidth()) { 2705 if (DL.isBigEndian()) 2706 V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(), 2707 "endian_shift"); 2708 V = IRB.CreateTrunc(V, AITy, "load.trunc"); 2709 } 2710 2711 V = convertValue(DL, IRB, V, NewAllocaTy); 2712 NewSI = 2713 IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), SI.isVolatile()); 2714 } else { 2715 unsigned AS = SI.getPointerAddressSpace(); 2716 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS)); 2717 NewSI = 2718 IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(), SI.isVolatile()); 2719 } 2720 NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2721 LLVMContext::MD_access_group}); 2722 if (AATags) 2723 NewSI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2724 if (SI.isVolatile()) 2725 NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID()); 2726 if (NewSI->isAtomic()) 2727 NewSI->setAlignment(SI.getAlign()); 2728 Pass.DeadInsts.push_back(&SI); 2729 deleteIfTriviallyDead(OldOp); 2730 2731 LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n"); 2732 return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); 2733 } 2734 2735 /// Compute an integer value from splatting an i8 across the given 2736 /// number of bytes. 2737 /// 2738 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't 2739 /// call this routine. 2740 /// FIXME: Heed the advice above. 2741 /// 2742 /// \param V The i8 value to splat. 
2743 /// \param Size The number of bytes in the output (assuming i8 is one byte) 2744 Value *getIntegerSplat(Value *V, unsigned Size) { 2745 assert(Size > 0 && "Expected a positive number of bytes."); 2746 IntegerType *VTy = cast<IntegerType>(V->getType()); 2747 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); 2748 if (Size == 1) 2749 return V; 2750 2751 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); 2752 V = IRB.CreateMul( 2753 IRB.CreateZExt(V, SplatIntTy, "zext"), 2754 ConstantExpr::getUDiv( 2755 Constant::getAllOnesValue(SplatIntTy), 2756 ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()), 2757 SplatIntTy)), 2758 "isplat"); 2759 return V; 2760 } 2761 2762 /// Compute a vector splat for a given element value. 2763 Value *getVectorSplat(Value *V, unsigned NumElements) { 2764 V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); 2765 LLVM_DEBUG(dbgs() << " splat: " << *V << "\n"); 2766 return V; 2767 } 2768 2769 bool visitMemSetInst(MemSetInst &II) { 2770 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 2771 assert(II.getRawDest() == OldPtr); 2772 2773 AAMDNodes AATags; 2774 II.getAAMetadata(AATags); 2775 2776 // If the memset has a variable size, it cannot be split, just adjust the 2777 // pointer to the new alloca. 2778 if (!isa<Constant>(II.getLength())) { 2779 assert(!IsSplit); 2780 assert(NewBeginOffset == BeginOffset); 2781 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); 2782 II.setDestAlignment(getSliceAlign()); 2783 2784 deleteIfTriviallyDead(OldPtr); 2785 return false; 2786 } 2787 2788 // Record this instruction for deletion. 2789 Pass.DeadInsts.push_back(&II); 2790 2791 Type *AllocaTy = NewAI.getAllocatedType(); 2792 Type *ScalarTy = AllocaTy->getScalarType(); 2793 2794 const bool CanContinue = [&]() { 2795 if (VecTy || IntTy) 2796 return true; 2797 if (BeginOffset > NewAllocaBeginOffset || 2798 EndOffset < NewAllocaEndOffset) 2799 return false; 2800 auto *C = cast<ConstantInt>(II.getLength()); 2801 if (C->getBitWidth() > 64) 2802 return false; 2803 const auto Len = C->getZExtValue(); 2804 auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext()); 2805 auto *SrcTy = FixedVectorType::get(Int8Ty, Len); 2806 return canConvertValue(DL, SrcTy, AllocaTy) && 2807 DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy).getFixedSize()); 2808 }(); 2809 2810 // If this doesn't map cleanly onto the alloca type, and that type isn't 2811 // a single value type, just emit a memset. 2812 if (!CanContinue) { 2813 Type *SizeTy = II.getLength()->getType(); 2814 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2815 CallInst *New = IRB.CreateMemSet( 2816 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, 2817 MaybeAlign(getSliceAlign()), II.isVolatile()); 2818 if (AATags) 2819 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2820 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 2821 return false; 2822 } 2823 2824 // If we can represent this as a simple value, we have to build the actual 2825 // value to store, which requires expanding the byte present in memset to 2826 // a sensible representation for the alloca type. This is essentially 2827 // splatting the byte to a sufficiently wide integer, splatting it across 2828 // any desired vector width, and bitcasting to the final type. 2829 Value *V; 2830 2831 if (VecTy) { 2832 // If this is a memset of a vectorized alloca, insert it. 
2833       assert(ElementTy == ScalarTy);
2834
2835       unsigned BeginIndex = getIndex(NewBeginOffset);
2836       unsigned EndIndex = getIndex(NewEndOffset);
2837       assert(EndIndex > BeginIndex && "Empty vector!");
2838       unsigned NumElements = EndIndex - BeginIndex;
2839       assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
2840              "Too many elements!");
2841
2842       Value *Splat = getIntegerSplat(
2843           II.getValue(), DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8);
2844       Splat = convertValue(DL, IRB, Splat, ElementTy);
2845       if (NumElements > 1)
2846         Splat = getVectorSplat(Splat, NumElements);
2847
2848       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2849                                          NewAI.getAlign(), "oldload");
2850       V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2851     } else if (IntTy) {
2852       // If this is a memset on an alloca where we can widen stores, insert the
2853       // set integer.
2854       assert(!II.isVolatile());
2855
2856       uint64_t Size = NewEndOffset - NewBeginOffset;
2857       V = getIntegerSplat(II.getValue(), Size);
2858
2859       if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2860                     EndOffset != NewAllocaEndOffset)) {
2861         Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2862                                            NewAI.getAlign(), "oldload");
2863         Old = convertValue(DL, IRB, Old, IntTy);
2864         uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2865         V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2866       } else {
2867         assert(V->getType() == IntTy &&
2868                "Wrong type for an alloca wide integer!");
2869       }
2870       V = convertValue(DL, IRB, V, AllocaTy);
2871     } else {
2872       // Established these invariants above.
2873       assert(NewBeginOffset == NewAllocaBeginOffset);
2874       assert(NewEndOffset == NewAllocaEndOffset);
2875
2876       V = getIntegerSplat(II.getValue(),
2877                           DL.getTypeSizeInBits(ScalarTy).getFixedSize() / 8);
2878       if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2879         V = getVectorSplat(
2880             V, cast<FixedVectorType>(AllocaVecTy)->getNumElements());
2881
2882       V = convertValue(DL, IRB, V, AllocaTy);
2883     }
2884
2885     StoreInst *New =
2886         IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), II.isVolatile());
2887     if (AATags)
2888       New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2889     LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2890     return !II.isVolatile();
2891   }
2892
2893   bool visitMemTransferInst(MemTransferInst &II) {
2894     // Rewriting of memory transfer instructions can be a bit tricky. We break
2895     // them into two categories: split intrinsics and unsplit intrinsics.
2896
2897     LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2898
2899     AAMDNodes AATags;
2900     II.getAAMetadata(AATags);
2901
2902     bool IsDest = &II.getRawDestUse() == OldUse;
2903     assert((IsDest && II.getRawDest() == OldPtr) ||
2904            (!IsDest && II.getRawSource() == OldPtr));
2905
2906     MaybeAlign SliceAlign = getSliceAlign();
2907
2908     // For unsplit intrinsics, we simply modify the source and destination
2909     // pointers in place. This isn't just an optimization, it is a matter of
2910     // correctness. With unsplit intrinsics we may be dealing with transfers
2911     // within a single alloca before SROA ran, or with transfers that have
2912     // a variable length. We may also be dealing with memmove instead of
2913     // memcpy, and so simply updating the pointers is all that is necessary
2914     // for us to update both source and dest of a single call.
2915 if (!IsSplittable) { 2916 Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2917 if (IsDest) { 2918 II.setDest(AdjustedPtr); 2919 II.setDestAlignment(SliceAlign); 2920 } 2921 else { 2922 II.setSource(AdjustedPtr); 2923 II.setSourceAlignment(SliceAlign); 2924 } 2925 2926 LLVM_DEBUG(dbgs() << " to: " << II << "\n"); 2927 deleteIfTriviallyDead(OldPtr); 2928 return false; 2929 } 2930 // For split transfer intrinsics we have an incredibly useful assurance: 2931 // the source and destination do not reside within the same alloca, and at 2932 // least one of them does not escape. This means that we can replace 2933 // memmove with memcpy, and we don't need to worry about all manner of 2934 // downsides to splitting and transforming the operations. 2935 2936 // If this doesn't map cleanly onto the alloca type, and that type isn't 2937 // a single value type, just emit a memcpy. 2938 bool EmitMemCpy = 2939 !VecTy && !IntTy && 2940 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || 2941 SliceSize != 2942 DL.getTypeStoreSize(NewAI.getAllocatedType()).getFixedSize() || 2943 !NewAI.getAllocatedType()->isSingleValueType()); 2944 2945 // If we're just going to emit a memcpy, the alloca hasn't changed, and the 2946 // size hasn't been shrunk based on analysis of the viable range, this is 2947 // a no-op. 2948 if (EmitMemCpy && &OldAI == &NewAI) { 2949 // Ensure the start lines up. 2950 assert(NewBeginOffset == BeginOffset); 2951 2952 // Rewrite the size as needed. 2953 if (NewEndOffset != EndOffset) 2954 II.setLength(ConstantInt::get(II.getLength()->getType(), 2955 NewEndOffset - NewBeginOffset)); 2956 return false; 2957 } 2958 // Record this instruction for deletion. 2959 Pass.DeadInsts.push_back(&II); 2960 2961 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2962 // alloca that should be re-examined after rewriting this instruction. 2963 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); 2964 if (AllocaInst *AI = 2965 dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) { 2966 assert(AI != &OldAI && AI != &NewAI && 2967 "Splittable transfers cannot reach the same alloca on both ends."); 2968 Pass.Worklist.insert(AI); 2969 } 2970 2971 Type *OtherPtrTy = OtherPtr->getType(); 2972 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); 2973 2974 // Compute the relative offset for the other pointer within the transfer. 2975 unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS); 2976 APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset); 2977 Align OtherAlign = 2978 (IsDest ? II.getSourceAlign() : II.getDestAlign()).valueOrOne(); 2979 OtherAlign = 2980 commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue()); 2981 2982 if (EmitMemCpy) { 2983 // Compute the other pointer, folding as much as possible to produce 2984 // a single, simple GEP in most cases. 
2985 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2986 OtherPtr->getName() + "."); 2987 2988 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2989 Type *SizeTy = II.getLength()->getType(); 2990 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2991 2992 Value *DestPtr, *SrcPtr; 2993 MaybeAlign DestAlign, SrcAlign; 2994 // Note: IsDest is true iff we're copying into the new alloca slice 2995 if (IsDest) { 2996 DestPtr = OurPtr; 2997 DestAlign = SliceAlign; 2998 SrcPtr = OtherPtr; 2999 SrcAlign = OtherAlign; 3000 } else { 3001 DestPtr = OtherPtr; 3002 DestAlign = OtherAlign; 3003 SrcPtr = OurPtr; 3004 SrcAlign = SliceAlign; 3005 } 3006 CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign, 3007 Size, II.isVolatile()); 3008 if (AATags) 3009 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3010 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3011 return false; 3012 } 3013 3014 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && 3015 NewEndOffset == NewAllocaEndOffset; 3016 uint64_t Size = NewEndOffset - NewBeginOffset; 3017 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; 3018 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; 3019 unsigned NumElements = EndIndex - BeginIndex; 3020 IntegerType *SubIntTy = 3021 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; 3022 3023 // Reset the other pointer type to match the register type we're going to 3024 // use, but using the address space of the original other pointer. 3025 Type *OtherTy; 3026 if (VecTy && !IsWholeAlloca) { 3027 if (NumElements == 1) 3028 OtherTy = VecTy->getElementType(); 3029 else 3030 OtherTy = FixedVectorType::get(VecTy->getElementType(), NumElements); 3031 } else if (IntTy && !IsWholeAlloca) { 3032 OtherTy = SubIntTy; 3033 } else { 3034 OtherTy = NewAllocaTy; 3035 } 3036 OtherPtrTy = OtherTy->getPointerTo(OtherAS); 3037 3038 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 3039 OtherPtr->getName() + "."); 3040 MaybeAlign SrcAlign = OtherAlign; 3041 Value *DstPtr = &NewAI; 3042 MaybeAlign DstAlign = SliceAlign; 3043 if (!IsDest) { 3044 std::swap(SrcPtr, DstPtr); 3045 std::swap(SrcAlign, DstAlign); 3046 } 3047 3048 Value *Src; 3049 if (VecTy && !IsWholeAlloca && !IsDest) { 3050 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3051 NewAI.getAlign(), "load"); 3052 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); 3053 } else if (IntTy && !IsWholeAlloca && !IsDest) { 3054 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3055 NewAI.getAlign(), "load"); 3056 Src = convertValue(DL, IRB, Src, IntTy); 3057 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3058 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); 3059 } else { 3060 LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign, 3061 II.isVolatile(), "copyload"); 3062 if (AATags) 3063 Load->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3064 Src = Load; 3065 } 3066 3067 if (VecTy && !IsWholeAlloca && IsDest) { 3068 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3069 NewAI.getAlign(), "oldload"); 3070 Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); 3071 } else if (IntTy && !IsWholeAlloca && IsDest) { 3072 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3073 NewAI.getAlign(), "oldload"); 3074 Old = convertValue(DL, IRB, Old, IntTy); 3075 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3076 Src 
= insertInteger(DL, IRB, Old, Src, Offset, "insert"); 3077 Src = convertValue(DL, IRB, Src, NewAllocaTy); 3078 } 3079 3080 StoreInst *Store = cast<StoreInst>( 3081 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); 3082 if (AATags) 3083 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3084 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3085 return !II.isVolatile(); 3086 } 3087 3088 bool visitIntrinsicInst(IntrinsicInst &II) { 3089 assert((II.isLifetimeStartOrEnd() || II.isDroppable()) && 3090 "Unexpected intrinsic!"); 3091 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 3092 3093 // Record this instruction for deletion. 3094 Pass.DeadInsts.push_back(&II); 3095 3096 if (II.isDroppable()) { 3097 assert(II.getIntrinsicID() == Intrinsic::assume && "Expected assume"); 3098 // TODO For now we forget assumed information, this can be improved. 3099 OldPtr->dropDroppableUsesIn(II); 3100 return true; 3101 } 3102 3103 assert(II.getArgOperand(1) == OldPtr); 3104 // Lifetime intrinsics are only promotable if they cover the whole alloca. 3105 // Therefore, we drop lifetime intrinsics which don't cover the whole 3106 // alloca. 3107 // (In theory, intrinsics which partially cover an alloca could be 3108 // promoted, but PromoteMemToReg doesn't handle that case.) 3109 // FIXME: Check whether the alloca is promotable before dropping the 3110 // lifetime intrinsics? 3111 if (NewBeginOffset != NewAllocaBeginOffset || 3112 NewEndOffset != NewAllocaEndOffset) 3113 return true; 3114 3115 ConstantInt *Size = 3116 ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), 3117 NewEndOffset - NewBeginOffset); 3118 // Lifetime intrinsics always expect an i8* so directly get such a pointer 3119 // for the new alloca slice. 3120 Type *PointerTy = IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace()); 3121 Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy); 3122 Value *New; 3123 if (II.getIntrinsicID() == Intrinsic::lifetime_start) 3124 New = IRB.CreateLifetimeStart(Ptr, Size); 3125 else 3126 New = IRB.CreateLifetimeEnd(Ptr, Size); 3127 3128 (void)New; 3129 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3130 3131 return true; 3132 } 3133 3134 void fixLoadStoreAlign(Instruction &Root) { 3135 // This algorithm implements the same visitor loop as 3136 // hasUnsafePHIOrSelectUse, and fixes the alignment of each load 3137 // or store found. 
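    // For example, a load annotated "align 8" on the original alloca may only
    // be guaranteed 4-byte alignment through the new slice; the std::min
    // below lowers its recorded alignment accordingly (alignment is never
    // increased here).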
3138 SmallPtrSet<Instruction *, 4> Visited; 3139 SmallVector<Instruction *, 4> Uses; 3140 Visited.insert(&Root); 3141 Uses.push_back(&Root); 3142 do { 3143 Instruction *I = Uses.pop_back_val(); 3144 3145 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 3146 LI->setAlignment(std::min(LI->getAlign(), getSliceAlign())); 3147 continue; 3148 } 3149 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 3150 SI->setAlignment(std::min(SI->getAlign(), getSliceAlign())); 3151 continue; 3152 } 3153 3154 assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) || 3155 isa<PHINode>(I) || isa<SelectInst>(I) || 3156 isa<GetElementPtrInst>(I)); 3157 for (User *U : I->users()) 3158 if (Visited.insert(cast<Instruction>(U)).second) 3159 Uses.push_back(cast<Instruction>(U)); 3160 } while (!Uses.empty()); 3161 } 3162 3163 bool visitPHINode(PHINode &PN) { 3164 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 3165 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); 3166 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); 3167 3168 // We would like to compute a new pointer in only one place, but have it be 3169 // as local as possible to the PHI. To do that, we re-use the location of 3170 // the old pointer, which necessarily must be in the right position to 3171 // dominate the PHI. 3172 IRBuilderBase::InsertPointGuard Guard(IRB); 3173 if (isa<PHINode>(OldPtr)) 3174 IRB.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt()); 3175 else 3176 IRB.SetInsertPoint(OldPtr); 3177 IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc()); 3178 3179 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3180 // Replace the operands which were using the old pointer. 3181 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); 3182 3183 LLVM_DEBUG(dbgs() << " to: " << PN << "\n"); 3184 deleteIfTriviallyDead(OldPtr); 3185 3186 // Fix the alignment of any loads or stores using this PHI node. 3187 fixLoadStoreAlign(PN); 3188 3189 // PHIs can't be promoted on their own, but often can be speculated. We 3190 // check the speculation outside of the rewriter so that we see the 3191 // fully-rewritten alloca. 3192 PHIUsers.insert(&PN); 3193 return true; 3194 } 3195 3196 bool visitSelectInst(SelectInst &SI) { 3197 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3198 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && 3199 "Pointer isn't an operand!"); 3200 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); 3201 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); 3202 3203 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3204 // Replace the operands which were using the old pointer. 3205 if (SI.getOperand(1) == OldPtr) 3206 SI.setOperand(1, NewPtr); 3207 if (SI.getOperand(2) == OldPtr) 3208 SI.setOperand(2, NewPtr); 3209 3210 LLVM_DEBUG(dbgs() << " to: " << SI << "\n"); 3211 deleteIfTriviallyDead(OldPtr); 3212 3213 // Fix the alignment of any loads or stores using this select. 3214 fixLoadStoreAlign(SI); 3215 3216 // Selects can't be promoted on their own, but often can be speculated. We 3217 // check the speculation outside of the rewriter so that we see the 3218 // fully-rewritten alloca. 3219 SelectUsers.insert(&SI); 3220 return true; 3221 } 3222 }; 3223 3224 namespace { 3225 3226 /// Visitor to rewrite aggregate loads and stores as scalar. 
3227 /// 3228 /// This pass aggressively rewrites all aggregate loads and stores on 3229 /// a particular pointer (or any pointer derived from it which we can identify) 3230 /// with scalar loads and stores. 3231 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { 3232 // Befriend the base class so it can delegate to private visit methods. 3233 friend class InstVisitor<AggLoadStoreRewriter, bool>; 3234 3235 /// Queue of pointer uses to analyze and potentially rewrite. 3236 SmallVector<Use *, 8> Queue; 3237 3238 /// Set to prevent us from cycling with phi nodes and loops. 3239 SmallPtrSet<User *, 8> Visited; 3240 3241 /// The current pointer use being rewritten. This is used to dig up the used 3242 /// value (as opposed to the user). 3243 Use *U = nullptr; 3244 3245 /// Used to calculate offsets, and hence alignment, of subobjects. 3246 const DataLayout &DL; 3247 3248 public: 3249 AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {} 3250 3251 /// Rewrite loads and stores through a pointer and all pointers derived from 3252 /// it. 3253 bool rewrite(Instruction &I) { 3254 LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); 3255 enqueueUsers(I); 3256 bool Changed = false; 3257 while (!Queue.empty()) { 3258 U = Queue.pop_back_val(); 3259 Changed |= visit(cast<Instruction>(U->getUser())); 3260 } 3261 return Changed; 3262 } 3263 3264 private: 3265 /// Enqueue all the users of the given instruction for further processing. 3266 /// This uses a set to de-duplicate users. 3267 void enqueueUsers(Instruction &I) { 3268 for (Use &U : I.uses()) 3269 if (Visited.insert(U.getUser()).second) 3270 Queue.push_back(&U); 3271 } 3272 3273 // Conservative default is to not rewrite anything. 3274 bool visitInstruction(Instruction &I) { return false; } 3275 3276 /// Generic recursive split emission class. 3277 template <typename Derived> class OpSplitter { 3278 protected: 3279 /// The builder used to form new instructions. 3280 IRBuilderTy IRB; 3281 3282 /// The indices which to be used with insert- or extractvalue to select the 3283 /// appropriate value within the aggregate. 3284 SmallVector<unsigned, 4> Indices; 3285 3286 /// The indices to a GEP instruction which will move Ptr to the correct slot 3287 /// within the aggregate. 3288 SmallVector<Value *, 4> GEPIndices; 3289 3290 /// The base pointer of the original op, used as a base for GEPing the 3291 /// split operations. 3292 Value *Ptr; 3293 3294 /// The base pointee type being GEPed into. 3295 Type *BaseTy; 3296 3297 /// Known alignment of the base pointer. 3298 Align BaseAlign; 3299 3300 /// To calculate offset of each component so we can correctly deduce 3301 /// alignments. 3302 const DataLayout &DL; 3303 3304 /// Initialize the splitter with an insertion point, Ptr and start with a 3305 /// single zero GEP index. 3306 OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3307 Align BaseAlign, const DataLayout &DL) 3308 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), 3309 BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {} 3310 3311 public: 3312 /// Generic recursive split emission routine. 3313 /// 3314 /// This method recursively splits an aggregate op (load or store) into 3315 /// scalar or vector ops. It splits recursively until it hits a single value 3316 /// and emits that single value operation via the template argument. 
3317 /// 3318 /// The logic of this routine relies on GEPs and insertvalue and 3319 /// extractvalue all operating with the same fundamental index list, merely 3320 /// formatted differently (GEPs need actual values). 3321 /// 3322 /// \param Ty The type being split recursively into smaller ops. 3323 /// \param Agg The aggregate value being built up or stored, depending on 3324 /// whether this is splitting a load or a store respectively. 3325 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { 3326 if (Ty->isSingleValueType()) { 3327 unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices); 3328 return static_cast<Derived *>(this)->emitFunc( 3329 Ty, Agg, commonAlignment(BaseAlign, Offset), Name); 3330 } 3331 3332 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 3333 unsigned OldSize = Indices.size(); 3334 (void)OldSize; 3335 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; 3336 ++Idx) { 3337 assert(Indices.size() == OldSize && "Did not return to the old size"); 3338 Indices.push_back(Idx); 3339 GEPIndices.push_back(IRB.getInt32(Idx)); 3340 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); 3341 GEPIndices.pop_back(); 3342 Indices.pop_back(); 3343 } 3344 return; 3345 } 3346 3347 if (StructType *STy = dyn_cast<StructType>(Ty)) { 3348 unsigned OldSize = Indices.size(); 3349 (void)OldSize; 3350 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; 3351 ++Idx) { 3352 assert(Indices.size() == OldSize && "Did not return to the old size"); 3353 Indices.push_back(Idx); 3354 GEPIndices.push_back(IRB.getInt32(Idx)); 3355 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); 3356 GEPIndices.pop_back(); 3357 Indices.pop_back(); 3358 } 3359 return; 3360 } 3361 3362 llvm_unreachable("Only arrays and structs are aggregate loadable types"); 3363 } 3364 }; 3365 3366 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { 3367 AAMDNodes AATags; 3368 3369 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3370 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL) 3371 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, 3372 DL), 3373 AATags(AATags) {} 3374 3375 /// Emit a leaf load of a single value. This is called at the leaves of the 3376 /// recursive emission to actually load values. 3377 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3378 assert(Ty->isSingleValueType()); 3379 // Load the single value and insert it using the indices. 3380 Value *GEP = 3381 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3382 LoadInst *Load = 3383 IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load"); 3384 3385 APInt Offset( 3386 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0); 3387 if (AATags && 3388 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset)) 3389 Load->setAAMetadata(AATags.shift(Offset.getZExtValue())); 3390 3391 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); 3392 LLVM_DEBUG(dbgs() << " to: " << *Load << "\n"); 3393 } 3394 }; 3395 3396 bool visitLoadInst(LoadInst &LI) { 3397 assert(LI.getPointerOperand() == *U); 3398 if (!LI.isSimple() || LI.getType()->isSingleValueType()) 3399 return false; 3400 3401 // We have an aggregate being loaded, split it apart. 
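    // Roughly (hypothetical IR for a load %x of type {i32, float} through %p):
    //   %x.fca.0.gep = getelementptr inbounds {i32, float}, {i32, float}* %p, i32 0, i32 0
    //   %x.fca.0.load = load i32, i32* %x.fca.0.gep
    //   %x.fca.0.insert = insertvalue {i32, float} undef, i32 %x.fca.0.load, 0
    // and similarly for element 1, after which the original aggregate load is
    // replaced and erased.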
3402 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 3403 AAMDNodes AATags; 3404 LI.getAAMetadata(AATags); 3405 LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags, 3406 getAdjustedAlignment(&LI, 0), DL); 3407 Value *V = UndefValue::get(LI.getType()); 3408 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); 3409 Visited.erase(&LI); 3410 LI.replaceAllUsesWith(V); 3411 LI.eraseFromParent(); 3412 return true; 3413 } 3414 3415 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { 3416 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3417 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL) 3418 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, 3419 DL), 3420 AATags(AATags) {} 3421 AAMDNodes AATags; 3422 /// Emit a leaf store of a single value. This is called at the leaves of the 3423 /// recursive emission to actually produce stores. 3424 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3425 assert(Ty->isSingleValueType()); 3426 // Extract the single value and store it using the indices. 3427 // 3428 // The gep and extractvalue values are factored out of the CreateStore 3429 // call to make the output independent of the argument evaluation order. 3430 Value *ExtractValue = 3431 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"); 3432 Value *InBoundsGEP = 3433 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3434 StoreInst *Store = 3435 IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment); 3436 3437 APInt Offset( 3438 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0); 3439 if (AATags && 3440 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset)) 3441 Store->setAAMetadata(AATags.shift(Offset.getZExtValue())); 3442 3443 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3444 } 3445 }; 3446 3447 bool visitStoreInst(StoreInst &SI) { 3448 if (!SI.isSimple() || SI.getPointerOperand() != *U) 3449 return false; 3450 Value *V = SI.getValueOperand(); 3451 if (V->getType()->isSingleValueType()) 3452 return false; 3453 3454 // We have an aggregate being stored, split it apart. 3455 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3456 AAMDNodes AATags; 3457 SI.getAAMetadata(AATags); 3458 StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags, 3459 getAdjustedAlignment(&SI, 0), DL); 3460 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); 3461 Visited.erase(&SI); 3462 SI.eraseFromParent(); 3463 return true; 3464 } 3465 3466 bool visitBitCastInst(BitCastInst &BC) { 3467 enqueueUsers(BC); 3468 return false; 3469 } 3470 3471 bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 3472 enqueueUsers(ASC); 3473 return false; 3474 } 3475 3476 // Fold gep (select cond, ptr1, ptr2) => select cond, gep(ptr1), gep(ptr2) 3477 bool foldGEPSelect(GetElementPtrInst &GEPI) { 3478 if (!GEPI.hasAllConstantIndices()) 3479 return false; 3480 3481 SelectInst *Sel = cast<SelectInst>(GEPI.getPointerOperand()); 3482 3483 LLVM_DEBUG(dbgs() << " Rewriting gep(select) -> select(gep):" 3484 << "\n original: " << *Sel 3485 << "\n " << GEPI); 3486 3487 IRBuilderTy Builder(&GEPI); 3488 SmallVector<Value *, 4> Index(GEPI.indices()); 3489 bool IsInBounds = GEPI.isInBounds(); 3490 3491 Value *True = Sel->getTrueValue(); 3492 Value *NTrue = 3493 IsInBounds 3494 ? 
Builder.CreateInBoundsGEP(True, Index, 3495 True->getName() + ".sroa.gep") 3496 : Builder.CreateGEP(True, Index, True->getName() + ".sroa.gep"); 3497 3498 Value *False = Sel->getFalseValue(); 3499 3500 Value *NFalse = 3501 IsInBounds 3502 ? Builder.CreateInBoundsGEP(False, Index, 3503 False->getName() + ".sroa.gep") 3504 : Builder.CreateGEP(False, Index, False->getName() + ".sroa.gep"); 3505 3506 Value *NSel = Builder.CreateSelect(Sel->getCondition(), NTrue, NFalse, 3507 Sel->getName() + ".sroa.sel"); 3508 Visited.erase(&GEPI); 3509 GEPI.replaceAllUsesWith(NSel); 3510 GEPI.eraseFromParent(); 3511 Instruction *NSelI = cast<Instruction>(NSel); 3512 Visited.insert(NSelI); 3513 enqueueUsers(*NSelI); 3514 3515 LLVM_DEBUG(dbgs() << "\n to: " << *NTrue 3516 << "\n " << *NFalse 3517 << "\n " << *NSel << '\n'); 3518 3519 return true; 3520 } 3521 3522 // Fold gep (phi ptr1, ptr2) => phi gep(ptr1), gep(ptr2) 3523 bool foldGEPPhi(GetElementPtrInst &GEPI) { 3524 if (!GEPI.hasAllConstantIndices()) 3525 return false; 3526 3527 PHINode *PHI = cast<PHINode>(GEPI.getPointerOperand()); 3528 if (GEPI.getParent() != PHI->getParent() || 3529 llvm::any_of(PHI->incoming_values(), [](Value *In) 3530 { Instruction *I = dyn_cast<Instruction>(In); 3531 return !I || isa<GetElementPtrInst>(I) || isa<PHINode>(I) || 3532 succ_empty(I->getParent()) || 3533 !I->getParent()->isLegalToHoistInto(); 3534 })) 3535 return false; 3536 3537 LLVM_DEBUG(dbgs() << " Rewriting gep(phi) -> phi(gep):" 3538 << "\n original: " << *PHI 3539 << "\n " << GEPI 3540 << "\n to: "); 3541 3542 SmallVector<Value *, 4> Index(GEPI.indices()); 3543 bool IsInBounds = GEPI.isInBounds(); 3544 IRBuilderTy PHIBuilder(GEPI.getParent()->getFirstNonPHI()); 3545 PHINode *NewPN = PHIBuilder.CreatePHI(GEPI.getType(), 3546 PHI->getNumIncomingValues(), 3547 PHI->getName() + ".sroa.phi"); 3548 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) { 3549 BasicBlock *B = PHI->getIncomingBlock(I); 3550 Value *NewVal = nullptr; 3551 int Idx = NewPN->getBasicBlockIndex(B); 3552 if (Idx >= 0) { 3553 NewVal = NewPN->getIncomingValue(Idx); 3554 } else { 3555 Instruction *In = cast<Instruction>(PHI->getIncomingValue(I)); 3556 3557 IRBuilderTy B(In->getParent(), std::next(In->getIterator())); 3558 NewVal = IsInBounds 3559 ? B.CreateInBoundsGEP(In, Index, In->getName() + ".sroa.gep") 3560 : B.CreateGEP(In, Index, In->getName() + ".sroa.gep"); 3561 } 3562 NewPN->addIncoming(NewVal, B); 3563 } 3564 3565 Visited.erase(&GEPI); 3566 GEPI.replaceAllUsesWith(NewPN); 3567 GEPI.eraseFromParent(); 3568 Visited.insert(NewPN); 3569 enqueueUsers(*NewPN); 3570 3571 LLVM_DEBUG(for (Value *In : NewPN->incoming_values()) 3572 dbgs() << "\n " << *In; 3573 dbgs() << "\n " << *NewPN << '\n'); 3574 3575 return true; 3576 } 3577 3578 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { 3579 if (isa<SelectInst>(GEPI.getPointerOperand()) && 3580 foldGEPSelect(GEPI)) 3581 return true; 3582 3583 if (isa<PHINode>(GEPI.getPointerOperand()) && 3584 foldGEPPhi(GEPI)) 3585 return true; 3586 3587 enqueueUsers(GEPI); 3588 return false; 3589 } 3590 3591 bool visitPHINode(PHINode &PN) { 3592 enqueueUsers(PN); 3593 return false; 3594 } 3595 3596 bool visitSelectInst(SelectInst &SI) { 3597 enqueueUsers(SI); 3598 return false; 3599 } 3600 }; 3601 3602 } // end anonymous namespace 3603 3604 /// Strip aggregate type wrapping. 3605 /// 3606 /// This removes no-op aggregate types wrapping an underlying type. 
It will 3607 /// strip as many layers of types as it can without changing either the type 3608 /// size or the allocated size. 3609 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { 3610 if (Ty->isSingleValueType()) 3611 return Ty; 3612 3613 uint64_t AllocSize = DL.getTypeAllocSize(Ty).getFixedSize(); 3614 uint64_t TypeSize = DL.getTypeSizeInBits(Ty).getFixedSize(); 3615 3616 Type *InnerTy; 3617 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 3618 InnerTy = ArrTy->getElementType(); 3619 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 3620 const StructLayout *SL = DL.getStructLayout(STy); 3621 unsigned Index = SL->getElementContainingOffset(0); 3622 InnerTy = STy->getElementType(Index); 3623 } else { 3624 return Ty; 3625 } 3626 3627 if (AllocSize > DL.getTypeAllocSize(InnerTy).getFixedSize() || 3628 TypeSize > DL.getTypeSizeInBits(InnerTy).getFixedSize()) 3629 return Ty; 3630 3631 return stripAggregateTypeWrapping(DL, InnerTy); 3632 } 3633 3634 /// Try to find a partition of the aggregate type passed in for a given 3635 /// offset and size. 3636 /// 3637 /// This recurses through the aggregate type and tries to compute a subtype 3638 /// based on the offset and size. When the offset and size span a sub-section 3639 /// of an array, it will even compute a new array type for that sub-section, 3640 /// and the same for structs. 3641 /// 3642 /// Note that this routine is very strict and tries to find a partition of the 3643 /// type which produces the *exact* right offset and size. It is not forgiving 3644 /// when the size or offset cause either end of type-based partition to be off. 3645 /// Also, this is a best-effort routine. It is reasonable to give up and not 3646 /// return a type if necessary. 3647 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, 3648 uint64_t Size) { 3649 if (Offset == 0 && DL.getTypeAllocSize(Ty).getFixedSize() == Size) 3650 return stripAggregateTypeWrapping(DL, Ty); 3651 if (Offset > DL.getTypeAllocSize(Ty).getFixedSize() || 3652 (DL.getTypeAllocSize(Ty).getFixedSize() - Offset) < Size) 3653 return nullptr; 3654 3655 if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) { 3656 Type *ElementTy; 3657 uint64_t TyNumElements; 3658 if (auto *AT = dyn_cast<ArrayType>(Ty)) { 3659 ElementTy = AT->getElementType(); 3660 TyNumElements = AT->getNumElements(); 3661 } else { 3662 // FIXME: This isn't right for vectors with non-byte-sized or 3663 // non-power-of-two sized elements. 3664 auto *VT = cast<FixedVectorType>(Ty); 3665 ElementTy = VT->getElementType(); 3666 TyNumElements = VT->getNumElements(); 3667 } 3668 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize(); 3669 uint64_t NumSkippedElements = Offset / ElementSize; 3670 if (NumSkippedElements >= TyNumElements) 3671 return nullptr; 3672 Offset -= NumSkippedElements * ElementSize; 3673 3674 // First check if we need to recurse. 3675 if (Offset > 0 || Size < ElementSize) { 3676 // Bail if the partition ends in a different array element. 3677 if ((Offset + Size) > ElementSize) 3678 return nullptr; 3679 // Recurse through the element type trying to peel off offset bytes. 
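      // Illustrative example: for [4 x {i32, i32}] with Offset = 12 and
      // Size = 4 we skip one 8-byte element, land 4 bytes into the second
      // element, and the recursion below ultimately returns i32.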
3680 return getTypePartition(DL, ElementTy, Offset, Size); 3681 } 3682 assert(Offset == 0); 3683 3684 if (Size == ElementSize) 3685 return stripAggregateTypeWrapping(DL, ElementTy); 3686 assert(Size > ElementSize); 3687 uint64_t NumElements = Size / ElementSize; 3688 if (NumElements * ElementSize != Size) 3689 return nullptr; 3690 return ArrayType::get(ElementTy, NumElements); 3691 } 3692 3693 StructType *STy = dyn_cast<StructType>(Ty); 3694 if (!STy) 3695 return nullptr; 3696 3697 const StructLayout *SL = DL.getStructLayout(STy); 3698 if (Offset >= SL->getSizeInBytes()) 3699 return nullptr; 3700 uint64_t EndOffset = Offset + Size; 3701 if (EndOffset > SL->getSizeInBytes()) 3702 return nullptr; 3703 3704 unsigned Index = SL->getElementContainingOffset(Offset); 3705 Offset -= SL->getElementOffset(Index); 3706 3707 Type *ElementTy = STy->getElementType(Index); 3708 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize(); 3709 if (Offset >= ElementSize) 3710 return nullptr; // The offset points into alignment padding. 3711 3712 // See if any partition must be contained by the element. 3713 if (Offset > 0 || Size < ElementSize) { 3714 if ((Offset + Size) > ElementSize) 3715 return nullptr; 3716 return getTypePartition(DL, ElementTy, Offset, Size); 3717 } 3718 assert(Offset == 0); 3719 3720 if (Size == ElementSize) 3721 return stripAggregateTypeWrapping(DL, ElementTy); 3722 3723 StructType::element_iterator EI = STy->element_begin() + Index, 3724 EE = STy->element_end(); 3725 if (EndOffset < SL->getSizeInBytes()) { 3726 unsigned EndIndex = SL->getElementContainingOffset(EndOffset); 3727 if (Index == EndIndex) 3728 return nullptr; // Within a single element and its padding. 3729 3730 // Don't try to form "natural" types if the elements don't line up with the 3731 // expected size. 3732 // FIXME: We could potentially recurse down through the last element in the 3733 // sub-struct to find a natural end point. 3734 if (SL->getElementOffset(EndIndex) != EndOffset) 3735 return nullptr; 3736 3737 assert(Index < EndIndex); 3738 EE = STy->element_begin() + EndIndex; 3739 } 3740 3741 // Try to build up a sub-structure. 3742 StructType *SubTy = 3743 StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); 3744 const StructLayout *SubSL = DL.getStructLayout(SubTy); 3745 if (Size != SubSL->getSizeInBytes()) 3746 return nullptr; // The sub-struct doesn't have quite the size needed. 3747 3748 return SubTy; 3749 } 3750 3751 /// Pre-split loads and stores to simplify rewriting. 3752 /// 3753 /// We want to break up the splittable load+store pairs as much as 3754 /// possible. This is important to do as a preprocessing step, as once we 3755 /// start rewriting the accesses to partitions of the alloca we lose the 3756 /// necessary information to correctly split apart paired loads and stores 3757 /// which both point into this alloca. 
The case to consider is something like
3758 /// the following:
3759 ///
3760 /// %a = alloca [12 x i8]
3761 /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
3762 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
3763 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
3764 /// %iptr1 = bitcast i8* %gep1 to i64*
3765 /// %iptr2 = bitcast i8* %gep2 to i64*
3766 /// %fptr1 = bitcast i8* %gep1 to float*
3767 /// %fptr2 = bitcast i8* %gep2 to float*
3768 /// %fptr3 = bitcast i8* %gep3 to float*
3769 /// store float 0.0, float* %fptr1
3770 /// store float 1.0, float* %fptr2
3771 /// %v = load i64* %iptr1
3772 /// store i64 %v, i64* %iptr2
3773 /// %f1 = load float* %fptr2
3774 /// %f2 = load float* %fptr3
3775 ///
3776 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
3777 /// promote everything so we recover the 2 SSA values that should have been
3778 /// there all along.
3779 ///
3780 /// \returns true if any changes are made.
3781 bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
3782   LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");
3783
3784   // Track the loads and stores which are candidates for pre-splitting here, in
3785   // the order they first appear during the partition scan. These give stable
3786   // iteration order and a basis for tracking which loads and stores we
3787   // actually split.
3788   SmallVector<LoadInst *, 4> Loads;
3789   SmallVector<StoreInst *, 4> Stores;
3790
3791   // We need to accumulate the splits required of each load or store where we
3792   // can find them via a direct lookup. This is important to cross-check loads
3793   // and stores against each other. We also track the slice so that we can kill
3794   // all the slices that end up split.
3795   struct SplitOffsets {
3796     Slice *S;
3797     std::vector<uint64_t> Splits;
3798   };
3799   SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
3800
3801   // Track loads out of this alloca which cannot, for any reason, be pre-split.
3802   // This is important as we also cannot pre-split stores of those loads!
3803   // FIXME: This is all pretty gross. It means that we can be more aggressive
3804   // in pre-splitting when the load feeding the store happens to come from
3805   // a separate alloca. Put another way, the effectiveness of SROA would be
3806   // decreased by a frontend which just concatenated all of its local allocas
3807   // into one big flat alloca. But defeating such patterns is exactly the job
3808   // SROA is tasked with! Sadly, to not have this discrepancy we would have to
3809   // change store pre-splitting to actually force pre-splitting of the load
3810   // that feeds it *and all stores*. That makes pre-splitting much harder, but
3811   // maybe it would make it more principled?
3812   SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
3813
3814   LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n");
3815   for (auto &P : AS.partitions()) {
3816     for (Slice &S : P) {
3817       Instruction *I = cast<Instruction>(S.getUse()->getUser());
3818       if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
3819         // If this is a load we have to track that it can't participate in any
3820         // pre-splitting. If this is a store of a load we have to track that
3821         // that load also can't participate in any pre-splitting.
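        // (A store can only be pre-split in lock-step with the load feeding
        // it; the split offsets of the two are cross-checked further below.)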
3822 if (auto *LI = dyn_cast<LoadInst>(I)) 3823 UnsplittableLoads.insert(LI); 3824 else if (auto *SI = dyn_cast<StoreInst>(I)) 3825 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand())) 3826 UnsplittableLoads.insert(LI); 3827 continue; 3828 } 3829 assert(P.endOffset() > S.beginOffset() && 3830 "Empty or backwards partition!"); 3831 3832 // Determine if this is a pre-splittable slice. 3833 if (auto *LI = dyn_cast<LoadInst>(I)) { 3834 assert(!LI->isVolatile() && "Cannot split volatile loads!"); 3835 3836 // The load must be used exclusively to store into other pointers for 3837 // us to be able to arbitrarily pre-split it. The stores must also be 3838 // simple to avoid changing semantics. 3839 auto IsLoadSimplyStored = [](LoadInst *LI) { 3840 for (User *LU : LI->users()) { 3841 auto *SI = dyn_cast<StoreInst>(LU); 3842 if (!SI || !SI->isSimple()) 3843 return false; 3844 } 3845 return true; 3846 }; 3847 if (!IsLoadSimplyStored(LI)) { 3848 UnsplittableLoads.insert(LI); 3849 continue; 3850 } 3851 3852 Loads.push_back(LI); 3853 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 3854 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) 3855 // Skip stores *of* pointers. FIXME: This shouldn't even be possible! 3856 continue; 3857 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); 3858 if (!StoredLoad || !StoredLoad->isSimple()) 3859 continue; 3860 assert(!SI->isVolatile() && "Cannot split volatile stores!"); 3861 3862 Stores.push_back(SI); 3863 } else { 3864 // Other uses cannot be pre-split. 3865 continue; 3866 } 3867 3868 // Record the initial split. 3869 LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n"); 3870 auto &Offsets = SplitOffsetsMap[I]; 3871 assert(Offsets.Splits.empty() && 3872 "Should not have splits the first time we see an instruction!"); 3873 Offsets.S = &S; 3874 Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); 3875 } 3876 3877 // Now scan the already split slices, and add a split for any of them which 3878 // we're going to pre-split. 3879 for (Slice *S : P.splitSliceTails()) { 3880 auto SplitOffsetsMapI = 3881 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); 3882 if (SplitOffsetsMapI == SplitOffsetsMap.end()) 3883 continue; 3884 auto &Offsets = SplitOffsetsMapI->second; 3885 3886 assert(Offsets.S == S && "Found a mismatched slice!"); 3887 assert(!Offsets.Splits.empty() && 3888 "Cannot have an empty set of splits on the second partition!"); 3889 assert(Offsets.Splits.back() == 3890 P.beginOffset() - Offsets.S->beginOffset() && 3891 "Previous split does not end where this one begins!"); 3892 3893 // Record each split. The last partition's end isn't needed as the size 3894 // of the slice dictates that. 3895 if (S->endOffset() > P.endOffset()) 3896 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); 3897 } 3898 } 3899 3900 // We may have split loads where some of their stores are split stores. For 3901 // such loads and stores, we can only pre-split them if their splits exactly 3902 // match relative to their starting offset. We have to verify this prior to 3903 // any rewriting. 3904 llvm::erase_if(Stores, [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { 3905 // Lookup the load we are storing in our map of split 3906 // offsets. 3907 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3908 // If it was completely unsplittable, then we're done, 3909 // and this store can't be pre-split. 
3910     if (UnsplittableLoads.count(LI))
3911       return true;
3912
3913     auto LoadOffsetsI = SplitOffsetsMap.find(LI);
3914     if (LoadOffsetsI == SplitOffsetsMap.end())
3915       return false; // Unrelated loads are definitely safe.
3916     auto &LoadOffsets = LoadOffsetsI->second;
3917
3918     // Now lookup the store's offsets.
3919     auto &StoreOffsets = SplitOffsetsMap[SI];
3920
3921     // If the relative offsets of each split in the load and
3922     // store match exactly, then we can split them and we
3923     // don't need to remove them here.
3924     if (LoadOffsets.Splits == StoreOffsets.Splits)
3925       return false;
3926
3927     LLVM_DEBUG(dbgs() << " Mismatched splits for load and store:\n"
3928                       << " " << *LI << "\n"
3929                       << " " << *SI << "\n");
3930
3931     // We've found a store and load that we need to split
3932     // with mismatched relative splits. Just give up on them
3933     // and remove both instructions from our list of
3934     // candidates.
3935     UnsplittableLoads.insert(LI);
3936     return true;
3937   });
3938   // Now we have to go *back* through all the stores, because a later store may
3939   // have caused an earlier store's load to become unsplittable and if it is
3940   // unsplittable for the later store, then we can't rely on it being split in
3941   // the earlier store either.
3942   llvm::erase_if(Stores, [&UnsplittableLoads](StoreInst *SI) {
3943     auto *LI = cast<LoadInst>(SI->getValueOperand());
3944     return UnsplittableLoads.count(LI);
3945   });
3946   // Once we've established all the loads that can't be split for some reason,
3947   // filter out any that made it into our list.
3948   llvm::erase_if(Loads, [&UnsplittableLoads](LoadInst *LI) {
3949     return UnsplittableLoads.count(LI);
3950   });
3951
3952   // If no loads or stores are left, there is no pre-splitting to be done for
3953   // this alloca.
3954   if (Loads.empty() && Stores.empty())
3955     return false;
3956
3957   // From here on, we can't fail and will be building new accesses, so rig up
3958   // an IR builder.
3959   IRBuilderTy IRB(&AI);
3960
3961   // Collect the new slices which we will merge into the alloca slices.
3962   SmallVector<Slice, 4> NewSlices;
3963
3964   // Track any allocas we end up splitting loads and stores for so we iterate
3965   // on them.
3966   SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
3967
3968   // At this point, we have collected all of the loads and stores we can
3969   // pre-split, and the specific splits needed for them. We actually do the
3970   // splitting in a specific order in order to handle the case where one of
3971   // the loads is the value operand of one of the stores.
3972   //
3973   // First, we rewrite all of the split loads, and just accumulate each split
3974   // load in a parallel structure. We also build the slices for them and append
3975   // them to the alloca slices.
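  // In terms of the example in the function comment: the "load i64, i64* %iptr1"
  // covering bytes [0, 8) is split at offset 4 into two i32-sized loads over
  // bytes [0, 4) and [4, 8), and each part gets its own unsplittable slice.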
3976 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; 3977 std::vector<LoadInst *> SplitLoads; 3978 const DataLayout &DL = AI.getModule()->getDataLayout(); 3979 for (LoadInst *LI : Loads) { 3980 SplitLoads.clear(); 3981 3982 IntegerType *Ty = cast<IntegerType>(LI->getType()); 3983 uint64_t LoadSize = Ty->getBitWidth() / 8; 3984 assert(LoadSize > 0 && "Cannot have a zero-sized integer load!"); 3985 3986 auto &Offsets = SplitOffsetsMap[LI]; 3987 assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 3988 "Slice size should always match load size exactly!"); 3989 uint64_t BaseOffset = Offsets.S->beginOffset(); 3990 assert(BaseOffset + LoadSize > BaseOffset && 3991 "Cannot represent alloca access size using 64-bit integers!"); 3992 3993 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); 3994 IRB.SetInsertPoint(LI); 3995 3996 LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); 3997 3998 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3999 int Idx = 0, Size = Offsets.Splits.size(); 4000 for (;;) { 4001 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 4002 auto AS = LI->getPointerAddressSpace(); 4003 auto *PartPtrTy = PartTy->getPointerTo(AS); 4004 LoadInst *PLoad = IRB.CreateAlignedLoad( 4005 PartTy, 4006 getAdjustedPtr(IRB, DL, BasePtr, 4007 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4008 PartPtrTy, BasePtr->getName() + "."), 4009 getAdjustedAlignment(LI, PartOffset), 4010 /*IsVolatile*/ false, LI->getName()); 4011 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 4012 LLVMContext::MD_access_group}); 4013 4014 // Append this load onto the list of split loads so we can find it later 4015 // to rewrite the stores. 4016 SplitLoads.push_back(PLoad); 4017 4018 // Now build a new slice for the alloca. 4019 NewSlices.push_back( 4020 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 4021 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), 4022 /*IsSplittable*/ false)); 4023 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 4024 << ", " << NewSlices.back().endOffset() 4025 << "): " << *PLoad << "\n"); 4026 4027 // See if we've handled all the splits. 4028 if (Idx >= Size) 4029 break; 4030 4031 // Setup the next partition. 4032 PartOffset = Offsets.Splits[Idx]; 4033 ++Idx; 4034 PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset; 4035 } 4036 4037 // Now that we have the split loads, do the slow walk over all uses of the 4038 // load and rewrite them as split stores, or save the split loads to use 4039 // below if the store is going to be split there anyways. 4040 bool DeferredStores = false; 4041 for (User *LU : LI->users()) { 4042 StoreInst *SI = cast<StoreInst>(LU); 4043 if (!Stores.empty() && SplitOffsetsMap.count(SI)) { 4044 DeferredStores = true; 4045 LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI 4046 << "\n"); 4047 continue; 4048 } 4049 4050 Value *StoreBasePtr = SI->getPointerOperand(); 4051 IRB.SetInsertPoint(SI); 4052 4053 LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); 4054 4055 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { 4056 LoadInst *PLoad = SplitLoads[Idx]; 4057 uint64_t PartOffset = Idx == 0 ? 
0 : Offsets.Splits[Idx - 1]; 4058 auto *PartPtrTy = 4059 PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); 4060 4061 auto AS = SI->getPointerAddressSpace(); 4062 StoreInst *PStore = IRB.CreateAlignedStore( 4063 PLoad, 4064 getAdjustedPtr(IRB, DL, StoreBasePtr, 4065 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4066 PartPtrTy, StoreBasePtr->getName() + "."), 4067 getAdjustedAlignment(SI, PartOffset), 4068 /*IsVolatile*/ false); 4069 PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 4070 LLVMContext::MD_access_group}); 4071 LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); 4072 } 4073 4074 // We want to immediately iterate on any allocas impacted by splitting 4075 // this store, and we have to track any promotable alloca (indicated by 4076 // a direct store) as needing to be resplit because it is no longer 4077 // promotable. 4078 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) { 4079 ResplitPromotableAllocas.insert(OtherAI); 4080 Worklist.insert(OtherAI); 4081 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 4082 StoreBasePtr->stripInBoundsOffsets())) { 4083 Worklist.insert(OtherAI); 4084 } 4085 4086 // Mark the original store as dead. 4087 DeadInsts.push_back(SI); 4088 } 4089 4090 // Save the split loads if there are deferred stores among the users. 4091 if (DeferredStores) 4092 SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); 4093 4094 // Mark the original load as dead and kill the original slice. 4095 DeadInsts.push_back(LI); 4096 Offsets.S->kill(); 4097 } 4098 4099 // Second, we rewrite all of the split stores. At this point, we know that 4100 // all loads from this alloca have been split already. For stores of such 4101 // loads, we can simply look up the pre-existing split loads. For stores of 4102 // other loads, we split those loads first and then write split stores of 4103 // them. 4104 for (StoreInst *SI : Stores) { 4105 auto *LI = cast<LoadInst>(SI->getValueOperand()); 4106 IntegerType *Ty = cast<IntegerType>(LI->getType()); 4107 uint64_t StoreSize = Ty->getBitWidth() / 8; 4108 assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); 4109 4110 auto &Offsets = SplitOffsetsMap[SI]; 4111 assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 4112 "Slice size should always match load size exactly!"); 4113 uint64_t BaseOffset = Offsets.S->beginOffset(); 4114 assert(BaseOffset + StoreSize > BaseOffset && 4115 "Cannot represent alloca access size using 64-bit integers!"); 4116 4117 Value *LoadBasePtr = LI->getPointerOperand(); 4118 Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); 4119 4120 LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); 4121 4122 // Check whether we have an already split load. 
4123 auto SplitLoadsMapI = SplitLoadsMap.find(LI); 4124 std::vector<LoadInst *> *SplitLoads = nullptr; 4125 if (SplitLoadsMapI != SplitLoadsMap.end()) { 4126 SplitLoads = &SplitLoadsMapI->second; 4127 assert(SplitLoads->size() == Offsets.Splits.size() + 1 && 4128 "Too few split loads for the number of splits in the store!"); 4129 } else { 4130 LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n"); 4131 } 4132 4133 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 4134 int Idx = 0, Size = Offsets.Splits.size(); 4135 for (;;) { 4136 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 4137 auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); 4138 auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); 4139 4140 // Either lookup a split load or create one. 4141 LoadInst *PLoad; 4142 if (SplitLoads) { 4143 PLoad = (*SplitLoads)[Idx]; 4144 } else { 4145 IRB.SetInsertPoint(LI); 4146 auto AS = LI->getPointerAddressSpace(); 4147 PLoad = IRB.CreateAlignedLoad( 4148 PartTy, 4149 getAdjustedPtr(IRB, DL, LoadBasePtr, 4150 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4151 LoadPartPtrTy, LoadBasePtr->getName() + "."), 4152 getAdjustedAlignment(LI, PartOffset), 4153 /*IsVolatile*/ false, LI->getName()); 4154 } 4155 4156 // And store this partition. 4157 IRB.SetInsertPoint(SI); 4158 auto AS = SI->getPointerAddressSpace(); 4159 StoreInst *PStore = IRB.CreateAlignedStore( 4160 PLoad, 4161 getAdjustedPtr(IRB, DL, StoreBasePtr, 4162 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4163 StorePartPtrTy, StoreBasePtr->getName() + "."), 4164 getAdjustedAlignment(SI, PartOffset), 4165 /*IsVolatile*/ false); 4166 4167 // Now build a new slice for the alloca. 4168 NewSlices.push_back( 4169 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 4170 &PStore->getOperandUse(PStore->getPointerOperandIndex()), 4171 /*IsSplittable*/ false)); 4172 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 4173 << ", " << NewSlices.back().endOffset() 4174 << "): " << *PStore << "\n"); 4175 if (!SplitLoads) { 4176 LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); 4177 } 4178 4179 // See if we've finished all the splits. 4180 if (Idx >= Size) 4181 break; 4182 4183 // Setup the next partition. 4184 PartOffset = Offsets.Splits[Idx]; 4185 ++Idx; 4186 PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; 4187 } 4188 4189 // We want to immediately iterate on any allocas impacted by splitting 4190 // this load, which is only relevant if it isn't a load of this alloca and 4191 // thus we didn't already split the loads above. We also have to keep track 4192 // of any promotable allocas we split loads on as they can no longer be 4193 // promoted. 4194 if (!SplitLoads) { 4195 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) { 4196 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 4197 ResplitPromotableAllocas.insert(OtherAI); 4198 Worklist.insert(OtherAI); 4199 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 4200 LoadBasePtr->stripInBoundsOffsets())) { 4201 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 4202 Worklist.insert(OtherAI); 4203 } 4204 } 4205 4206 // Mark the original store as dead now that we've split it up and kill its 4207 // slice. Note that we leave the original load in place unless this store 4208 // was its only use. It may in turn be split up if it is an alloca load 4209 // for some other alloca, but it may be a normal load. 
This may introduce
4210     // redundant loads, but where those can be merged the rest of the optimizer
4211     // should handle the merging, and this uncovers SSA splits which is more
4212     // important. In practice, the original loads will almost always be fully
4213     // split and removed eventually, and the splits will be merged by any
4214     // trivial CSE, including instcombine.
4215     if (LI->hasOneUse()) {
4216       assert(*LI->user_begin() == SI && "Single use isn't this store!");
4217       DeadInsts.push_back(LI);
4218     }
4219     DeadInsts.push_back(SI);
4220     Offsets.S->kill();
4221   }
4222
4223   // Remove the killed slices that have been pre-split.
4224   llvm::erase_if(AS, [](const Slice &S) { return S.isDead(); });
4225
4226   // Insert our new slices. This will sort and merge them into the sorted
4227   // sequence.
4228   AS.insert(NewSlices);
4229
4230   LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
4231 #ifndef NDEBUG
4232   for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
4233     LLVM_DEBUG(AS.print(dbgs(), I, " "));
4234 #endif
4235
4236   // Finally, don't try to promote any allocas that now require re-splitting.
4237   // They have already been added to the worklist above.
4238   llvm::erase_if(PromotableAllocas, [&](AllocaInst *AI) {
4239     return ResplitPromotableAllocas.count(AI);
4240   });
4241
4242   return true;
4243 }
4244
4245 /// Rewrite an alloca partition's users.
4246 ///
4247 /// This routine drives both of the rewriting goals of the SROA pass. It tries
4248 /// to rewrite uses of an alloca partition to be conducive for SSA value
4249 /// promotion. If the partition needs a new, more refined alloca, this will
4250 /// build that new alloca, preserving as much type information as possible, and
4251 /// rewrite the uses of the old alloca to point at the new one and have the
4252 /// appropriate new offsets. It also evaluates how successful the rewrite was
4253 /// at enabling promotion and if it was successful queues the alloca to be
4254 /// promoted.
4255 AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
4256                                    Partition &P) {
4257   // Try to compute a friendly type for this partition of the alloca. This
4258   // won't always succeed, in which case we fall back to a legal integer type
4259   // or an i8 array of an appropriate size.
4260   Type *SliceTy = nullptr;
4261   const DataLayout &DL = AI.getModule()->getDataLayout();
4262   std::pair<Type *, IntegerType *> CommonUseTy =
4263       findCommonType(P.begin(), P.end(), P.endOffset());
4264   // Do all uses operate on the same type?
4265   if (CommonUseTy.first)
4266     if (DL.getTypeAllocSize(CommonUseTy.first).getFixedSize() >= P.size())
4267       SliceTy = CommonUseTy.first;
4268   // If not, can we find an appropriate subtype in the original allocated type?
4269   if (!SliceTy)
4270     if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
4271                                                  P.beginOffset(), P.size()))
4272       SliceTy = TypePartitionTy;
4273   // If still not, can we use the largest bitwidth integer type used?
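  // (Illustrative: an 8-byte partition accessed as both i32 and i64 would have
  // CommonUseTy.second == i64 here; failing that we fall back to iN for a
  // legal bit width N, and finally to an [N x i8] array below.)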
4274 if (!SliceTy && CommonUseTy.second) 4275 if (DL.getTypeAllocSize(CommonUseTy.second).getFixedSize() >= P.size()) 4276 SliceTy = CommonUseTy.second; 4277 if ((!SliceTy || (SliceTy->isArrayTy() && 4278 SliceTy->getArrayElementType()->isIntegerTy())) && 4279 DL.isLegalInteger(P.size() * 8)) 4280 SliceTy = Type::getIntNTy(*C, P.size() * 8); 4281 if (!SliceTy) 4282 SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size()); 4283 assert(DL.getTypeAllocSize(SliceTy).getFixedSize() >= P.size()); 4284 4285 bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL); 4286 4287 VectorType *VecTy = 4288 IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL); 4289 if (VecTy) 4290 SliceTy = VecTy; 4291 4292 // Check for the case where we're going to rewrite to a new alloca of the 4293 // exact same type as the original, and with the same access offsets. In that 4294 // case, re-use the existing alloca, but still run through the rewriter to 4295 // perform phi and select speculation. 4296 // P.beginOffset() can be non-zero even with the same type in a case with 4297 // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll). 4298 AllocaInst *NewAI; 4299 if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) { 4300 NewAI = &AI; 4301 // FIXME: We should be able to bail at this point with "nothing changed". 4302 // FIXME: We might want to defer PHI speculation until after here. 4303 // FIXME: return nullptr; 4304 } else { 4305 // Make sure the alignment is compatible with P.beginOffset(). 4306 const Align Alignment = commonAlignment(AI.getAlign(), P.beginOffset()); 4307 // If we will get at least this much alignment from the type alone, leave 4308 // the alloca's alignment unconstrained. 4309 const bool IsUnconstrained = Alignment <= DL.getABITypeAlign(SliceTy); 4310 NewAI = new AllocaInst( 4311 SliceTy, AI.getType()->getAddressSpace(), nullptr, 4312 IsUnconstrained ? DL.getPrefTypeAlign(SliceTy) : Alignment, 4313 AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI); 4314 // Copy the old AI debug location over to the new one. 4315 NewAI->setDebugLoc(AI.getDebugLoc()); 4316 ++NumNewAllocas; 4317 } 4318 4319 LLVM_DEBUG(dbgs() << "Rewriting alloca partition " 4320 << "[" << P.beginOffset() << "," << P.endOffset() 4321 << ") to: " << *NewAI << "\n"); 4322 4323 // Track the high watermark on the worklist as it is only relevant for 4324 // promoted allocas. We will reset it to this point if the alloca is not in 4325 // fact scheduled for promotion. 4326 unsigned PPWOldSize = PostPromotionWorklist.size(); 4327 unsigned NumUses = 0; 4328 SmallSetVector<PHINode *, 8> PHIUsers; 4329 SmallSetVector<SelectInst *, 8> SelectUsers; 4330 4331 AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(), 4332 P.endOffset(), IsIntegerPromotable, VecTy, 4333 PHIUsers, SelectUsers); 4334 bool Promotable = true; 4335 for (Slice *S : P.splitSliceTails()) { 4336 Promotable &= Rewriter.visit(S); 4337 ++NumUses; 4338 } 4339 for (Slice &S : P) { 4340 Promotable &= Rewriter.visit(&S); 4341 ++NumUses; 4342 } 4343 4344 NumAllocaPartitionUses += NumUses; 4345 MaxUsesPerAllocaPartition.updateMax(NumUses); 4346 4347 // Now that we've processed all the slices in the new partition, check if any 4348 // PHIs or Selects would block promotion. 
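  // A PHI or select of the slice pointer is only compatible with promotion if
  // the loads through it can later be speculated (see isSafePHIToSpeculate and
  // isSafeSelectToSpeculate); otherwise we give up on promoting this
  // partition for now.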
4349   for (PHINode *PHI : PHIUsers)
4350     if (!isSafePHIToSpeculate(*PHI)) {
4351       Promotable = false;
4352       PHIUsers.clear();
4353       SelectUsers.clear();
4354       break;
4355     }
4356
4357   for (SelectInst *Sel : SelectUsers)
4358     if (!isSafeSelectToSpeculate(*Sel)) {
4359       Promotable = false;
4360       PHIUsers.clear();
4361       SelectUsers.clear();
4362       break;
4363     }
4364
4365   if (Promotable) {
4366     for (Use *U : AS.getDeadUsesIfPromotable()) {
4367       auto *OldInst = dyn_cast<Instruction>(U->get());
4368       Value::dropDroppableUse(*U);
4369       if (OldInst)
4370         if (isInstructionTriviallyDead(OldInst))
4371           DeadInsts.push_back(OldInst);
4372     }
4373     if (PHIUsers.empty() && SelectUsers.empty()) {
4374       // Promote the alloca.
4375       PromotableAllocas.push_back(NewAI);
4376     } else {
4377       // If we have either PHIs or Selects to speculate, add them to those
4378       // worklists and re-queue the new alloca so that we promote it on the
4379       // next iteration.
4380       for (PHINode *PHIUser : PHIUsers)
4381         SpeculatablePHIs.insert(PHIUser);
4382       for (SelectInst *SelectUser : SelectUsers)
4383         SpeculatableSelects.insert(SelectUser);
4384       Worklist.insert(NewAI);
4385     }
4386   } else {
4387     // Drop any post-promotion work items if promotion didn't happen.
4388     while (PostPromotionWorklist.size() > PPWOldSize)
4389       PostPromotionWorklist.pop_back();
4390
4391     // We couldn't promote and we didn't create a new partition, nothing
4392     // happened.
4393     if (NewAI == &AI)
4394       return nullptr;
4395
4396     // If we can't promote the alloca, iterate on it to check for new
4397     // refinements exposed by splitting the current alloca. Don't iterate on an
4398     // alloca which didn't actually change and didn't get promoted.
4399     Worklist.insert(NewAI);
4400   }
4401
4402   return NewAI;
4403 }
4404
4405 /// Walks the slices of an alloca and forms partitions based on them,
4406 /// rewriting each of their uses.
4407 bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
4408   if (AS.begin() == AS.end())
4409     return false;
4410
4411   unsigned NumPartitions = 0;
4412   bool Changed = false;
4413   const DataLayout &DL = AI.getModule()->getDataLayout();
4414
4415   // First try to pre-split loads and stores.
4416   Changed |= presplitLoadsAndStores(AI, AS);
4417
4418   // Now that we have identified any pre-splitting opportunities,
4419   // mark loads and stores unsplittable except for the following case.
4420   // We leave a slice splittable if all other slices are disjoint or fully
4421   // included in the slice, such as whole-alloca loads and stores.
4422   // If we fail to split these during pre-splitting, we want to force them
4423   // to be rewritten into a partition.
4424   bool IsSorted = true;
4425
4426   uint64_t AllocaSize =
4427       DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize();
4428   const uint64_t MaxBitVectorSize = 1024;
4429   if (AllocaSize <= MaxBitVectorSize) {
4430     // If a byte boundary is included in any load or store, a slice starting or
4431     // ending at the boundary is not splittable.
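    // Illustrative example: a splittable i64 load over bytes [4, 12) whose
    // begin offset 4 falls strictly inside another access covering bytes
    // [2, 6) is forced to be unsplittable by the second loop below.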
    SmallBitVector SplittableOffset(AllocaSize + 1, true);
    for (Slice &S : AS)
      for (unsigned O = S.beginOffset() + 1;
           O < S.endOffset() && O < AllocaSize; O++)
        SplittableOffset.reset(O);

    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
          (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  } else {
    // We only allow whole-alloca splittable loads and stores
    // for a large alloca to avoid creating too large a BitVector.
    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  }

  if (!IsSorted)
    llvm::sort(AS);

  /// Describes the allocas introduced by rewritePartition in order to migrate
  /// the debug info.
  struct Fragment {
    AllocaInst *Alloca;
    uint64_t Offset;
    uint64_t Size;
    Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
        : Alloca(AI), Offset(O), Size(S) {}
  };
  SmallVector<Fragment, 4> Fragments;

  // Rewrite each partition.
  for (auto &P : AS.partitions()) {
    if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
      Changed = true;
      if (NewAI != &AI) {
        uint64_t SizeOfByte = 8;
        uint64_t AllocaSize =
            DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedSize();
        // Don't include any padding.
        uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
        Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
      }
    }
    ++NumPartitions;
  }

  NumAllocaPartitions += NumPartitions;
  MaxPartitionsPerAlloca.updateMax(NumPartitions);

  // Migrate debug information from the old alloca to the new alloca(s)
  // and the individual partitions.
  TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
  for (DbgVariableIntrinsic *DbgDeclare : DbgDeclares) {
    auto *Expr = DbgDeclare->getExpression();
    DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
    uint64_t AllocaSize =
        DL.getTypeSizeInBits(AI.getAllocatedType()).getFixedSize();
    for (auto Fragment : Fragments) {
      // Create a fragment expression describing the new partition or reuse AI's
      // expression if there is only one partition.
      auto *FragmentExpr = Expr;
      if (Fragment.Size < AllocaSize || Expr->isFragment()) {
        // If this alloca is already a scalar replacement of a larger aggregate,
        // Fragment.Offset describes the offset inside the scalar.
        auto ExprFragment = Expr->getFragmentInfo();
        uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
        uint64_t Start = Offset + Fragment.Offset;
        uint64_t Size = Fragment.Size;
        if (ExprFragment) {
          uint64_t AbsEnd =
              ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
          if (Start >= AbsEnd)
            // No need to describe a SROAed padding.
            continue;
          Size = std::min(Size, AbsEnd - Start);
        }
        // The new, smaller fragment is stenciled out from the old fragment.
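        // createFragmentExpression interprets its offset relative to any
        // fragment already present in Expr, so the offset handed to it below
        // is made relative to the old fragment again. As a purely
        // illustrative (hypothetical) end result, splitting an alloca that
        // backs a 64-bit variable into two 32-bit pieces yields declarations
        // along the lines of:
        //   call void @llvm.dbg.declare(metadata i32* %x.sroa.0, metadata !v,
        //        metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32))
        //   call void @llvm.dbg.declare(metadata i32* %x.sroa.4, metadata !v,
        //        metadata !DIExpression(DW_OP_LLVM_fragment, 32, 32))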
        if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
          assert(Start >= OrigFragment->OffsetInBits &&
                 "new fragment is outside of original fragment");
          Start -= OrigFragment->OffsetInBits;
        }

        // The alloca may be larger than the variable.
        auto VarSize = DbgDeclare->getVariable()->getSizeInBits();
        if (VarSize) {
          if (Size > *VarSize)
            Size = *VarSize;
          if (Size == 0 || Start + Size > *VarSize)
            continue;
        }

        // Avoid creating a fragment expression that covers the entire
        // variable.
        if (!VarSize || *VarSize != Size) {
          if (auto E =
                  DIExpression::createFragmentExpression(Expr, Start, Size))
            FragmentExpr = *E;
          else
            continue;
        }
      }

      // Remove any existing intrinsics on the new alloca describing
      // the variable fragment.
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca)) {
        auto SameVariableFragment = [](const DbgVariableIntrinsic *LHS,
                                       const DbgVariableIntrinsic *RHS) {
          return LHS->getVariable() == RHS->getVariable() &&
                 LHS->getDebugLoc()->getInlinedAt() ==
                     RHS->getDebugLoc()->getInlinedAt();
        };
        if (SameVariableFragment(OldDII, DbgDeclare))
          OldDII->eraseFromParent();
      }

      DIB.insertDeclare(Fragment.Alloca, DbgDeclare->getVariable(),
                        FragmentExpr, DbgDeclare->getDebugLoc(), &AI);
    }
  }
  return Changed;
}

/// Clobber a use with undef, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {
  Value *OldV = U;
  // Replace the use with an undef value.
  U = UndefValue::get(OldV->getType());

  // Check for this making an instruction dead. We have to garbage collect
  // all the dead instructions to ensure the uses of any alloca end up being
  // minimal.
  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
    if (isInstructionTriviallyDead(OldI)) {
      DeadInsts.push_back(OldI);
    }
}

/// Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROA::runOnAlloca(AllocaInst &AI) {
  LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
  ++NumAllocasAnalyzed;

  // Special case dead allocas, as they're trivial.
  if (AI.use_empty()) {
    AI.eraseFromParent();
    return true;
  }
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // Skip alloca forms that this analysis can't handle.
  auto *AT = AI.getAllocatedType();
  if (AI.isArrayAllocation() || !AT->isSized() || isa<ScalableVectorType>(AT) ||
      DL.getTypeAllocSize(AT).getFixedSize() == 0)
    return false;

  bool Changed = false;

  // First, split any FCA loads and stores touching this alloca to promote
  // better splitting and promotion opportunities.
  AggLoadStoreRewriter AggRewriter(DL);
  Changed |= AggRewriter.rewrite(AI);

  // Build the slices using a recursive instruction-visiting builder.
  AllocaSlices AS(DL, AI);
  LLVM_DEBUG(AS.print(dbgs()));
  if (AS.isEscaped())
    return Changed;

  // Delete all the dead users of this alloca before splitting and rewriting
  // it.
  for (Instruction *DeadUser : AS.getDeadUsers()) {
    // Free up everything used by this instruction.
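    // Clobbering each operand with undef (clobberUse below) lets any operand
    // chain that only fed this dead user become trivially dead as well, so
    // deleteDeadInstructions can sweep it and the alloca's remaining use list
    // stays minimal before rewriting.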
    for (Use &DeadOp : DeadUser->operands())
      clobberUse(DeadOp);

    // Now replace the uses of this instruction.
    DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType()));

    // And mark it for deletion.
    DeadInsts.push_back(DeadUser);
    Changed = true;
  }
  for (Use *DeadOp : AS.getDeadOperands()) {
    clobberUse(*DeadOp);
    Changed = true;
  }

  // No slices to split. Leave the dead alloca for a later pass to clean up.
  if (AS.begin() == AS.end())
    return Changed;

  Changed |= splitAlloca(AI, AS);

  LLVM_DEBUG(dbgs() << "  Speculating PHIs\n");
  while (!SpeculatablePHIs.empty())
    speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());

  LLVM_DEBUG(dbgs() << "  Speculating Selects\n");
  while (!SpeculatableSelects.empty())
    speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());

  return Changed;
}

/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROA::deleteDeadInstructions(
    SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
  bool Changed = false;
  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
    if (!I) continue;
    LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

    // If the instruction is an alloca, find the possible dbg.declare connected
    // to it, and remove it too. We must do this before calling RAUW or we will
    // not be able to find it.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      DeletedAllocas.insert(AI);
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
        OldDII->eraseFromParent();
    }

    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (Use &Operand : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.push_back(U);
      }

    ++NumDeleted;
    I->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
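///
/// Promotion itself is delegated to mem2reg (PromoteMemToReg), which rewrites
/// loads and stores of each promotable alloca into SSA values, inserting PHI
/// nodes where needed based on the dominator tree. As a purely illustrative
/// (hypothetical) example,
///   %x = alloca i32
///   store i32 %v, i32* %x
///   %y = load i32, i32* %x
/// collapses so that uses of %y simply become uses of %v, with no memory
/// traffic left behind.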
bool SROA::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
  PromoteMemToReg(PromotableAllocas, *DT, AC);
  PromotableAllocas.clear();
  return true;
}

PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
                                AssumptionCache &RunAC) {
  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DT = &RunDT;
  AC = &RunAC;

  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      if (isa<ScalableVectorType>(AI->getAllocatedType())) {
        if (isAllocaPromotable(AI))
          PromotableAllocas.push_back(AI);
      } else {
        Worklist.insert(AI);
      }
    }
  }

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      Changed |= deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        llvm::erase_if(PromotableAllocas, IsInSet);
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  return PA;
}

PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) {
  return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
                 AM.getResult<AssumptionAnalysis>(F));
}

/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
///
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
class llvm::sroa::SROALegacyPass : public FunctionPass {
  /// The SROA implementation.
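  ///
  /// The legacy wrapper owns a plain \c SROA instance; runOnFunction below
  /// simply fetches the dominator tree and assumption cache from the legacy
  /// analysis framework and forwards them to \c runImpl.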
  SROA Impl;

public:
  static char ID;

  SROALegacyPass() : FunctionPass(ID) {
    initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto PA = Impl.runImpl(
        F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
    return !PA.areAllPreserved();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override { return "SROA"; }
};

char SROALegacyPass::ID = 0;

FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }

INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
                      "Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)
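// For quick experimentation, the pass can be exercised in isolation with the
// new pass manager via `opt -passes=sroa -S input.ll`; whether the legacy
// `opt -sroa` spelling is still accepted depends on the LLVM version.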