1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This transformation implements the well known scalar replacement of 10 /// aggregates transformation. It tries to identify promotable elements of an 11 /// aggregate alloca, and promote them to registers. It will also try to 12 /// convert uses of an element (or set of elements) of an alloca into a vector 13 /// or bitfield-style integer scalar if appropriate. 14 /// 15 /// It works to do this with minimal slicing of the alloca so that regions 16 /// which are merely transferred in and out of external memory remain unchanged 17 /// and are not decomposed to scalar code. 18 /// 19 /// Because this also performs alloca promotion, it can be thought of as also 20 /// serving the purpose of SSA formation. The algorithm iterates on the 21 /// function until all opportunities for promotion have been realized. 22 /// 23 //===----------------------------------------------------------------------===// 24 25 #include "llvm/Transforms/Scalar/SROA.h" 26 #include "llvm/ADT/APInt.h" 27 #include "llvm/ADT/ArrayRef.h" 28 #include "llvm/ADT/DenseMap.h" 29 #include "llvm/ADT/PointerIntPair.h" 30 #include "llvm/ADT/STLExtras.h" 31 #include "llvm/ADT/SetVector.h" 32 #include "llvm/ADT/SmallBitVector.h" 33 #include "llvm/ADT/SmallPtrSet.h" 34 #include "llvm/ADT/SmallVector.h" 35 #include "llvm/ADT/Statistic.h" 36 #include "llvm/ADT/StringRef.h" 37 #include "llvm/ADT/Twine.h" 38 #include "llvm/ADT/iterator.h" 39 #include "llvm/ADT/iterator_range.h" 40 #include "llvm/Analysis/AssumptionCache.h" 41 #include "llvm/Analysis/GlobalsModRef.h" 42 #include "llvm/Analysis/Loads.h" 43 #include "llvm/Analysis/PtrUseVisitor.h" 44 #include "llvm/Config/llvm-config.h" 45 #include "llvm/IR/BasicBlock.h" 46 #include "llvm/IR/Constant.h" 47 #include "llvm/IR/ConstantFolder.h" 48 #include "llvm/IR/Constants.h" 49 #include "llvm/IR/DIBuilder.h" 50 #include "llvm/IR/DataLayout.h" 51 #include "llvm/IR/DebugInfo.h" 52 #include "llvm/IR/DebugInfoMetadata.h" 53 #include "llvm/IR/DerivedTypes.h" 54 #include "llvm/IR/Dominators.h" 55 #include "llvm/IR/Function.h" 56 #include "llvm/IR/GetElementPtrTypeIterator.h" 57 #include "llvm/IR/GlobalAlias.h" 58 #include "llvm/IR/IRBuilder.h" 59 #include "llvm/IR/InstVisitor.h" 60 #include "llvm/IR/InstrTypes.h" 61 #include "llvm/IR/Instruction.h" 62 #include "llvm/IR/Instructions.h" 63 #include "llvm/IR/IntrinsicInst.h" 64 #include "llvm/IR/Intrinsics.h" 65 #include "llvm/IR/LLVMContext.h" 66 #include "llvm/IR/Metadata.h" 67 #include "llvm/IR/Module.h" 68 #include "llvm/IR/Operator.h" 69 #include "llvm/IR/PassManager.h" 70 #include "llvm/IR/Type.h" 71 #include "llvm/IR/Use.h" 72 #include "llvm/IR/User.h" 73 #include "llvm/IR/Value.h" 74 #include "llvm/InitializePasses.h" 75 #include "llvm/Pass.h" 76 #include "llvm/Support/Casting.h" 77 #include "llvm/Support/CommandLine.h" 78 #include "llvm/Support/Compiler.h" 79 #include "llvm/Support/Debug.h" 80 #include "llvm/Support/ErrorHandling.h" 81 #include "llvm/Support/MathExtras.h" 82 #include "llvm/Support/raw_ostream.h" 83 #include "llvm/Transforms/Scalar.h" 84 #include "llvm/Transforms/Utils/Local.h" 85 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 86 
#include <algorithm> 87 #include <cassert> 88 #include <chrono> 89 #include <cstddef> 90 #include <cstdint> 91 #include <cstring> 92 #include <iterator> 93 #include <string> 94 #include <tuple> 95 #include <utility> 96 #include <vector> 97 98 using namespace llvm; 99 using namespace llvm::sroa; 100 101 #define DEBUG_TYPE "sroa" 102 103 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); 104 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); 105 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca"); 106 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten"); 107 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition"); 108 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); 109 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); 110 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); 111 STATISTIC(NumDeleted, "Number of instructions deleted"); 112 STATISTIC(NumVectorized, "Number of vectorized aggregates"); 113 114 /// Hidden option to experiment with completely strict handling of inbounds 115 /// GEPs. 116 static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false), 117 cl::Hidden); 118 119 namespace { 120 121 /// A custom IRBuilder inserter which prefixes all names, but only in 122 /// Assert builds. 123 class IRBuilderPrefixedInserter final : public IRBuilderDefaultInserter { 124 std::string Prefix; 125 126 Twine getNameWithPrefix(const Twine &Name) const { 127 return Name.isTriviallyEmpty() ? Name : Prefix + Name; 128 } 129 130 public: 131 void SetNamePrefix(const Twine &P) { Prefix = P.str(); } 132 133 void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, 134 BasicBlock::iterator InsertPt) const override { 135 IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB, 136 InsertPt); 137 } 138 }; 139 140 /// Provide a type for IRBuilder that drops names in release builds. 141 using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>; 142 143 /// A used slice of an alloca. 144 /// 145 /// This structure represents a slice of an alloca used by some instruction. It 146 /// stores both the begin and end offsets of this use, a pointer to the use 147 /// itself, and a flag indicating whether we can classify the use as splittable 148 /// or not when forming partitions of the alloca. 149 class Slice { 150 /// The beginning offset of the range. 151 uint64_t BeginOffset = 0; 152 153 /// The ending offset, not included in the range. 154 uint64_t EndOffset = 0; 155 156 /// Storage for both the use of this slice and whether it can be 157 /// split. 158 PointerIntPair<Use *, 1, bool> UseAndIsSplittable; 159 160 public: 161 Slice() = default; 162 163 Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable) 164 : BeginOffset(BeginOffset), EndOffset(EndOffset), 165 UseAndIsSplittable(U, IsSplittable) {} 166 167 uint64_t beginOffset() const { return BeginOffset; } 168 uint64_t endOffset() const { return EndOffset; } 169 170 bool isSplittable() const { return UseAndIsSplittable.getInt(); } 171 void makeUnsplittable() { UseAndIsSplittable.setInt(false); } 172 173 Use *getUse() const { return UseAndIsSplittable.getPointer(); } 174 175 bool isDead() const { return getUse() == nullptr; } 176 void kill() { UseAndIsSplittable.setPointer(nullptr); } 177 178 /// Support for ordering ranges. 
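  /// (Illustrative sketch with made-up offsets: under this ordering, an
  /// unsplittable slice [0,16) sorts before a splittable [0,16), which sorts
  /// before [0,8), which in turn sorts before [4,12).)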
179 /// 180 /// This provides an ordering over ranges such that start offsets are 181 /// always increasing, and within equal start offsets, the end offsets are 182 /// decreasing. Thus the spanning range comes first in a cluster with the 183 /// same start position. 184 bool operator<(const Slice &RHS) const { 185 if (beginOffset() < RHS.beginOffset()) 186 return true; 187 if (beginOffset() > RHS.beginOffset()) 188 return false; 189 if (isSplittable() != RHS.isSplittable()) 190 return !isSplittable(); 191 if (endOffset() > RHS.endOffset()) 192 return true; 193 return false; 194 } 195 196 /// Support comparison with a single offset to allow binary searches. 197 friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS, 198 uint64_t RHSOffset) { 199 return LHS.beginOffset() < RHSOffset; 200 } 201 friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, 202 const Slice &RHS) { 203 return LHSOffset < RHS.beginOffset(); 204 } 205 206 bool operator==(const Slice &RHS) const { 207 return isSplittable() == RHS.isSplittable() && 208 beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset(); 209 } 210 bool operator!=(const Slice &RHS) const { return !operator==(RHS); } 211 }; 212 213 } // end anonymous namespace 214 215 /// Representation of the alloca slices. 216 /// 217 /// This class represents the slices of an alloca which are formed by its 218 /// various uses. If a pointer escapes, we can't fully build a representation 219 /// for the slices used and we reflect that in this structure. The uses are 220 /// stored, sorted by increasing beginning offset and with unsplittable slices 221 /// starting at a particular offset before splittable slices. 222 class llvm::sroa::AllocaSlices { 223 public: 224 /// Construct the slices of a particular alloca. 225 AllocaSlices(const DataLayout &DL, AllocaInst &AI); 226 227 /// Test whether a pointer to the allocation escapes our analysis. 228 /// 229 /// If this is true, the slices are never fully built and should be 230 /// ignored. 231 bool isEscaped() const { return PointerEscapingInstr; } 232 233 /// Support for iterating over the slices. 234 /// @{ 235 using iterator = SmallVectorImpl<Slice>::iterator; 236 using range = iterator_range<iterator>; 237 238 iterator begin() { return Slices.begin(); } 239 iterator end() { return Slices.end(); } 240 241 using const_iterator = SmallVectorImpl<Slice>::const_iterator; 242 using const_range = iterator_range<const_iterator>; 243 244 const_iterator begin() const { return Slices.begin(); } 245 const_iterator end() const { return Slices.end(); } 246 /// @} 247 248 /// Erase a range of slices. 249 void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); } 250 251 /// Insert new slices for this alloca. 252 /// 253 /// This moves the slices into the alloca's slices collection, and re-sorts 254 /// everything so that the usual ordering properties of the alloca's slices 255 /// hold. 256 void insert(ArrayRef<Slice> NewSlices) { 257 int OldSize = Slices.size(); 258 Slices.append(NewSlices.begin(), NewSlices.end()); 259 auto SliceI = Slices.begin() + OldSize; 260 llvm::sort(SliceI, Slices.end()); 261 std::inplace_merge(Slices.begin(), SliceI, Slices.end()); 262 } 263 264 // Forward declare the iterator and range accessor for walking the 265 // partitions. 266 class partition_iterator; 267 iterator_range<partition_iterator> partitions(); 268 269 /// Access the dead users for this alloca. 
  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }

  /// Access Uses that should be dropped if the alloca is promotable.
  ArrayRef<Use *> getDeadUsesIfPromotable() const {
    return DeadUseIfPromotable;
  }

  /// Access the dead operands referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;

  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with poison as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// Uses which will become dead if we can promote the alloca.
  SmallVector<Use *, 8> DeadUseIfPromotable;

  /// Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// poison when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we
  /// want to swap this particular input for poison to simplify the use lists
  /// of the alloca.
  SmallVector<Use *, 8> DeadOperands;
};

/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
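///
/// For illustration (a sketch with made-up offsets): given slices [0,8)
/// unsplittable and [4,16) splittable, the first partition spans [0,8) and
/// contains both slices; the remaining tail [8,16) of the splittable slice is
/// carried into a following partition as a split slice tail.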
351 /// 352 /// Objects of this type are produced by traversing the alloca's slices, but 353 /// are only ephemeral and not persistent. 354 class llvm::sroa::Partition { 355 private: 356 friend class AllocaSlices; 357 friend class AllocaSlices::partition_iterator; 358 359 using iterator = AllocaSlices::iterator; 360 361 /// The beginning and ending offsets of the alloca for this 362 /// partition. 363 uint64_t BeginOffset = 0, EndOffset = 0; 364 365 /// The start and end iterators of this partition. 366 iterator SI, SJ; 367 368 /// A collection of split slice tails overlapping the partition. 369 SmallVector<Slice *, 4> SplitTails; 370 371 /// Raw constructor builds an empty partition starting and ending at 372 /// the given iterator. 373 Partition(iterator SI) : SI(SI), SJ(SI) {} 374 375 public: 376 /// The start offset of this partition. 377 /// 378 /// All of the contained slices start at or after this offset. 379 uint64_t beginOffset() const { return BeginOffset; } 380 381 /// The end offset of this partition. 382 /// 383 /// All of the contained slices end at or before this offset. 384 uint64_t endOffset() const { return EndOffset; } 385 386 /// The size of the partition. 387 /// 388 /// Note that this can never be zero. 389 uint64_t size() const { 390 assert(BeginOffset < EndOffset && "Partitions must span some bytes!"); 391 return EndOffset - BeginOffset; 392 } 393 394 /// Test whether this partition contains no slices, and merely spans 395 /// a region occupied by split slices. 396 bool empty() const { return SI == SJ; } 397 398 /// \name Iterate slices that start within the partition. 399 /// These may be splittable or unsplittable. They have a begin offset >= the 400 /// partition begin offset. 401 /// @{ 402 // FIXME: We should probably define a "concat_iterator" helper and use that 403 // to stitch together pointee_iterators over the split tails and the 404 // contiguous iterators of the partition. That would give a much nicer 405 // interface here. We could then additionally expose filtered iterators for 406 // split, unsplit, and unsplittable splices based on the usage patterns. 407 iterator begin() const { return SI; } 408 iterator end() const { return SJ; } 409 /// @} 410 411 /// Get the sequence of split slice tails. 412 /// 413 /// These tails are of slices which start before this partition but are 414 /// split and overlap into the partition. We accumulate these while forming 415 /// partitions. 416 ArrayRef<Slice *> splitSliceTails() const { return SplitTails; } 417 }; 418 419 /// An iterator over partitions of the alloca's slices. 420 /// 421 /// This iterator implements the core algorithm for partitioning the alloca's 422 /// slices. It is a forward iterator as we don't support backtracking for 423 /// efficiency reasons, and re-use a single storage area to maintain the 424 /// current set of split slices. 425 /// 426 /// It is templated on the slice iterator type to use so that it can operate 427 /// with either const or non-const slice iterators. 428 class AllocaSlices::partition_iterator 429 : public iterator_facade_base<partition_iterator, std::forward_iterator_tag, 430 Partition> { 431 friend class AllocaSlices; 432 433 /// Most of the state for walking the partitions is held in a class 434 /// with a nice interface for examining them. 435 Partition P; 436 437 /// We need to keep the end of the slices to know when to stop. 438 AllocaSlices::iterator SE; 439 440 /// We also need to keep track of the maximum split end offset seen. 441 /// FIXME: Do we really? 
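  /// (One reason to keep it, sketched: advance() can drop all split tails at
  /// once as soon as the current partition's end reaches this offset.)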
442 uint64_t MaxSplitSliceEndOffset = 0; 443 444 /// Sets the partition to be empty at given iterator, and sets the 445 /// end iterator. 446 partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE) 447 : P(SI), SE(SE) { 448 // If not already at the end, advance our state to form the initial 449 // partition. 450 if (SI != SE) 451 advance(); 452 } 453 454 /// Advance the iterator to the next partition. 455 /// 456 /// Requires that the iterator not be at the end of the slices. 457 void advance() { 458 assert((P.SI != SE || !P.SplitTails.empty()) && 459 "Cannot advance past the end of the slices!"); 460 461 // Clear out any split uses which have ended. 462 if (!P.SplitTails.empty()) { 463 if (P.EndOffset >= MaxSplitSliceEndOffset) { 464 // If we've finished all splits, this is easy. 465 P.SplitTails.clear(); 466 MaxSplitSliceEndOffset = 0; 467 } else { 468 // Remove the uses which have ended in the prior partition. This 469 // cannot change the max split slice end because we just checked that 470 // the prior partition ended prior to that max. 471 llvm::erase_if(P.SplitTails, 472 [&](Slice *S) { return S->endOffset() <= P.EndOffset; }); 473 assert(llvm::any_of(P.SplitTails, 474 [&](Slice *S) { 475 return S->endOffset() == MaxSplitSliceEndOffset; 476 }) && 477 "Could not find the current max split slice offset!"); 478 assert(llvm::all_of(P.SplitTails, 479 [&](Slice *S) { 480 return S->endOffset() <= MaxSplitSliceEndOffset; 481 }) && 482 "Max split slice end offset is not actually the max!"); 483 } 484 } 485 486 // If P.SI is already at the end, then we've cleared the split tail and 487 // now have an end iterator. 488 if (P.SI == SE) { 489 assert(P.SplitTails.empty() && "Failed to clear the split slices!"); 490 return; 491 } 492 493 // If we had a non-empty partition previously, set up the state for 494 // subsequent partitions. 495 if (P.SI != P.SJ) { 496 // Accumulate all the splittable slices which started in the old 497 // partition into the split list. 498 for (Slice &S : P) 499 if (S.isSplittable() && S.endOffset() > P.EndOffset) { 500 P.SplitTails.push_back(&S); 501 MaxSplitSliceEndOffset = 502 std::max(S.endOffset(), MaxSplitSliceEndOffset); 503 } 504 505 // Start from the end of the previous partition. 506 P.SI = P.SJ; 507 508 // If P.SI is now at the end, we at most have a tail of split slices. 509 if (P.SI == SE) { 510 P.BeginOffset = P.EndOffset; 511 P.EndOffset = MaxSplitSliceEndOffset; 512 return; 513 } 514 515 // If the we have split slices and the next slice is after a gap and is 516 // not splittable immediately form an empty partition for the split 517 // slices up until the next slice begins. 518 if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset && 519 !P.SI->isSplittable()) { 520 P.BeginOffset = P.EndOffset; 521 P.EndOffset = P.SI->beginOffset(); 522 return; 523 } 524 } 525 526 // OK, we need to consume new slices. Set the end offset based on the 527 // current slice, and step SJ past it. The beginning offset of the 528 // partition is the beginning offset of the next slice unless we have 529 // pre-existing split slices that are continuing, in which case we begin 530 // at the prior end offset. 531 P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset; 532 P.EndOffset = P.SI->endOffset(); 533 ++P.SJ; 534 535 // There are two strategies to form a partition based on whether the 536 // partition starts with an unsplittable slice or a splittable slice. 
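    // (Sketch with made-up offsets: starting from an unsplittable [0,16) that
    // a splittable [8,24) overlaps, the partition becomes [0,16) and the
    // remainder [16,24) later reappears as a split tail; starting from only
    // splittable slices [0,8) and [4,12), the partition spans [0,12).)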
    if (!P.SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at
      // the first slice and will extend through its end.
      assert(P.BeginOffset == P.SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
        if (!P.SJ->isSplittable())
          P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
        ++P.SJ;
      }

      // We have a partition across a set of overlapping unsplittable
      // slices.
      return;
    }

    // If we're starting with a splittable slice, then we need to form
    // a synthetic partition spanning it and any other overlapping splittable
    // slices.
    assert(P.SI->isSplittable() && "Forming a splittable partition!");

    // Collect all of the overlapping splittable slices.
    while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
           P.SJ->isSplittable()) {
      P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      ++P.SJ;
    }

    // Back up P.EndOffset if we ended the span early when encountering an
    // unsplittable slice. This synthesizes the early end offset of
    // a partition spanning only splittable slices.
    if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
      assert(!P.SJ->isSplittable());
      P.EndOffset = P.SJ->beginOffset();
    }
  }

public:
  bool operator==(const partition_iterator &RHS) const {
    assert(SE == RHS.SE &&
           "End iterators don't match between compared partition iterators!");

    // The observed positions of partitions are marked by the P.SI iterator and
    // the emptiness of the split slices. The latter is only relevant when
    // P.SI == SE, as the end iterator will additionally have an empty split
    // slices list, but the prior may have the same P.SI and a tail of split
    // slices.
    if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
      assert(P.SJ == RHS.P.SJ &&
             "Same set of slices formed two different sized partitions!");
      assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
             "Same slice position with differently sized non-empty split "
             "slice tails!");
      return true;
    }
    return false;
  }

  partition_iterator &operator++() {
    advance();
    return *this;
  }

  Partition &operator*() { return P; }
};

/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
  return make_range(partition_iterator(begin(), end()),
                    partition_iterator(end(), end()));
}

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}

/// A helper that folds a PHI node or a select.
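///
/// For example (a sketch): a PHI such as `phi i32* [ %p, %bb1 ], [ %p, %bb2 ]`
/// folds to `%p`, and `select i1 true, i32* %a, i32* %b` folds to `%a`.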
630 static Value *foldPHINodeOrSelectInst(Instruction &I) { 631 if (PHINode *PN = dyn_cast<PHINode>(&I)) { 632 // If PN merges together the same value, return that value. 633 return PN->hasConstantValue(); 634 } 635 return foldSelectInst(cast<SelectInst>(I)); 636 } 637 638 /// Builder for the alloca slices. 639 /// 640 /// This class builds a set of alloca slices by recursively visiting the uses 641 /// of an alloca and making a slice for each load and store at each offset. 642 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> { 643 friend class PtrUseVisitor<SliceBuilder>; 644 friend class InstVisitor<SliceBuilder>; 645 646 using Base = PtrUseVisitor<SliceBuilder>; 647 648 const uint64_t AllocSize; 649 AllocaSlices &AS; 650 651 SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap; 652 SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes; 653 654 /// Set to de-duplicate dead instructions found in the use walk. 655 SmallPtrSet<Instruction *, 4> VisitedDeadInsts; 656 657 public: 658 SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS) 659 : PtrUseVisitor<SliceBuilder>(DL), 660 AllocSize(DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize()), 661 AS(AS) {} 662 663 private: 664 void markAsDead(Instruction &I) { 665 if (VisitedDeadInsts.insert(&I).second) 666 AS.DeadUsers.push_back(&I); 667 } 668 669 void insertUse(Instruction &I, const APInt &Offset, uint64_t Size, 670 bool IsSplittable = false) { 671 // Completely skip uses which have a zero size or start either before or 672 // past the end of the allocation. 673 if (Size == 0 || Offset.uge(AllocSize)) { 674 LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" 675 << Offset 676 << " which has zero size or starts outside of the " 677 << AllocSize << " byte alloca:\n" 678 << " alloca: " << AS.AI << "\n" 679 << " use: " << I << "\n"); 680 return markAsDead(I); 681 } 682 683 uint64_t BeginOffset = Offset.getZExtValue(); 684 uint64_t EndOffset = BeginOffset + Size; 685 686 // Clamp the end offset to the end of the allocation. Note that this is 687 // formulated to handle even the case where "BeginOffset + Size" overflows. 688 // This may appear superficially to be something we could ignore entirely, 689 // but that is not so! There may be widened loads or PHI-node uses where 690 // some instructions are dead but not others. We can't completely ignore 691 // them, and so have to record at least the information here. 692 assert(AllocSize >= BeginOffset); // Established above. 
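    // (E.g., a sketch: a 16-byte use at offset 24 of a 32-byte alloca is
    // recorded as the clamped slice [24, 32).)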
693 if (Size > AllocSize - BeginOffset) { 694 LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" 695 << Offset << " to remain within the " << AllocSize 696 << " byte alloca:\n" 697 << " alloca: " << AS.AI << "\n" 698 << " use: " << I << "\n"); 699 EndOffset = AllocSize; 700 } 701 702 AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable)); 703 } 704 705 void visitBitCastInst(BitCastInst &BC) { 706 if (BC.use_empty()) 707 return markAsDead(BC); 708 709 return Base::visitBitCastInst(BC); 710 } 711 712 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 713 if (ASC.use_empty()) 714 return markAsDead(ASC); 715 716 return Base::visitAddrSpaceCastInst(ASC); 717 } 718 719 void visitGetElementPtrInst(GetElementPtrInst &GEPI) { 720 if (GEPI.use_empty()) 721 return markAsDead(GEPI); 722 723 if (SROAStrictInbounds && GEPI.isInBounds()) { 724 // FIXME: This is a manually un-factored variant of the basic code inside 725 // of GEPs with checking of the inbounds invariant specified in the 726 // langref in a very strict sense. If we ever want to enable 727 // SROAStrictInbounds, this code should be factored cleanly into 728 // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds 729 // by writing out the code here where we have the underlying allocation 730 // size readily available. 731 APInt GEPOffset = Offset; 732 const DataLayout &DL = GEPI.getModule()->getDataLayout(); 733 for (gep_type_iterator GTI = gep_type_begin(GEPI), 734 GTE = gep_type_end(GEPI); 735 GTI != GTE; ++GTI) { 736 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); 737 if (!OpC) 738 break; 739 740 // Handle a struct index, which adds its field offset to the pointer. 741 if (StructType *STy = GTI.getStructTypeOrNull()) { 742 unsigned ElementIdx = OpC->getZExtValue(); 743 const StructLayout *SL = DL.getStructLayout(STy); 744 GEPOffset += 745 APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)); 746 } else { 747 // For array or vector indices, scale the index by the size of the 748 // type. 749 APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth()); 750 GEPOffset += 751 Index * 752 APInt(Offset.getBitWidth(), 753 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize()); 754 } 755 756 // If this index has computed an intermediate pointer which is not 757 // inbounds, then the result of the GEP is a poison value and we can 758 // delete it and all uses. 759 if (GEPOffset.ugt(AllocSize)) 760 return markAsDead(GEPI); 761 } 762 } 763 764 return Base::visitGetElementPtrInst(GEPI); 765 } 766 767 void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset, 768 uint64_t Size, bool IsVolatile) { 769 // We allow splitting of non-volatile loads and stores where the type is an 770 // integer type. These may be used to implement 'memcpy' or other "transfer 771 // of bits" patterns. 
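    // (For instance, a sketch: an i64 load that merely shuttles 8 bytes can be
    // treated as splittable and later carved into narrower integer accesses if
    // the partitioning requires it.)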
772 bool IsSplittable = 773 Ty->isIntegerTy() && !IsVolatile && DL.typeSizeEqualsStoreSize(Ty); 774 775 insertUse(I, Offset, Size, IsSplittable); 776 } 777 778 void visitLoadInst(LoadInst &LI) { 779 assert((!LI.isSimple() || LI.getType()->isSingleValueType()) && 780 "All simple FCA loads should have been pre-split"); 781 782 if (!IsOffsetKnown) 783 return PI.setAborted(&LI); 784 785 if (LI.isVolatile() && 786 LI.getPointerAddressSpace() != DL.getAllocaAddrSpace()) 787 return PI.setAborted(&LI); 788 789 if (isa<ScalableVectorType>(LI.getType())) 790 return PI.setAborted(&LI); 791 792 uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedSize(); 793 return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile()); 794 } 795 796 void visitStoreInst(StoreInst &SI) { 797 Value *ValOp = SI.getValueOperand(); 798 if (ValOp == *U) 799 return PI.setEscapedAndAborted(&SI); 800 if (!IsOffsetKnown) 801 return PI.setAborted(&SI); 802 803 if (SI.isVolatile() && 804 SI.getPointerAddressSpace() != DL.getAllocaAddrSpace()) 805 return PI.setAborted(&SI); 806 807 if (isa<ScalableVectorType>(ValOp->getType())) 808 return PI.setAborted(&SI); 809 810 uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedSize(); 811 812 // If this memory access can be shown to *statically* extend outside the 813 // bounds of the allocation, it's behavior is undefined, so simply 814 // ignore it. Note that this is more strict than the generic clamping 815 // behavior of insertUse. We also try to handle cases which might run the 816 // risk of overflow. 817 // FIXME: We should instead consider the pointer to have escaped if this 818 // function is being instrumented for addressing bugs or race conditions. 819 if (Size > AllocSize || Offset.ugt(AllocSize - Size)) { 820 LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" 821 << Offset << " which extends past the end of the " 822 << AllocSize << " byte alloca:\n" 823 << " alloca: " << AS.AI << "\n" 824 << " use: " << SI << "\n"); 825 return markAsDead(SI); 826 } 827 828 assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) && 829 "All simple FCA stores should have been pre-split"); 830 handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile()); 831 } 832 833 void visitMemSetInst(MemSetInst &II) { 834 assert(II.getRawDest() == *U && "Pointer use is not the destination?"); 835 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength()); 836 if ((Length && Length->getValue() == 0) || 837 (IsOffsetKnown && Offset.uge(AllocSize))) 838 // Zero-length mem transfer intrinsics can be ignored entirely. 839 return markAsDead(II); 840 841 if (!IsOffsetKnown) 842 return PI.setAborted(&II); 843 844 // Don't replace this with a store with a different address space. TODO: 845 // Use a store with the casted new alloca? 846 if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace()) 847 return PI.setAborted(&II); 848 849 insertUse(II, Offset, Length ? Length->getLimitedValue() 850 : AllocSize - Offset.getLimitedValue(), 851 (bool)Length); 852 } 853 854 void visitMemTransferInst(MemTransferInst &II) { 855 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength()); 856 if (Length && Length->getValue() == 0) 857 // Zero-length mem transfer intrinsics can be ignored entirely. 858 return markAsDead(II); 859 860 // Because we can visit these intrinsics twice, also check to see if the 861 // first time marked this instruction as dead. If so, skip it. 
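    // (A memcpy whose source and destination both derive from this alloca is
    // visited once per pointer operand, hence possibly twice.)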
862 if (VisitedDeadInsts.count(&II)) 863 return; 864 865 if (!IsOffsetKnown) 866 return PI.setAborted(&II); 867 868 // Don't replace this with a load/store with a different address space. 869 // TODO: Use a store with the casted new alloca? 870 if (II.isVolatile() && 871 (II.getDestAddressSpace() != DL.getAllocaAddrSpace() || 872 II.getSourceAddressSpace() != DL.getAllocaAddrSpace())) 873 return PI.setAborted(&II); 874 875 // This side of the transfer is completely out-of-bounds, and so we can 876 // nuke the entire transfer. However, we also need to nuke the other side 877 // if already added to our partitions. 878 // FIXME: Yet another place we really should bypass this when 879 // instrumenting for ASan. 880 if (Offset.uge(AllocSize)) { 881 SmallDenseMap<Instruction *, unsigned>::iterator MTPI = 882 MemTransferSliceMap.find(&II); 883 if (MTPI != MemTransferSliceMap.end()) 884 AS.Slices[MTPI->second].kill(); 885 return markAsDead(II); 886 } 887 888 uint64_t RawOffset = Offset.getLimitedValue(); 889 uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset; 890 891 // Check for the special case where the same exact value is used for both 892 // source and dest. 893 if (*U == II.getRawDest() && *U == II.getRawSource()) { 894 // For non-volatile transfers this is a no-op. 895 if (!II.isVolatile()) 896 return markAsDead(II); 897 898 return insertUse(II, Offset, Size, /*IsSplittable=*/false); 899 } 900 901 // If we have seen both source and destination for a mem transfer, then 902 // they both point to the same alloca. 903 bool Inserted; 904 SmallDenseMap<Instruction *, unsigned>::iterator MTPI; 905 std::tie(MTPI, Inserted) = 906 MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size())); 907 unsigned PrevIdx = MTPI->second; 908 if (!Inserted) { 909 Slice &PrevP = AS.Slices[PrevIdx]; 910 911 // Check if the begin offsets match and this is a non-volatile transfer. 912 // In that case, we can completely elide the transfer. 913 if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) { 914 PrevP.kill(); 915 return markAsDead(II); 916 } 917 918 // Otherwise we have an offset transfer within the same alloca. We can't 919 // split those. 920 PrevP.makeUnsplittable(); 921 } 922 923 // Insert the use now that we've fixed up the splittable nature. 924 insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length); 925 926 // Check that we ended up with a valid index in the map. 927 assert(AS.Slices[PrevIdx].getUse()->getUser() == &II && 928 "Map index doesn't point back to a slice with this user."); 929 } 930 931 // Disable SRoA for any intrinsics except for lifetime invariants and 932 // invariant group. 933 // FIXME: What about debug intrinsics? This matches old behavior, but 934 // doesn't make sense. 
935 void visitIntrinsicInst(IntrinsicInst &II) { 936 if (II.isDroppable()) { 937 AS.DeadUseIfPromotable.push_back(U); 938 return; 939 } 940 941 if (!IsOffsetKnown) 942 return PI.setAborted(&II); 943 944 if (II.isLifetimeStartOrEnd()) { 945 ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0)); 946 uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(), 947 Length->getLimitedValue()); 948 insertUse(II, Offset, Size, true); 949 return; 950 } 951 952 if (II.isLaunderOrStripInvariantGroup()) { 953 enqueueUsers(II); 954 return; 955 } 956 957 Base::visitIntrinsicInst(II); 958 } 959 960 Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) { 961 // We consider any PHI or select that results in a direct load or store of 962 // the same offset to be a viable use for slicing purposes. These uses 963 // are considered unsplittable and the size is the maximum loaded or stored 964 // size. 965 SmallPtrSet<Instruction *, 4> Visited; 966 SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses; 967 Visited.insert(Root); 968 Uses.push_back(std::make_pair(cast<Instruction>(*U), Root)); 969 const DataLayout &DL = Root->getModule()->getDataLayout(); 970 // If there are no loads or stores, the access is dead. We mark that as 971 // a size zero access. 972 Size = 0; 973 do { 974 Instruction *I, *UsedI; 975 std::tie(UsedI, I) = Uses.pop_back_val(); 976 977 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 978 Size = std::max(Size, 979 DL.getTypeStoreSize(LI->getType()).getFixedSize()); 980 continue; 981 } 982 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 983 Value *Op = SI->getOperand(0); 984 if (Op == UsedI) 985 return SI; 986 Size = std::max(Size, 987 DL.getTypeStoreSize(Op->getType()).getFixedSize()); 988 continue; 989 } 990 991 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { 992 if (!GEP->hasAllZeroIndices()) 993 return GEP; 994 } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) && 995 !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) { 996 return I; 997 } 998 999 for (User *U : I->users()) 1000 if (Visited.insert(cast<Instruction>(U)).second) 1001 Uses.push_back(std::make_pair(I, cast<Instruction>(U))); 1002 } while (!Uses.empty()); 1003 1004 return nullptr; 1005 } 1006 1007 void visitPHINodeOrSelectInst(Instruction &I) { 1008 assert(isa<PHINode>(I) || isa<SelectInst>(I)); 1009 if (I.use_empty()) 1010 return markAsDead(I); 1011 1012 // If this is a PHI node before a catchswitch, we cannot insert any non-PHI 1013 // instructions in this BB, which may be required during rewriting. Bail out 1014 // on these cases. 1015 if (isa<PHINode>(I) && 1016 I.getParent()->getFirstInsertionPt() == I.getParent()->end()) 1017 return PI.setAborted(&I); 1018 1019 // TODO: We could use SimplifyInstruction here to fold PHINodes and 1020 // SelectInsts. However, doing so requires to change the current 1021 // dead-operand-tracking mechanism. For instance, suppose neither loading 1022 // from %U nor %other traps. Then "load (select undef, %U, %other)" does not 1023 // trap either. However, if we simply replace %U with undef using the 1024 // current dead-operand-tracking mechanism, "load (select undef, undef, 1025 // %other)" may trap because the select may return the first operand 1026 // "undef". 1027 if (Value *Result = foldPHINodeOrSelectInst(I)) { 1028 if (Result == *U) 1029 // If the result of the constant fold will be the pointer, recurse 1030 // through the PHI/select as if we had RAUW'ed it. 
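        // (E.g., a sketch: `select i1 %c, i32* %p, i32* %p` folds to %p
        // itself, so we keep walking the users of the select.)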
1031 enqueueUsers(I); 1032 else 1033 // Otherwise the operand to the PHI/select is dead, and we can replace 1034 // it with poison. 1035 AS.DeadOperands.push_back(U); 1036 1037 return; 1038 } 1039 1040 if (!IsOffsetKnown) 1041 return PI.setAborted(&I); 1042 1043 // See if we already have computed info on this node. 1044 uint64_t &Size = PHIOrSelectSizes[&I]; 1045 if (!Size) { 1046 // This is a new PHI/Select, check for an unsafe use of it. 1047 if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size)) 1048 return PI.setAborted(UnsafeI); 1049 } 1050 1051 // For PHI and select operands outside the alloca, we can't nuke the entire 1052 // phi or select -- the other side might still be relevant, so we special 1053 // case them here and use a separate structure to track the operands 1054 // themselves which should be replaced with poison. 1055 // FIXME: This should instead be escaped in the event we're instrumenting 1056 // for address sanitization. 1057 if (Offset.uge(AllocSize)) { 1058 AS.DeadOperands.push_back(U); 1059 return; 1060 } 1061 1062 insertUse(I, Offset, Size); 1063 } 1064 1065 void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); } 1066 1067 void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); } 1068 1069 /// Disable SROA entirely if there are unhandled users of the alloca. 1070 void visitInstruction(Instruction &I) { PI.setAborted(&I); } 1071 }; 1072 1073 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) 1074 : 1075 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1076 AI(AI), 1077 #endif 1078 PointerEscapingInstr(nullptr) { 1079 SliceBuilder PB(DL, AI, *this); 1080 SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI); 1081 if (PtrI.isEscaped() || PtrI.isAborted()) { 1082 // FIXME: We should sink the escape vs. abort info into the caller nicely, 1083 // possibly by just storing the PtrInfo in the AllocaSlices. 1084 PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst() 1085 : PtrI.getAbortingInst(); 1086 assert(PointerEscapingInstr && "Did not track a bad instruction"); 1087 return; 1088 } 1089 1090 llvm::erase_if(Slices, [](const Slice &S) { return S.isDead(); }); 1091 1092 // Sort the uses. This arranges for the offsets to be in ascending order, 1093 // and the sizes to be in descending order. 1094 llvm::stable_sort(Slices); 1095 } 1096 1097 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1098 1099 void AllocaSlices::print(raw_ostream &OS, const_iterator I, 1100 StringRef Indent) const { 1101 printSlice(OS, I, Indent); 1102 OS << "\n"; 1103 printUse(OS, I, Indent); 1104 } 1105 1106 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I, 1107 StringRef Indent) const { 1108 OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")" 1109 << " slice #" << (I - begin()) 1110 << (I->isSplittable() ? 
" (splittable)" : ""); 1111 } 1112 1113 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I, 1114 StringRef Indent) const { 1115 OS << Indent << " used by: " << *I->getUse()->getUser() << "\n"; 1116 } 1117 1118 void AllocaSlices::print(raw_ostream &OS) const { 1119 if (PointerEscapingInstr) { 1120 OS << "Can't analyze slices for alloca: " << AI << "\n" 1121 << " A pointer to this alloca escaped by:\n" 1122 << " " << *PointerEscapingInstr << "\n"; 1123 return; 1124 } 1125 1126 OS << "Slices of alloca: " << AI << "\n"; 1127 for (const_iterator I = begin(), E = end(); I != E; ++I) 1128 print(OS, I); 1129 } 1130 1131 LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const { 1132 print(dbgs(), I); 1133 } 1134 LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); } 1135 1136 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1137 1138 /// Walk the range of a partitioning looking for a common type to cover this 1139 /// sequence of slices. 1140 static std::pair<Type *, IntegerType *> 1141 findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E, 1142 uint64_t EndOffset) { 1143 Type *Ty = nullptr; 1144 bool TyIsCommon = true; 1145 IntegerType *ITy = nullptr; 1146 1147 // Note that we need to look at *every* alloca slice's Use to ensure we 1148 // always get consistent results regardless of the order of slices. 1149 for (AllocaSlices::const_iterator I = B; I != E; ++I) { 1150 Use *U = I->getUse(); 1151 if (isa<IntrinsicInst>(*U->getUser())) 1152 continue; 1153 if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset) 1154 continue; 1155 1156 Type *UserTy = nullptr; 1157 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1158 UserTy = LI->getType(); 1159 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1160 UserTy = SI->getValueOperand()->getType(); 1161 } 1162 1163 if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) { 1164 // If the type is larger than the partition, skip it. We only encounter 1165 // this for split integer operations where we want to use the type of the 1166 // entity causing the split. Also skip if the type is not a byte width 1167 // multiple. 1168 if (UserITy->getBitWidth() % 8 != 0 || 1169 UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) 1170 continue; 1171 1172 // Track the largest bitwidth integer type used in this way in case there 1173 // is no common type. 1174 if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth()) 1175 ITy = UserITy; 1176 } 1177 1178 // To avoid depending on the order of slices, Ty and TyIsCommon must not 1179 // depend on types skipped above. 1180 if (!UserTy || (Ty && Ty != UserTy)) 1181 TyIsCommon = false; // Give up on anything but an iN type. 1182 else 1183 Ty = UserTy; 1184 } 1185 1186 return {TyIsCommon ? Ty : nullptr, ITy}; 1187 } 1188 1189 /// PHI instructions that use an alloca and are subsequently loaded can be 1190 /// rewritten to load both input pointers in the pred blocks and then PHI the 1191 /// results, allowing the load of the alloca to be promoted. 1192 /// From this: 1193 /// %P2 = phi [i32* %Alloca, i32* %Other] 1194 /// %V = load i32* %P2 1195 /// to: 1196 /// %V1 = load i32* %Alloca -> will be mem2reg'd 1197 /// ... 1198 /// %V2 = load i32* %Other 1199 /// ... 1200 /// %V = phi [i32 %V1, i32 %V2] 1201 /// 1202 /// We can do this to a select if its only uses are loads and if the operands 1203 /// to the select can be loaded unconditionally. 
1204 /// 1205 /// FIXME: This should be hoisted into a generic utility, likely in 1206 /// Transforms/Util/Local.h 1207 static bool isSafePHIToSpeculate(PHINode &PN) { 1208 const DataLayout &DL = PN.getModule()->getDataLayout(); 1209 1210 // For now, we can only do this promotion if the load is in the same block 1211 // as the PHI, and if there are no stores between the phi and load. 1212 // TODO: Allow recursive phi users. 1213 // TODO: Allow stores. 1214 BasicBlock *BB = PN.getParent(); 1215 Align MaxAlign; 1216 uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType()); 1217 APInt MaxSize(APWidth, 0); 1218 bool HaveLoad = false; 1219 for (User *U : PN.users()) { 1220 LoadInst *LI = dyn_cast<LoadInst>(U); 1221 if (!LI || !LI->isSimple()) 1222 return false; 1223 1224 // For now we only allow loads in the same block as the PHI. This is 1225 // a common case that happens when instcombine merges two loads through 1226 // a PHI. 1227 if (LI->getParent() != BB) 1228 return false; 1229 1230 // Ensure that there are no instructions between the PHI and the load that 1231 // could store. 1232 for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI) 1233 if (BBI->mayWriteToMemory()) 1234 return false; 1235 1236 uint64_t Size = DL.getTypeStoreSize(LI->getType()).getFixedSize(); 1237 MaxAlign = std::max(MaxAlign, LI->getAlign()); 1238 MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize; 1239 HaveLoad = true; 1240 } 1241 1242 if (!HaveLoad) 1243 return false; 1244 1245 // We can only transform this if it is safe to push the loads into the 1246 // predecessor blocks. The only thing to watch out for is that we can't put 1247 // a possibly trapping load in the predecessor if it is a critical edge. 1248 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { 1249 Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator(); 1250 Value *InVal = PN.getIncomingValue(Idx); 1251 1252 // If the value is produced by the terminator of the predecessor (an 1253 // invoke) or it has side-effects, there is no valid place to put a load 1254 // in the predecessor. 1255 if (TI == InVal || TI->mayHaveSideEffects()) 1256 return false; 1257 1258 // If the predecessor has a single successor, then the edge isn't 1259 // critical. 1260 if (TI->getNumSuccessors() == 1) 1261 continue; 1262 1263 // If this pointer is always safe to load, or if we can prove that there 1264 // is already a load in the block, then we can move the load to the pred 1265 // block. 1266 if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI)) 1267 continue; 1268 1269 return false; 1270 } 1271 1272 return true; 1273 } 1274 1275 static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) { 1276 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 1277 1278 LoadInst *SomeLoad = cast<LoadInst>(PN.user_back()); 1279 Type *LoadTy = SomeLoad->getType(); 1280 IRB.SetInsertPoint(&PN); 1281 PHINode *NewPN = IRB.CreatePHI(LoadTy, PN.getNumIncomingValues(), 1282 PN.getName() + ".sroa.speculated"); 1283 1284 // Get the AA tags and alignment to use from one of the loads. It does not 1285 // matter which one we get and if any differ. 1286 AAMDNodes AATags = SomeLoad->getAAMetadata(); 1287 Align Alignment = SomeLoad->getAlign(); 1288 1289 // Rewrite all loads of the PN to use the new PHI. 1290 while (!PN.use_empty()) { 1291 LoadInst *LI = cast<LoadInst>(PN.user_back()); 1292 LI->replaceAllUsesWith(NewPN); 1293 LI->eraseFromParent(); 1294 } 1295 1296 // Inject loads into all of the pred blocks. 
1297 DenseMap<BasicBlock*, Value*> InjectedLoads; 1298 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { 1299 BasicBlock *Pred = PN.getIncomingBlock(Idx); 1300 Value *InVal = PN.getIncomingValue(Idx); 1301 1302 // A PHI node is allowed to have multiple (duplicated) entries for the same 1303 // basic block, as long as the value is the same. So if we already injected 1304 // a load in the predecessor, then we should reuse the same load for all 1305 // duplicated entries. 1306 if (Value* V = InjectedLoads.lookup(Pred)) { 1307 NewPN->addIncoming(V, Pred); 1308 continue; 1309 } 1310 1311 Instruction *TI = Pred->getTerminator(); 1312 IRB.SetInsertPoint(TI); 1313 1314 LoadInst *Load = IRB.CreateAlignedLoad( 1315 LoadTy, InVal, Alignment, 1316 (PN.getName() + ".sroa.speculate.load." + Pred->getName())); 1317 ++NumLoadsSpeculated; 1318 if (AATags) 1319 Load->setAAMetadata(AATags); 1320 NewPN->addIncoming(Load, Pred); 1321 InjectedLoads[Pred] = Load; 1322 } 1323 1324 LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); 1325 PN.eraseFromParent(); 1326 } 1327 1328 /// Select instructions that use an alloca and are subsequently loaded can be 1329 /// rewritten to load both input pointers and then select between the result, 1330 /// allowing the load of the alloca to be promoted. 1331 /// From this: 1332 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other 1333 /// %V = load i32* %P2 1334 /// to: 1335 /// %V1 = load i32* %Alloca -> will be mem2reg'd 1336 /// %V2 = load i32* %Other 1337 /// %V = select i1 %cond, i32 %V1, i32 %V2 1338 /// 1339 /// We can do this to a select if its only uses are loads and if the operand 1340 /// to the select can be loaded unconditionally. If found an intervening bitcast 1341 /// with a single use of the load, allow the promotion. 1342 static bool isSafeSelectToSpeculate(SelectInst &SI) { 1343 Value *TValue = SI.getTrueValue(); 1344 Value *FValue = SI.getFalseValue(); 1345 const DataLayout &DL = SI.getModule()->getDataLayout(); 1346 1347 for (User *U : SI.users()) { 1348 LoadInst *LI; 1349 BitCastInst *BC = dyn_cast<BitCastInst>(U); 1350 if (BC && BC->hasOneUse()) 1351 LI = dyn_cast<LoadInst>(*BC->user_begin()); 1352 else 1353 LI = dyn_cast<LoadInst>(U); 1354 1355 if (!LI || !LI->isSimple()) 1356 return false; 1357 1358 // Both operands to the select need to be dereferenceable, either 1359 // absolutely (e.g. allocas) or at this point because we can see other 1360 // accesses to it. 1361 if (!isSafeToLoadUnconditionally(TValue, LI->getType(), 1362 LI->getAlign(), DL, LI)) 1363 return false; 1364 if (!isSafeToLoadUnconditionally(FValue, LI->getType(), 1365 LI->getAlign(), DL, LI)) 1366 return false; 1367 } 1368 1369 return true; 1370 } 1371 1372 static void speculateSelectInstLoads(IRBuilderTy &IRB, SelectInst &SI) { 1373 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 1374 1375 IRB.SetInsertPoint(&SI); 1376 Value *TV = SI.getTrueValue(); 1377 Value *FV = SI.getFalseValue(); 1378 // Replace the loads of the select with a select of two loads. 1379 while (!SI.use_empty()) { 1380 LoadInst *LI; 1381 BitCastInst *BC = dyn_cast<BitCastInst>(SI.user_back()); 1382 if (BC) { 1383 assert(BC->hasOneUse() && "Bitcast should have a single use."); 1384 LI = cast<LoadInst>(BC->user_back()); 1385 } else { 1386 LI = cast<LoadInst>(SI.user_back()); 1387 } 1388 1389 assert(LI->isSimple() && "We only speculate simple loads"); 1390 1391 IRB.SetInsertPoint(LI); 1392 Value *NewTV = 1393 BC ? 
IRB.CreateBitCast(TV, BC->getType(), TV->getName() + ".sroa.cast") 1394 : TV; 1395 Value *NewFV = 1396 BC ? IRB.CreateBitCast(FV, BC->getType(), FV->getName() + ".sroa.cast") 1397 : FV; 1398 LoadInst *TL = IRB.CreateLoad(LI->getType(), NewTV, 1399 LI->getName() + ".sroa.speculate.load.true"); 1400 LoadInst *FL = IRB.CreateLoad(LI->getType(), NewFV, 1401 LI->getName() + ".sroa.speculate.load.false"); 1402 NumLoadsSpeculated += 2; 1403 1404 // Transfer alignment and AA info if present. 1405 TL->setAlignment(LI->getAlign()); 1406 FL->setAlignment(LI->getAlign()); 1407 1408 AAMDNodes Tags = LI->getAAMetadata(); 1409 if (Tags) { 1410 TL->setAAMetadata(Tags); 1411 FL->setAAMetadata(Tags); 1412 } 1413 1414 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, 1415 LI->getName() + ".sroa.speculated"); 1416 1417 LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n"); 1418 LI->replaceAllUsesWith(V); 1419 LI->eraseFromParent(); 1420 if (BC) 1421 BC->eraseFromParent(); 1422 } 1423 SI.eraseFromParent(); 1424 } 1425 1426 /// Build a GEP out of a base pointer and indices. 1427 /// 1428 /// This will return the BasePtr if that is valid, or build a new GEP 1429 /// instruction using the IRBuilder if GEP-ing is needed. 1430 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, 1431 SmallVectorImpl<Value *> &Indices, 1432 const Twine &NamePrefix) { 1433 if (Indices.empty()) 1434 return BasePtr; 1435 1436 // A single zero index is a no-op, so check for this and avoid building a GEP 1437 // in that case. 1438 if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero()) 1439 return BasePtr; 1440 1441 // buildGEP() is only called for non-opaque pointers. 1442 return IRB.CreateInBoundsGEP( 1443 BasePtr->getType()->getNonOpaquePointerElementType(), BasePtr, Indices, 1444 NamePrefix + "sroa_idx"); 1445 } 1446 1447 /// Get a natural GEP off of the BasePtr walking through Ty toward 1448 /// TargetTy without changing the offset of the pointer. 1449 /// 1450 /// This routine assumes we've already established a properly offset GEP with 1451 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with 1452 /// zero-indices down through type layers until we find one the same as 1453 /// TargetTy. If we can't find one with the same type, we at least try to use 1454 /// one with the same size. If none of that works, we just produce the GEP as 1455 /// indicated by Indices to have the correct offset. 1456 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, 1457 Value *BasePtr, Type *Ty, Type *TargetTy, 1458 SmallVectorImpl<Value *> &Indices, 1459 const Twine &NamePrefix) { 1460 if (Ty == TargetTy) 1461 return buildGEP(IRB, BasePtr, Indices, NamePrefix); 1462 1463 // Offset size to use for the indices. 1464 unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType()); 1465 1466 // See if we can descend into a struct and locate a field with the correct 1467 // type. 1468 unsigned NumLayers = 0; 1469 Type *ElementTy = Ty; 1470 do { 1471 if (ElementTy->isPointerTy()) 1472 break; 1473 1474 if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) { 1475 ElementTy = ArrayTy->getElementType(); 1476 Indices.push_back(IRB.getIntN(OffsetSize, 0)); 1477 } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) { 1478 ElementTy = VectorTy->getElementType(); 1479 Indices.push_back(IRB.getInt32(0)); 1480 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) { 1481 if (STy->element_begin() == STy->element_end()) 1482 break; // Nothing left to descend into. 
1483 ElementTy = *STy->element_begin(); 1484 Indices.push_back(IRB.getInt32(0)); 1485 } else { 1486 break; 1487 } 1488 ++NumLayers; 1489 } while (ElementTy != TargetTy); 1490 if (ElementTy != TargetTy) 1491 Indices.erase(Indices.end() - NumLayers, Indices.end()); 1492 1493 return buildGEP(IRB, BasePtr, Indices, NamePrefix); 1494 } 1495 1496 /// Get a natural GEP from a base pointer to a particular offset and 1497 /// resulting in a particular type. 1498 /// 1499 /// The goal is to produce a "natural" looking GEP that works with the existing 1500 /// composite types to arrive at the appropriate offset and element type for 1501 /// a pointer. TargetTy is the element type the returned GEP should point-to if 1502 /// possible. We recurse by decreasing Offset, adding the appropriate index to 1503 /// Indices, and setting Ty to the result subtype. 1504 /// 1505 /// If no natural GEP can be constructed, this function returns null. 1506 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, 1507 Value *Ptr, APInt Offset, Type *TargetTy, 1508 SmallVectorImpl<Value *> &Indices, 1509 const Twine &NamePrefix) { 1510 PointerType *Ty = cast<PointerType>(Ptr->getType()); 1511 1512 // Don't consider any GEPs through an i8* as natural unless the TargetTy is 1513 // an i8. 1514 if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8)) 1515 return nullptr; 1516 1517 Type *ElementTy = Ty->getNonOpaquePointerElementType(); 1518 if (!ElementTy->isSized()) 1519 return nullptr; // We can't GEP through an unsized element. 1520 1521 SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(ElementTy, Offset); 1522 if (Offset != 0) 1523 return nullptr; 1524 1525 for (const APInt &Index : IntIndices) 1526 Indices.push_back(IRB.getInt(Index)); 1527 return getNaturalGEPWithType(IRB, DL, Ptr, ElementTy, TargetTy, Indices, 1528 NamePrefix); 1529 } 1530 1531 /// Compute an adjusted pointer from Ptr by Offset bytes where the 1532 /// resulting pointer has PointerTy. 1533 /// 1534 /// This tries very hard to compute a "natural" GEP which arrives at the offset 1535 /// and produces the pointer type desired. Where it cannot, it will try to use 1536 /// the natural GEP to arrive at the offset and bitcast to the type. Where that 1537 /// fails, it will try to use an existing i8* and GEP to the byte offset and 1538 /// bitcast to the type. 1539 /// 1540 /// The strategy for finding the more natural GEPs is to peel off layers of the 1541 /// pointer, walking back through bit casts and GEPs, searching for a base 1542 /// pointer from which we can compute a natural GEP with the desired 1543 /// properties. The algorithm tries to fold as many constant indices into 1544 /// a single GEP as possible, thus making each GEP more independent of the 1545 /// surrounding code. 1546 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, 1547 APInt Offset, Type *PointerTy, 1548 const Twine &NamePrefix) { 1549 // Create i8 GEP for opaque pointers. 1550 if (Ptr->getType()->isOpaquePointerTy()) { 1551 if (Offset != 0) 1552 Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(Offset), 1553 NamePrefix + "sroa_idx"); 1554 return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, PointerTy, 1555 NamePrefix + "sroa_cast"); 1556 } 1557 1558 // Even though we don't look through PHI nodes, we could be called on an 1559 // instruction in an unreachable block, which may be on a cycle. 
1560 SmallPtrSet<Value *, 4> Visited;
1561 Visited.insert(Ptr);
1562 SmallVector<Value *, 4> Indices;
1563
1564 // We may end up computing an offset pointer that has the wrong type. If we
1565 // are never able to compute one directly that has the correct type, we'll
1566 // fall back to it, so keep it and the base it was computed from around here.
1567 Value *OffsetPtr = nullptr;
1568 Value *OffsetBasePtr;
1569
1570 // Remember any i8 pointer we come across to re-use if we need to do a raw
1571 // byte offset.
1572 Value *Int8Ptr = nullptr;
1573 APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1574
1575 PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
1576 Type *TargetTy = TargetPtrTy->getNonOpaquePointerElementType();
1577
1578 // As an `addrspacecast` may be involved, `Ptr` (the storage pointer) may have
1579 // a different address space from the expected `PointerTy` (the pointer to be
1580 // used). Adjust the pointer type based on the original storage pointer.
1581 auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
1582 PointerTy = TargetTy->getPointerTo(AS);
1583
1584 do {
1585 // First fold any existing GEPs into the offset.
1586 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1587 APInt GEPOffset(Offset.getBitWidth(), 0);
1588 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1589 break;
1590 Offset += GEPOffset;
1591 Ptr = GEP->getPointerOperand();
1592 if (!Visited.insert(Ptr).second)
1593 break;
1594 }
1595
1596 // See if we can perform a natural GEP here.
1597 Indices.clear();
1598 if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1599 Indices, NamePrefix)) {
1600 // If we have a new natural pointer at the offset, clear out any old
1601 // offset pointer we computed. Unless it is the base pointer or
1602 // a non-instruction, we built a GEP we don't need. Zap it.
1603 if (OffsetPtr && OffsetPtr != OffsetBasePtr)
1604 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
1605 assert(I->use_empty() && "Built a GEP with uses somehow!");
1606 I->eraseFromParent();
1607 }
1608 OffsetPtr = P;
1609 OffsetBasePtr = Ptr;
1610 // If we also found a pointer of the right type, we're done.
1611 if (P->getType() == PointerTy)
1612 break;
1613 }
1614
1615 // Stash this pointer if we've found an i8*.
1616 if (Ptr->getType()->isIntegerTy(8)) {
1617 Int8Ptr = Ptr;
1618 Int8PtrOffset = Offset;
1619 }
1620
1621 // Peel off a layer of the pointer and update the offset appropriately.
1622 if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1623 Ptr = cast<Operator>(Ptr)->getOperand(0);
1624 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1625 if (GA->isInterposable())
1626 break;
1627 Ptr = GA->getAliasee();
1628 } else {
1629 break;
1630 }
1631 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1632 } while (Visited.insert(Ptr).second);
1633
1634 if (!OffsetPtr) {
1635 if (!Int8Ptr) {
1636 Int8Ptr = IRB.CreateBitCast(
1637 Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
1638 NamePrefix + "sroa_raw_cast");
1639 Int8PtrOffset = Offset;
1640 }
1641
1642 OffsetPtr = Int8PtrOffset == 0
1643 ? Int8Ptr
1644 : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
1645 IRB.getInt(Int8PtrOffset),
1646 NamePrefix + "sroa_raw_idx");
1647 }
1648 Ptr = OffsetPtr;
1649
1650 // On the off chance we were targeting i8*, guard the bitcast here.
1651 if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) { 1652 Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, 1653 TargetPtrTy, 1654 NamePrefix + "sroa_cast"); 1655 } 1656 1657 return Ptr; 1658 } 1659 1660 /// Compute the adjusted alignment for a load or store from an offset. 1661 static Align getAdjustedAlignment(Instruction *I, uint64_t Offset) { 1662 return commonAlignment(getLoadStoreAlignment(I), Offset); 1663 } 1664 1665 /// Test whether we can convert a value from the old to the new type. 1666 /// 1667 /// This predicate should be used to guard calls to convertValue in order to 1668 /// ensure that we only try to convert viable values. The strategy is that we 1669 /// will peel off single element struct and array wrappings to get to an 1670 /// underlying value, and convert that value. 1671 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { 1672 if (OldTy == NewTy) 1673 return true; 1674 1675 // For integer types, we can't handle any bit-width differences. This would 1676 // break both vector conversions with extension and introduce endianness 1677 // issues when in conjunction with loads and stores. 1678 if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) { 1679 assert(cast<IntegerType>(OldTy)->getBitWidth() != 1680 cast<IntegerType>(NewTy)->getBitWidth() && 1681 "We can't have the same bitwidth for different int types"); 1682 return false; 1683 } 1684 1685 if (DL.getTypeSizeInBits(NewTy).getFixedSize() != 1686 DL.getTypeSizeInBits(OldTy).getFixedSize()) 1687 return false; 1688 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) 1689 return false; 1690 1691 // We can convert pointers to integers and vice-versa. Same for vectors 1692 // of pointers and integers. 1693 OldTy = OldTy->getScalarType(); 1694 NewTy = NewTy->getScalarType(); 1695 if (NewTy->isPointerTy() || OldTy->isPointerTy()) { 1696 if (NewTy->isPointerTy() && OldTy->isPointerTy()) { 1697 unsigned OldAS = OldTy->getPointerAddressSpace(); 1698 unsigned NewAS = NewTy->getPointerAddressSpace(); 1699 // Convert pointers if they are pointers from the same address space or 1700 // different integral (not non-integral) address spaces with the same 1701 // pointer size. 1702 return OldAS == NewAS || 1703 (!DL.isNonIntegralAddressSpace(OldAS) && 1704 !DL.isNonIntegralAddressSpace(NewAS) && 1705 DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS)); 1706 } 1707 1708 // We can convert integers to integral pointers, but not to non-integral 1709 // pointers. 1710 if (OldTy->isIntegerTy()) 1711 return !DL.isNonIntegralPointerType(NewTy); 1712 1713 // We can convert integral pointers to integers, but non-integral pointers 1714 // need to remain pointers. 1715 if (!DL.isNonIntegralPointerType(OldTy)) 1716 return NewTy->isIntegerTy(); 1717 1718 return false; 1719 } 1720 1721 return true; 1722 } 1723 1724 /// Generic routine to convert an SSA value to a value of a different 1725 /// type. 1726 /// 1727 /// This will try various different casting techniques, such as bitcasts, 1728 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test 1729 /// two types for viability with this routine. 
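///
/// For example (illustrative, assuming 64-bit pointers): an i64 converts to a
/// <2 x i32> with a plain bitcast, an i64 to i8* uses inttoptr, and a
/// <2 x i32> to i8* is bitcast to i64 first and then converted with inttoptr.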
1730 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1731 Type *NewTy) {
1732 Type *OldTy = V->getType();
1733 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertible to type");
1734
1735 if (OldTy == NewTy)
1736 return V;
1737
1738 assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
1739 "Integer types must be the exact same to convert.");
1740
1741 // See if we need inttoptr for this type pair. May require additional bitcast.
1742 if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1743 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1744 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1745 // Expand <4 x i32> to <2 x i8*> --> <4 x i32> to <2 x i64> to <2 x i8*>
1746 // Directly handle i64 to i8*
1747 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1748 NewTy);
1749 }
1750
1751 // See if we need ptrtoint for this type pair. May require additional bitcast.
1752 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
1753 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1754 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1755 // Expand <2 x i8*> to <4 x i32> --> <2 x i8*> to <2 x i64> to <4 x i32>
1756 // Expand i8* to i64 --> i8* to i64 to i64
1757 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1758 NewTy);
1759 }
1760
1761 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1762 unsigned OldAS = OldTy->getPointerAddressSpace();
1763 unsigned NewAS = NewTy->getPointerAddressSpace();
1764 // To convert pointers with different address spaces (they have already been
1765 // checked to be convertible, i.e. they have the same pointer size), we cannot
1766 // use `bitcast` (which is restricted to a single address space) or
1767 // `addrspacecast` (which is not always a no-op cast). Instead, use a pair
1768 // of no-op `ptrtoint`/`inttoptr` casts through an integer with the same bit
1769 // width.
1770 if (OldAS != NewAS) {
1771 assert(DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS));
1772 return IRB.CreateIntToPtr(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1773 NewTy);
1774 }
1775 }
1776
1777 return IRB.CreateBitCast(V, NewTy);
1778 }
1779
1780 /// Test whether the given slice use can be promoted to a vector.
1781 ///
1782 /// This function is called to test each entry in a partition which is slated
1783 /// for a single slice.
1784 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
1785 VectorType *Ty,
1786 uint64_t ElementSize,
1787 const DataLayout &DL) {
1788 // First validate the slice offsets.
1789 uint64_t BeginOffset =
1790 std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
1791 uint64_t BeginIndex = BeginOffset / ElementSize;
1792 if (BeginIndex * ElementSize != BeginOffset ||
1793 BeginIndex >= cast<FixedVectorType>(Ty)->getNumElements())
1794 return false;
1795 uint64_t EndOffset =
1796 std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
1797 uint64_t EndIndex = EndOffset / ElementSize;
1798 if (EndIndex * ElementSize != EndOffset ||
1799 EndIndex > cast<FixedVectorType>(Ty)->getNumElements())
1800 return false;
1801
1802 assert(EndIndex > BeginIndex && "Empty vector!");
1803 uint64_t NumElements = EndIndex - BeginIndex;
1804 Type *SliceTy = (NumElements == 1)
1805 ?
Ty->getElementType() 1806 : FixedVectorType::get(Ty->getElementType(), NumElements); 1807 1808 Type *SplitIntTy = 1809 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); 1810 1811 Use *U = S.getUse(); 1812 1813 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1814 if (MI->isVolatile()) 1815 return false; 1816 if (!S.isSplittable()) 1817 return false; // Skip any unsplittable intrinsics. 1818 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 1819 if (!II->isLifetimeStartOrEnd() && !II->isDroppable()) 1820 return false; 1821 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1822 if (LI->isVolatile()) 1823 return false; 1824 Type *LTy = LI->getType(); 1825 // Disable vector promotion when there are loads or stores of an FCA. 1826 if (LTy->isStructTy()) 1827 return false; 1828 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1829 assert(LTy->isIntegerTy()); 1830 LTy = SplitIntTy; 1831 } 1832 if (!canConvertValue(DL, SliceTy, LTy)) 1833 return false; 1834 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1835 if (SI->isVolatile()) 1836 return false; 1837 Type *STy = SI->getValueOperand()->getType(); 1838 // Disable vector promotion when there are loads or stores of an FCA. 1839 if (STy->isStructTy()) 1840 return false; 1841 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1842 assert(STy->isIntegerTy()); 1843 STy = SplitIntTy; 1844 } 1845 if (!canConvertValue(DL, STy, SliceTy)) 1846 return false; 1847 } else { 1848 return false; 1849 } 1850 1851 return true; 1852 } 1853 1854 /// Test whether the given alloca partitioning and range of slices can be 1855 /// promoted to a vector. 1856 /// 1857 /// This is a quick test to check whether we can rewrite a particular alloca 1858 /// partition (and its newly formed alloca) into a vector alloca with only 1859 /// whole-vector loads and stores such that it could be promoted to a vector 1860 /// SSA value. We only can ensure this for a limited set of operations, and we 1861 /// don't want to do the rewrites unless we are confident that the result will 1862 /// be promotable, so we have an early test here. 1863 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) { 1864 // Collect the candidate types for vector-based promotion. Also track whether 1865 // we have different element types. 1866 SmallVector<VectorType *, 4> CandidateTys; 1867 Type *CommonEltTy = nullptr; 1868 bool HaveCommonEltTy = true; 1869 auto CheckCandidateType = [&](Type *Ty) { 1870 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1871 // Return if bitcast to vectors is different for total size in bits. 1872 if (!CandidateTys.empty()) { 1873 VectorType *V = CandidateTys[0]; 1874 if (DL.getTypeSizeInBits(VTy).getFixedSize() != 1875 DL.getTypeSizeInBits(V).getFixedSize()) { 1876 CandidateTys.clear(); 1877 return; 1878 } 1879 } 1880 CandidateTys.push_back(VTy); 1881 if (!CommonEltTy) 1882 CommonEltTy = VTy->getElementType(); 1883 else if (CommonEltTy != VTy->getElementType()) 1884 HaveCommonEltTy = false; 1885 } 1886 }; 1887 // Consider any loads or stores that are the exact size of the slice. 
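// For instance (illustrative), a load of <4 x float> covering the partition
// exactly makes <4 x float> a candidate type; a candidate whose total size in
// bits differs from the first one found clears the list again above.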
1888 for (const Slice &S : P) 1889 if (S.beginOffset() == P.beginOffset() && 1890 S.endOffset() == P.endOffset()) { 1891 if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser())) 1892 CheckCandidateType(LI->getType()); 1893 else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser())) 1894 CheckCandidateType(SI->getValueOperand()->getType()); 1895 } 1896 1897 // If we didn't find a vector type, nothing to do here. 1898 if (CandidateTys.empty()) 1899 return nullptr; 1900 1901 // Remove non-integer vector types if we had multiple common element types. 1902 // FIXME: It'd be nice to replace them with integer vector types, but we can't 1903 // do that until all the backends are known to produce good code for all 1904 // integer vector types. 1905 if (!HaveCommonEltTy) { 1906 llvm::erase_if(CandidateTys, [](VectorType *VTy) { 1907 return !VTy->getElementType()->isIntegerTy(); 1908 }); 1909 1910 // If there were no integer vector types, give up. 1911 if (CandidateTys.empty()) 1912 return nullptr; 1913 1914 // Rank the remaining candidate vector types. This is easy because we know 1915 // they're all integer vectors. We sort by ascending number of elements. 1916 auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { 1917 (void)DL; 1918 assert(DL.getTypeSizeInBits(RHSTy).getFixedSize() == 1919 DL.getTypeSizeInBits(LHSTy).getFixedSize() && 1920 "Cannot have vector types of different sizes!"); 1921 assert(RHSTy->getElementType()->isIntegerTy() && 1922 "All non-integer types eliminated!"); 1923 assert(LHSTy->getElementType()->isIntegerTy() && 1924 "All non-integer types eliminated!"); 1925 return cast<FixedVectorType>(RHSTy)->getNumElements() < 1926 cast<FixedVectorType>(LHSTy)->getNumElements(); 1927 }; 1928 llvm::sort(CandidateTys, RankVectorTypes); 1929 CandidateTys.erase( 1930 std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), 1931 CandidateTys.end()); 1932 } else { 1933 // The only way to have the same element type in every vector type is to 1934 // have the same vector type. Check that and remove all but one. 1935 #ifndef NDEBUG 1936 for (VectorType *VTy : CandidateTys) { 1937 assert(VTy->getElementType() == CommonEltTy && 1938 "Unaccounted for element type!"); 1939 assert(VTy == CandidateTys[0] && 1940 "Different vector types with the same element type!"); 1941 } 1942 #endif 1943 CandidateTys.resize(1); 1944 } 1945 1946 // Try each vector type, and return the one which works. 1947 auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { 1948 uint64_t ElementSize = 1949 DL.getTypeSizeInBits(VTy->getElementType()).getFixedSize(); 1950 1951 // While the definition of LLVM vectors is bitpacked, we don't support sizes 1952 // that aren't byte sized. 1953 if (ElementSize % 8) 1954 return false; 1955 assert((DL.getTypeSizeInBits(VTy).getFixedSize() % 8) == 0 && 1956 "vector size not a multiple of element size?"); 1957 ElementSize /= 8; 1958 1959 for (const Slice &S : P) 1960 if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL)) 1961 return false; 1962 1963 for (const Slice *S : P.splitSliceTails()) 1964 if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL)) 1965 return false; 1966 1967 return true; 1968 }; 1969 for (VectorType *VTy : CandidateTys) 1970 if (CheckVectorTypeForPromotion(VTy)) 1971 return VTy; 1972 1973 return nullptr; 1974 } 1975 1976 /// Test whether a slice of an alloca is valid for integer widening. 
1977 /// 1978 /// This implements the necessary checking for the \c isIntegerWideningViable 1979 /// test below on a single slice of the alloca. 1980 static bool isIntegerWideningViableForSlice(const Slice &S, 1981 uint64_t AllocBeginOffset, 1982 Type *AllocaTy, 1983 const DataLayout &DL, 1984 bool &WholeAllocaOp) { 1985 uint64_t Size = DL.getTypeStoreSize(AllocaTy).getFixedSize(); 1986 1987 uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; 1988 uint64_t RelEnd = S.endOffset() - AllocBeginOffset; 1989 1990 // We can't reasonably handle cases where the load or store extends past 1991 // the end of the alloca's type and into its padding. 1992 if (RelEnd > Size) 1993 return false; 1994 1995 Use *U = S.getUse(); 1996 1997 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1998 if (LI->isVolatile()) 1999 return false; 2000 // We can't handle loads that extend past the allocated memory. 2001 if (DL.getTypeStoreSize(LI->getType()).getFixedSize() > Size) 2002 return false; 2003 // So far, AllocaSliceRewriter does not support widening split slice tails 2004 // in rewriteIntegerLoad. 2005 if (S.beginOffset() < AllocBeginOffset) 2006 return false; 2007 // Note that we don't count vector loads or stores as whole-alloca 2008 // operations which enable integer widening because we would prefer to use 2009 // vector widening instead. 2010 if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size) 2011 WholeAllocaOp = true; 2012 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { 2013 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize()) 2014 return false; 2015 } else if (RelBegin != 0 || RelEnd != Size || 2016 !canConvertValue(DL, AllocaTy, LI->getType())) { 2017 // Non-integer loads need to be convertible from the alloca type so that 2018 // they are promotable. 2019 return false; 2020 } 2021 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 2022 Type *ValueTy = SI->getValueOperand()->getType(); 2023 if (SI->isVolatile()) 2024 return false; 2025 // We can't handle stores that extend past the allocated memory. 2026 if (DL.getTypeStoreSize(ValueTy).getFixedSize() > Size) 2027 return false; 2028 // So far, AllocaSliceRewriter does not support widening split slice tails 2029 // in rewriteIntegerStore. 2030 if (S.beginOffset() < AllocBeginOffset) 2031 return false; 2032 // Note that we don't count vector loads or stores as whole-alloca 2033 // operations which enable integer widening because we would prefer to use 2034 // vector widening instead. 2035 if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size) 2036 WholeAllocaOp = true; 2037 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) { 2038 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize()) 2039 return false; 2040 } else if (RelBegin != 0 || RelEnd != Size || 2041 !canConvertValue(DL, ValueTy, AllocaTy)) { 2042 // Non-integer stores need to be convertible to the alloca type so that 2043 // they are promotable. 2044 return false; 2045 } 2046 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 2047 if (MI->isVolatile() || !isa<Constant>(MI->getLength())) 2048 return false; 2049 if (!S.isSplittable()) 2050 return false; // Skip any unsplittable intrinsics. 
2051 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 2052 if (!II->isLifetimeStartOrEnd() && !II->isDroppable()) 2053 return false; 2054 } else { 2055 return false; 2056 } 2057 2058 return true; 2059 } 2060 2061 /// Test whether the given alloca partition's integer operations can be 2062 /// widened to promotable ones. 2063 /// 2064 /// This is a quick test to check whether we can rewrite the integer loads and 2065 /// stores to a particular alloca into wider loads and stores and be able to 2066 /// promote the resulting alloca. 2067 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy, 2068 const DataLayout &DL) { 2069 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy).getFixedSize(); 2070 // Don't create integer types larger than the maximum bitwidth. 2071 if (SizeInBits > IntegerType::MAX_INT_BITS) 2072 return false; 2073 2074 // Don't try to handle allocas with bit-padding. 2075 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy).getFixedSize()) 2076 return false; 2077 2078 // We need to ensure that an integer type with the appropriate bitwidth can 2079 // be converted to the alloca type, whatever that is. We don't want to force 2080 // the alloca itself to have an integer type if there is a more suitable one. 2081 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); 2082 if (!canConvertValue(DL, AllocaTy, IntTy) || 2083 !canConvertValue(DL, IntTy, AllocaTy)) 2084 return false; 2085 2086 // While examining uses, we ensure that the alloca has a covering load or 2087 // store. We don't want to widen the integer operations only to fail to 2088 // promote due to some other unsplittable entry (which we may make splittable 2089 // later). However, if there are only splittable uses, go ahead and assume 2090 // that we cover the alloca. 2091 // FIXME: We shouldn't consider split slices that happen to start in the 2092 // partition here... 
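// As a rough example (illustrative): an i64 alloca accessed only through two
// i32 loads at offsets 0 and 4 passes the per-slice checks below, but is not
// widened because no single covering (whole-alloca) load or store exists.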
2093 bool WholeAllocaOp = P.empty() && DL.isLegalInteger(SizeInBits); 2094 2095 for (const Slice &S : P) 2096 if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL, 2097 WholeAllocaOp)) 2098 return false; 2099 2100 for (const Slice *S : P.splitSliceTails()) 2101 if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL, 2102 WholeAllocaOp)) 2103 return false; 2104 2105 return WholeAllocaOp; 2106 } 2107 2108 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 2109 IntegerType *Ty, uint64_t Offset, 2110 const Twine &Name) { 2111 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2112 IntegerType *IntTy = cast<IntegerType>(V->getType()); 2113 assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <= 2114 DL.getTypeStoreSize(IntTy).getFixedSize() && 2115 "Element extends past full value"); 2116 uint64_t ShAmt = 8 * Offset; 2117 if (DL.isBigEndian()) 2118 ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() - 2119 DL.getTypeStoreSize(Ty).getFixedSize() - Offset); 2120 if (ShAmt) { 2121 V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); 2122 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2123 } 2124 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2125 "Cannot extract to a larger integer!"); 2126 if (Ty != IntTy) { 2127 V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); 2128 LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n"); 2129 } 2130 return V; 2131 } 2132 2133 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, 2134 Value *V, uint64_t Offset, const Twine &Name) { 2135 IntegerType *IntTy = cast<IntegerType>(Old->getType()); 2136 IntegerType *Ty = cast<IntegerType>(V->getType()); 2137 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2138 "Cannot insert a larger integer!"); 2139 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2140 if (Ty != IntTy) { 2141 V = IRB.CreateZExt(V, IntTy, Name + ".ext"); 2142 LLVM_DEBUG(dbgs() << " extended: " << *V << "\n"); 2143 } 2144 assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <= 2145 DL.getTypeStoreSize(IntTy).getFixedSize() && 2146 "Element store outside of alloca store"); 2147 uint64_t ShAmt = 8 * Offset; 2148 if (DL.isBigEndian()) 2149 ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() - 2150 DL.getTypeStoreSize(Ty).getFixedSize() - Offset); 2151 if (ShAmt) { 2152 V = IRB.CreateShl(V, ShAmt, Name + ".shift"); 2153 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2154 } 2155 2156 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { 2157 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); 2158 Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); 2159 LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n"); 2160 V = IRB.CreateOr(Old, V, Name + ".insert"); 2161 LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n"); 2162 } 2163 return V; 2164 } 2165 2166 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, 2167 unsigned EndIndex, const Twine &Name) { 2168 auto *VecTy = cast<FixedVectorType>(V->getType()); 2169 unsigned NumElements = EndIndex - BeginIndex; 2170 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2171 2172 if (NumElements == VecTy->getNumElements()) 2173 return V; 2174 2175 if (NumElements == 1) { 2176 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex), 2177 Name + ".extract"); 2178 LLVM_DEBUG(dbgs() << " extract: " << *V << "\n"); 2179 return V; 2180 } 2181 2182 SmallVector<int, 8> Mask; 2183 Mask.reserve(NumElements); 2184 for (unsigned i = BeginIndex; i != EndIndex; ++i) 2185 Mask.push_back(i); 2186 
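// E.g. (illustrative) extracting elements [1, 3) of a <4 x i32> builds the
// mask <1, 2>, and the shuffle below produces a <2 x i32>.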
V = IRB.CreateShuffleVector(V, Mask, Name + ".extract");
2187 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2188 return V;
2189 }
2190
2191 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
2192 unsigned BeginIndex, const Twine &Name) {
2193 VectorType *VecTy = cast<VectorType>(Old->getType());
2194 assert(VecTy && "Can only insert a vector into a vector");
2195
2196 VectorType *Ty = dyn_cast<VectorType>(V->getType());
2197 if (!Ty) {
2198 // Single element to insert.
2199 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
2200 Name + ".insert");
2201 LLVM_DEBUG(dbgs() << " insert: " << *V << "\n");
2202 return V;
2203 }
2204
2205 assert(cast<FixedVectorType>(Ty)->getNumElements() <=
2206 cast<FixedVectorType>(VecTy)->getNumElements() &&
2207 "Too many elements!");
2208 if (cast<FixedVectorType>(Ty)->getNumElements() ==
2209 cast<FixedVectorType>(VecTy)->getNumElements()) {
2210 assert(V->getType() == VecTy && "Vector type mismatch");
2211 return V;
2212 }
2213 unsigned EndIndex = BeginIndex + cast<FixedVectorType>(Ty)->getNumElements();
2214
2215 // When inserting a smaller vector into the larger one to store, we first
2216 // use a shuffle vector to widen it with undef elements, and then
2217 // a select to blend the widened vector with the loaded vector, keeping the
2218 // incoming elements only where they were inserted.
2219 SmallVector<int, 8> Mask;
2220 Mask.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
2221 for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
2222 if (i >= BeginIndex && i < EndIndex)
2223 Mask.push_back(i - BeginIndex);
2224 else
2225 Mask.push_back(-1);
2226 V = IRB.CreateShuffleVector(V, Mask, Name + ".expand");
2227 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2228
2229 SmallVector<Constant *, 8> Mask2;
2230 Mask2.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
2231 for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
2232 Mask2.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2233
2234 V = IRB.CreateSelect(ConstantVector::get(Mask2), V, Old, Name + "blend");
2235
2236 LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
2237 return V;
2238 }
2239
2240 /// Visitor to rewrite instructions using a particular slice of an alloca
2241 /// to use a new alloca.
2242 ///
2243 /// Also implements the rewriting to vector-based accesses when the partition
2244 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2245 /// lives here.
2246 class llvm::sroa::AllocaSliceRewriter
2247 : public InstVisitor<AllocaSliceRewriter, bool> {
2248 // Befriend the base class so it can delegate to private visit methods.
2249 friend class InstVisitor<AllocaSliceRewriter, bool>;
2250
2251 using Base = InstVisitor<AllocaSliceRewriter, bool>;
2252
2253 const DataLayout &DL;
2254 AllocaSlices &AS;
2255 SROAPass &Pass;
2256 AllocaInst &OldAI, &NewAI;
2257 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
2258 Type *NewAllocaTy;
2259
2260 // This is a convenience and flag variable that will be null unless the new
2261 // alloca's integer operations should be widened to this integer type due to
2262 // passing isIntegerWideningViable above. If it is non-null, the desired
2263 // integer type will be stored here for easy access during rewriting.
2264 IntegerType *IntTy;
2265
2266 // If we are rewriting an alloca partition which can be written as pure
2267 // vector operations, we stash extra information here.
When VecTy is 2268 // non-null, we have some strict guarantees about the rewritten alloca: 2269 // - The new alloca is exactly the size of the vector type here. 2270 // - The accesses all either map to the entire vector or to a single 2271 // element. 2272 // - The set of accessing instructions is only one of those handled above 2273 // in isVectorPromotionViable. Generally these are the same access kinds 2274 // which are promotable via mem2reg. 2275 VectorType *VecTy; 2276 Type *ElementTy; 2277 uint64_t ElementSize; 2278 2279 // The original offset of the slice currently being rewritten relative to 2280 // the original alloca. 2281 uint64_t BeginOffset = 0; 2282 uint64_t EndOffset = 0; 2283 2284 // The new offsets of the slice currently being rewritten relative to the 2285 // original alloca. 2286 uint64_t NewBeginOffset = 0, NewEndOffset = 0; 2287 2288 uint64_t SliceSize = 0; 2289 bool IsSplittable = false; 2290 bool IsSplit = false; 2291 Use *OldUse = nullptr; 2292 Instruction *OldPtr = nullptr; 2293 2294 // Track post-rewrite users which are PHI nodes and Selects. 2295 SmallSetVector<PHINode *, 8> &PHIUsers; 2296 SmallSetVector<SelectInst *, 8> &SelectUsers; 2297 2298 // Utility IR builder, whose name prefix is setup for each visited use, and 2299 // the insertion point is set to point to the user. 2300 IRBuilderTy IRB; 2301 2302 public: 2303 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROAPass &Pass, 2304 AllocaInst &OldAI, AllocaInst &NewAI, 2305 uint64_t NewAllocaBeginOffset, 2306 uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, 2307 VectorType *PromotableVecTy, 2308 SmallSetVector<PHINode *, 8> &PHIUsers, 2309 SmallSetVector<SelectInst *, 8> &SelectUsers) 2310 : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), 2311 NewAllocaBeginOffset(NewAllocaBeginOffset), 2312 NewAllocaEndOffset(NewAllocaEndOffset), 2313 NewAllocaTy(NewAI.getAllocatedType()), 2314 IntTy( 2315 IsIntegerPromotable 2316 ? Type::getIntNTy(NewAI.getContext(), 2317 DL.getTypeSizeInBits(NewAI.getAllocatedType()) 2318 .getFixedSize()) 2319 : nullptr), 2320 VecTy(PromotableVecTy), 2321 ElementTy(VecTy ? VecTy->getElementType() : nullptr), 2322 ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8 2323 : 0), 2324 PHIUsers(PHIUsers), SelectUsers(SelectUsers), 2325 IRB(NewAI.getContext(), ConstantFolder()) { 2326 if (VecTy) { 2327 assert((DL.getTypeSizeInBits(ElementTy).getFixedSize() % 8) == 0 && 2328 "Only multiple-of-8 sized vector elements are viable"); 2329 ++NumVectorized; 2330 } 2331 assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); 2332 } 2333 2334 bool visit(AllocaSlices::const_iterator I) { 2335 bool CanSROA = true; 2336 BeginOffset = I->beginOffset(); 2337 EndOffset = I->endOffset(); 2338 IsSplittable = I->isSplittable(); 2339 IsSplit = 2340 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; 2341 LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); 2342 LLVM_DEBUG(AS.printSlice(dbgs(), I, "")); 2343 LLVM_DEBUG(dbgs() << "\n"); 2344 2345 // Compute the intersecting offset range. 
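// For example (illustrative), rewriting a split slice covering [0, 16)
// against a new alloca spanning [8, 24) yields NewBeginOffset = 8,
// NewEndOffset = 16, and SliceSize = 8 below.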
2346 assert(BeginOffset < NewAllocaEndOffset); 2347 assert(EndOffset > NewAllocaBeginOffset); 2348 NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); 2349 NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); 2350 2351 SliceSize = NewEndOffset - NewBeginOffset; 2352 2353 OldUse = I->getUse(); 2354 OldPtr = cast<Instruction>(OldUse->get()); 2355 2356 Instruction *OldUserI = cast<Instruction>(OldUse->getUser()); 2357 IRB.SetInsertPoint(OldUserI); 2358 IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); 2359 IRB.getInserter().SetNamePrefix( 2360 Twine(NewAI.getName()) + "." + Twine(BeginOffset) + "."); 2361 2362 CanSROA &= visit(cast<Instruction>(OldUse->getUser())); 2363 if (VecTy || IntTy) 2364 assert(CanSROA); 2365 return CanSROA; 2366 } 2367 2368 private: 2369 // Make sure the other visit overloads are visible. 2370 using Base::visit; 2371 2372 // Every instruction which can end up as a user must have a rewrite rule. 2373 bool visitInstruction(Instruction &I) { 2374 LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); 2375 llvm_unreachable("No rewrite rule for this instruction!"); 2376 } 2377 2378 Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) { 2379 // Note that the offset computation can use BeginOffset or NewBeginOffset 2380 // interchangeably for unsplit slices. 2381 assert(IsSplit || BeginOffset == NewBeginOffset); 2382 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2383 2384 #ifndef NDEBUG 2385 StringRef OldName = OldPtr->getName(); 2386 // Skip through the last '.sroa.' component of the name. 2387 size_t LastSROAPrefix = OldName.rfind(".sroa."); 2388 if (LastSROAPrefix != StringRef::npos) { 2389 OldName = OldName.substr(LastSROAPrefix + strlen(".sroa.")); 2390 // Look for an SROA slice index. 2391 size_t IndexEnd = OldName.find_first_not_of("0123456789"); 2392 if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') { 2393 // Strip the index and look for the offset. 2394 OldName = OldName.substr(IndexEnd + 1); 2395 size_t OffsetEnd = OldName.find_first_not_of("0123456789"); 2396 if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.') 2397 // Strip the offset. 2398 OldName = OldName.substr(OffsetEnd + 1); 2399 } 2400 } 2401 // Strip any SROA suffixes as well. 2402 OldName = OldName.substr(0, OldName.find(".sroa_")); 2403 #endif 2404 2405 return getAdjustedPtr(IRB, DL, &NewAI, 2406 APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset), 2407 PointerTy, 2408 #ifndef NDEBUG 2409 Twine(OldName) + "." 2410 #else 2411 Twine() 2412 #endif 2413 ); 2414 } 2415 2416 /// Compute suitable alignment to access this slice of the *new* 2417 /// alloca. 2418 /// 2419 /// You can optionally pass a type to this routine and if that type's ABI 2420 /// alignment is itself suitable, this will return zero. 
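///
/// For example (illustrative), a new alloca with alignment 8 accessed at a
/// slice offset of 4 bytes yields an access alignment of 4.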
2421 Align getSliceAlign() { 2422 return commonAlignment(NewAI.getAlign(), 2423 NewBeginOffset - NewAllocaBeginOffset); 2424 } 2425 2426 unsigned getIndex(uint64_t Offset) { 2427 assert(VecTy && "Can only call getIndex when rewriting a vector"); 2428 uint64_t RelOffset = Offset - NewAllocaBeginOffset; 2429 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); 2430 uint32_t Index = RelOffset / ElementSize; 2431 assert(Index * ElementSize == RelOffset); 2432 return Index; 2433 } 2434 2435 void deleteIfTriviallyDead(Value *V) { 2436 Instruction *I = cast<Instruction>(V); 2437 if (isInstructionTriviallyDead(I)) 2438 Pass.DeadInsts.push_back(I); 2439 } 2440 2441 Value *rewriteVectorizedLoadInst(LoadInst &LI) { 2442 unsigned BeginIndex = getIndex(NewBeginOffset); 2443 unsigned EndIndex = getIndex(NewEndOffset); 2444 assert(EndIndex > BeginIndex && "Empty vector!"); 2445 2446 LoadInst *Load = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2447 NewAI.getAlign(), "load"); 2448 2449 Load->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access, 2450 LLVMContext::MD_access_group}); 2451 return extractVector(IRB, Load, BeginIndex, EndIndex, "vec"); 2452 } 2453 2454 Value *rewriteIntegerLoad(LoadInst &LI) { 2455 assert(IntTy && "We cannot insert an integer to the alloca"); 2456 assert(!LI.isVolatile()); 2457 Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2458 NewAI.getAlign(), "load"); 2459 V = convertValue(DL, IRB, V, IntTy); 2460 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2461 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2462 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) { 2463 IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8); 2464 V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract"); 2465 } 2466 // It is possible that the extracted type is not the load type. This 2467 // happens if there is a load past the end of the alloca, and as 2468 // a consequence the slice is narrower but still a candidate for integer 2469 // lowering. To handle this case, we just zero extend the extracted 2470 // integer. 2471 assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 && 2472 "Can only handle an extract for an overly wide load"); 2473 if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8) 2474 V = IRB.CreateZExt(V, LI.getType()); 2475 return V; 2476 } 2477 2478 bool visitLoadInst(LoadInst &LI) { 2479 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 2480 Value *OldOp = LI.getOperand(0); 2481 assert(OldOp == OldPtr); 2482 2483 AAMDNodes AATags = LI.getAAMetadata(); 2484 2485 unsigned AS = LI.getPointerAddressSpace(); 2486 2487 Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), SliceSize * 8)
2488 : LI.getType();
2489 const bool IsLoadPastEnd =
2490 DL.getTypeStoreSize(TargetTy).getFixedSize() > SliceSize;
2491 bool IsPtrAdjusted = false;
2492 Value *V;
2493 if (VecTy) {
2494 V = rewriteVectorizedLoadInst(LI);
2495 } else if (IntTy && LI.getType()->isIntegerTy()) {
2496 V = rewriteIntegerLoad(LI);
2497 } else if (NewBeginOffset == NewAllocaBeginOffset &&
2498 NewEndOffset == NewAllocaEndOffset &&
2499 (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2500 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2501 TargetTy->isIntegerTy()))) {
2502 LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2503 NewAI.getAlign(), LI.isVolatile(),
2504 LI.getName());
2505 if (AATags)
2506 NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2507 if (LI.isVolatile())
2508 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2509 if (NewLI->isAtomic())
2510 NewLI->setAlignment(LI.getAlign());
2511
2512 // Any !nonnull metadata or !range metadata on the old load is also valid
2513 // on the new load. This is true in some cases even when the loads
2514 // are different types, for example by mapping !nonnull metadata to
2515 // !range metadata by modeling the null pointer constant converted to the
2516 // integer type.
2517 // FIXME: Add support for range metadata here. Currently the utilities
2518 // for this don't propagate range metadata in trivial cases from one
2519 // integer load to another, don't handle non-addrspace-0 null pointers
2520 // correctly, and don't have any support for mapping ranges as the
2521 // integer type becomes wider or narrower.
2522 if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2523 copyNonnullMetadata(LI, N, *NewLI);
2524
2525 // Try to preserve nonnull metadata
2526 V = NewLI;
2527
2528 // If this is an integer load past the end of the slice (which means the
2529 // bytes outside the slice are undef or this load is dead) just forcibly
2530 // fix the integer size with correct handling of endianness.
2531 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2532 if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2533 if (AITy->getBitWidth() < TITy->getBitWidth()) {
2534 V = IRB.CreateZExt(V, TITy, "load.ext");
2535 if (DL.isBigEndian())
2536 V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2537 "endian_shift");
2538 }
2539 } else {
2540 Type *LTy = TargetTy->getPointerTo(AS);
2541 LoadInst *NewLI =
2542 IRB.CreateAlignedLoad(TargetTy, getNewAllocaSlicePtr(IRB, LTy),
2543 getSliceAlign(), LI.isVolatile(), LI.getName());
2544 if (AATags)
2545 NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2546 if (LI.isVolatile())
2547 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2548 NewLI->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access,
2549 LLVMContext::MD_access_group});
2550
2551 V = NewLI;
2552 IsPtrAdjusted = true;
2553 }
2554 V = convertValue(DL, IRB, V, TargetTy);
2555
2556 if (IsSplit) {
2557 assert(!LI.isVolatile());
2558 assert(LI.getType()->isIntegerTy() &&
2559 "Only integer type loads and stores are split");
2560 assert(SliceSize < DL.getTypeStoreSize(LI.getType()).getFixedSize() &&
2561 "Split load isn't smaller than original load");
2562 assert(DL.typeSizeEqualsStoreSize(LI.getType()) &&
2563 "Non-byte-multiple bit width");
2564 // Move the insertion point just past the load so that we can refer to it.
2565 IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI))); 2566 // Create a placeholder value with the same type as LI to use as the 2567 // basis for the new value. This allows us to replace the uses of LI with 2568 // the computed value, and then replace the placeholder with LI, leaving 2569 // LI only used for this computation. 2570 Value *Placeholder = new LoadInst( 2571 LI.getType(), PoisonValue::get(LI.getType()->getPointerTo(AS)), "", 2572 false, Align(1)); 2573 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset, 2574 "insert"); 2575 LI.replaceAllUsesWith(V); 2576 Placeholder->replaceAllUsesWith(&LI); 2577 Placeholder->deleteValue(); 2578 } else { 2579 LI.replaceAllUsesWith(V); 2580 } 2581 2582 Pass.DeadInsts.push_back(&LI); 2583 deleteIfTriviallyDead(OldOp); 2584 LLVM_DEBUG(dbgs() << " to: " << *V << "\n"); 2585 return !LI.isVolatile() && !IsPtrAdjusted; 2586 } 2587 2588 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp, 2589 AAMDNodes AATags) { 2590 if (V->getType() != VecTy) { 2591 unsigned BeginIndex = getIndex(NewBeginOffset); 2592 unsigned EndIndex = getIndex(NewEndOffset); 2593 assert(EndIndex > BeginIndex && "Empty vector!"); 2594 unsigned NumElements = EndIndex - BeginIndex; 2595 assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() && 2596 "Too many elements!"); 2597 Type *SliceTy = (NumElements == 1) 2598 ? ElementTy 2599 : FixedVectorType::get(ElementTy, NumElements); 2600 if (V->getType() != SliceTy) 2601 V = convertValue(DL, IRB, V, SliceTy); 2602 2603 // Mix in the existing elements. 2604 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2605 NewAI.getAlign(), "load"); 2606 V = insertVector(IRB, Old, V, BeginIndex, "vec"); 2607 } 2608 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); 2609 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2610 LLVMContext::MD_access_group}); 2611 if (AATags) 2612 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2613 Pass.DeadInsts.push_back(&SI); 2614 2615 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2616 return true; 2617 } 2618 2619 bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) { 2620 assert(IntTy && "We cannot extract an integer from the alloca"); 2621 assert(!SI.isVolatile()); 2622 if (DL.getTypeSizeInBits(V->getType()).getFixedSize() != 2623 IntTy->getBitWidth()) { 2624 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2625 NewAI.getAlign(), "oldload"); 2626 Old = convertValue(DL, IRB, Old, IntTy); 2627 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2628 uint64_t Offset = BeginOffset - NewAllocaBeginOffset; 2629 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); 2630 } 2631 V = convertValue(DL, IRB, V, NewAllocaTy); 2632 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); 2633 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2634 LLVMContext::MD_access_group}); 2635 if (AATags) 2636 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2637 Pass.DeadInsts.push_back(&SI); 2638 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2639 return true; 2640 } 2641 2642 bool visitStoreInst(StoreInst &SI) { 2643 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 2644 Value *OldOp = SI.getOperand(1); 2645 assert(OldOp == OldPtr); 2646 2647 AAMDNodes AATags = SI.getAAMetadata(); 2648 Value *V = SI.getValueOperand(); 2649 2650 // Strip all inbounds GEPs and pointer casts to 
try to dig out any root 2651 // alloca that should be re-examined after promoting this alloca. 2652 if (V->getType()->isPointerTy()) 2653 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) 2654 Pass.PostPromotionWorklist.insert(AI); 2655 2656 if (SliceSize < DL.getTypeStoreSize(V->getType()).getFixedSize()) { 2657 assert(!SI.isVolatile()); 2658 assert(V->getType()->isIntegerTy() && 2659 "Only integer type loads and stores are split"); 2660 assert(DL.typeSizeEqualsStoreSize(V->getType()) && 2661 "Non-byte-multiple bit width"); 2662 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); 2663 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, 2664 "extract"); 2665 } 2666 2667 if (VecTy) 2668 return rewriteVectorizedStoreInst(V, SI, OldOp, AATags); 2669 if (IntTy && V->getType()->isIntegerTy()) 2670 return rewriteIntegerStore(V, SI, AATags); 2671 2672 const bool IsStorePastEnd = 2673 DL.getTypeStoreSize(V->getType()).getFixedSize() > SliceSize; 2674 StoreInst *NewSI; 2675 if (NewBeginOffset == NewAllocaBeginOffset && 2676 NewEndOffset == NewAllocaEndOffset && 2677 (canConvertValue(DL, V->getType(), NewAllocaTy) || 2678 (IsStorePastEnd && NewAllocaTy->isIntegerTy() && 2679 V->getType()->isIntegerTy()))) { 2680 // If this is an integer store past the end of slice (and thus the bytes 2681 // past that point are irrelevant or this is unreachable), truncate the 2682 // value prior to storing. 2683 if (auto *VITy = dyn_cast<IntegerType>(V->getType())) 2684 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) 2685 if (VITy->getBitWidth() > AITy->getBitWidth()) { 2686 if (DL.isBigEndian()) 2687 V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(), 2688 "endian_shift"); 2689 V = IRB.CreateTrunc(V, AITy, "load.trunc"); 2690 } 2691 2692 V = convertValue(DL, IRB, V, NewAllocaTy); 2693 NewSI = 2694 IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), SI.isVolatile()); 2695 } else { 2696 unsigned AS = SI.getPointerAddressSpace(); 2697 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS)); 2698 NewSI = 2699 IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(), SI.isVolatile()); 2700 } 2701 NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2702 LLVMContext::MD_access_group}); 2703 if (AATags) 2704 NewSI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2705 if (SI.isVolatile()) 2706 NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID()); 2707 if (NewSI->isAtomic()) 2708 NewSI->setAlignment(SI.getAlign()); 2709 Pass.DeadInsts.push_back(&SI); 2710 deleteIfTriviallyDead(OldOp); 2711 2712 LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n"); 2713 return NewSI->getPointerOperand() == &NewAI && 2714 NewSI->getValueOperand()->getType() == NewAllocaTy && 2715 !SI.isVolatile(); 2716 } 2717 2718 /// Compute an integer value from splatting an i8 across the given 2719 /// number of bytes. 2720 /// 2721 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't 2722 /// call this routine. 2723 /// FIXME: Heed the advice above. 2724 /// 2725 /// \param V The i8 value to splat. 
2726 /// \param Size The number of bytes in the output (assuming i8 is one byte) 2727 Value *getIntegerSplat(Value *V, unsigned Size) { 2728 assert(Size > 0 && "Expected a positive number of bytes."); 2729 IntegerType *VTy = cast<IntegerType>(V->getType()); 2730 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); 2731 if (Size == 1) 2732 return V; 2733 2734 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); 2735 V = IRB.CreateMul( 2736 IRB.CreateZExt(V, SplatIntTy, "zext"), 2737 ConstantExpr::getUDiv( 2738 Constant::getAllOnesValue(SplatIntTy), 2739 ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()), 2740 SplatIntTy)), 2741 "isplat"); 2742 return V; 2743 } 2744 2745 /// Compute a vector splat for a given element value. 2746 Value *getVectorSplat(Value *V, unsigned NumElements) { 2747 V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); 2748 LLVM_DEBUG(dbgs() << " splat: " << *V << "\n"); 2749 return V; 2750 } 2751 2752 bool visitMemSetInst(MemSetInst &II) { 2753 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 2754 assert(II.getRawDest() == OldPtr); 2755 2756 AAMDNodes AATags = II.getAAMetadata(); 2757 2758 // If the memset has a variable size, it cannot be split, just adjust the 2759 // pointer to the new alloca. 2760 if (!isa<ConstantInt>(II.getLength())) { 2761 assert(!IsSplit); 2762 assert(NewBeginOffset == BeginOffset); 2763 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); 2764 II.setDestAlignment(getSliceAlign()); 2765 2766 deleteIfTriviallyDead(OldPtr); 2767 return false; 2768 } 2769 2770 // Record this instruction for deletion. 2771 Pass.DeadInsts.push_back(&II); 2772 2773 Type *AllocaTy = NewAI.getAllocatedType(); 2774 Type *ScalarTy = AllocaTy->getScalarType(); 2775 2776 const bool CanContinue = [&]() { 2777 if (VecTy || IntTy) 2778 return true; 2779 if (BeginOffset > NewAllocaBeginOffset || 2780 EndOffset < NewAllocaEndOffset) 2781 return false; 2782 // Length must be in range for FixedVectorType. 2783 auto *C = cast<ConstantInt>(II.getLength()); 2784 const uint64_t Len = C->getLimitedValue(); 2785 if (Len > std::numeric_limits<unsigned>::max()) 2786 return false; 2787 auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext()); 2788 auto *SrcTy = FixedVectorType::get(Int8Ty, Len); 2789 return canConvertValue(DL, SrcTy, AllocaTy) && 2790 DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy).getFixedSize()); 2791 }(); 2792 2793 // If this doesn't map cleanly onto the alloca type, and that type isn't 2794 // a single value type, just emit a memset. 2795 if (!CanContinue) { 2796 Type *SizeTy = II.getLength()->getType(); 2797 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2798 CallInst *New = IRB.CreateMemSet( 2799 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, 2800 MaybeAlign(getSliceAlign()), II.isVolatile()); 2801 if (AATags) 2802 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2803 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 2804 return false; 2805 } 2806 2807 // If we can represent this as a simple value, we have to build the actual 2808 // value to store, which requires expanding the byte present in memset to 2809 // a sensible representation for the alloca type. This is essentially 2810 // splatting the byte to a sufficiently wide integer, splatting it across 2811 // any desired vector width, and bitcasting to the final type. 2812 Value *V; 2813 2814 if (VecTy) { 2815 // If this is a memset of a vectorized alloca, insert it. 
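// Rough example (illustrative): a memset of byte 0xAB over bytes [4, 12) of a
// <4 x i32> alloca splats the byte to an i32 (zext plus multiply by
// 0x01010101 in getIntegerSplat), builds a <2 x i32> splat from it, and
// inserts that at element index 1 via insertVector below.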
2816 assert(ElementTy == ScalarTy);
2817
2818 unsigned BeginIndex = getIndex(NewBeginOffset);
2819 unsigned EndIndex = getIndex(NewEndOffset);
2820 assert(EndIndex > BeginIndex && "Empty vector!");
2821 unsigned NumElements = EndIndex - BeginIndex;
2822 assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
2823 "Too many elements!");
2824
2825 Value *Splat = getIntegerSplat(
2826 II.getValue(), DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8);
2827 Splat = convertValue(DL, IRB, Splat, ElementTy);
2828 if (NumElements > 1)
2829 Splat = getVectorSplat(Splat, NumElements);
2830
2831 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2832 NewAI.getAlign(), "oldload");
2833 V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2834 } else if (IntTy) {
2835 // If this is a memset on an alloca where we can widen stores, insert the
2836 // set integer.
2837 assert(!II.isVolatile());
2838
2839 uint64_t Size = NewEndOffset - NewBeginOffset;
2840 V = getIntegerSplat(II.getValue(), Size);
2841
2842 if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2843 EndOffset != NewAllocaEndOffset)) {
2844 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2845 NewAI.getAlign(), "oldload");
2846 Old = convertValue(DL, IRB, Old, IntTy);
2847 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2848 V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2849 } else {
2850 assert(V->getType() == IntTy &&
2851 "Wrong type for an alloca wide integer!");
2852 }
2853 V = convertValue(DL, IRB, V, AllocaTy);
2854 } else {
2855 // Established these invariants above.
2856 assert(NewBeginOffset == NewAllocaBeginOffset);
2857 assert(NewEndOffset == NewAllocaEndOffset);
2858
2859 V = getIntegerSplat(II.getValue(),
2860 DL.getTypeSizeInBits(ScalarTy).getFixedSize() / 8);
2861 if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2862 V = getVectorSplat(
2863 V, cast<FixedVectorType>(AllocaVecTy)->getNumElements());
2864
2865 V = convertValue(DL, IRB, V, AllocaTy);
2866 }
2867
2868 StoreInst *New =
2869 IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), II.isVolatile());
2870 New->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
2871 LLVMContext::MD_access_group});
2872 if (AATags)
2873 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2874 LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2875 return !II.isVolatile();
2876 }
2877
2878 bool visitMemTransferInst(MemTransferInst &II) {
2879 // Rewriting of memory transfer instructions can be a bit tricky. We break
2880 // them into two categories: split intrinsics and unsplit intrinsics.
2881
2882 LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2883
2884 AAMDNodes AATags = II.getAAMetadata();
2885
2886 bool IsDest = &II.getRawDestUse() == OldUse;
2887 assert((IsDest && II.getRawDest() == OldPtr) ||
2888 (!IsDest && II.getRawSource() == OldPtr));
2889
2890 MaybeAlign SliceAlign = getSliceAlign();
2891
2892 // For unsplit intrinsics, we simply modify the source and destination
2893 // pointers in place. This isn't just an optimization, it is a matter of
2894 // correctness. With unsplit intrinsics we may be dealing with transfers
2895 // within a single alloca before SROA ran, or with transfers that have
2896 // a variable length. We may also be dealing with memmove instead of
2897 // memcpy, and so simply updating the pointers is all that is necessary for
2898 // us to update both source and dest of a single call.
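// E.g. (illustrative) a memmove of a variable length between two pointers
// into the same original alloca stays a single unsplit intrinsic; only its
// dest/source operand and alignment are rewritten in place below.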
2899 if (!IsSplittable) { 2900 Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2901 if (IsDest) { 2902 II.setDest(AdjustedPtr); 2903 II.setDestAlignment(SliceAlign); 2904 } 2905 else { 2906 II.setSource(AdjustedPtr); 2907 II.setSourceAlignment(SliceAlign); 2908 } 2909 2910 LLVM_DEBUG(dbgs() << " to: " << II << "\n"); 2911 deleteIfTriviallyDead(OldPtr); 2912 return false; 2913 } 2914 // For split transfer intrinsics we have an incredibly useful assurance: 2915 // the source and destination do not reside within the same alloca, and at 2916 // least one of them does not escape. This means that we can replace 2917 // memmove with memcpy, and we don't need to worry about all manner of 2918 // downsides to splitting and transforming the operations. 2919 2920 // If this doesn't map cleanly onto the alloca type, and that type isn't 2921 // a single value type, just emit a memcpy. 2922 bool EmitMemCpy = 2923 !VecTy && !IntTy && 2924 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || 2925 SliceSize != 2926 DL.getTypeStoreSize(NewAI.getAllocatedType()).getFixedSize() || 2927 !NewAI.getAllocatedType()->isSingleValueType()); 2928 2929 // If we're just going to emit a memcpy, the alloca hasn't changed, and the 2930 // size hasn't been shrunk based on analysis of the viable range, this is 2931 // a no-op. 2932 if (EmitMemCpy && &OldAI == &NewAI) { 2933 // Ensure the start lines up. 2934 assert(NewBeginOffset == BeginOffset); 2935 2936 // Rewrite the size as needed. 2937 if (NewEndOffset != EndOffset) 2938 II.setLength(ConstantInt::get(II.getLength()->getType(), 2939 NewEndOffset - NewBeginOffset)); 2940 return false; 2941 } 2942 // Record this instruction for deletion. 2943 Pass.DeadInsts.push_back(&II); 2944 2945 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2946 // alloca that should be re-examined after rewriting this instruction. 2947 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); 2948 if (AllocaInst *AI = 2949 dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) { 2950 assert(AI != &OldAI && AI != &NewAI && 2951 "Splittable transfers cannot reach the same alloca on both ends."); 2952 Pass.Worklist.insert(AI); 2953 } 2954 2955 Type *OtherPtrTy = OtherPtr->getType(); 2956 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); 2957 2958 // Compute the relative offset for the other pointer within the transfer. 2959 unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS); 2960 APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset); 2961 Align OtherAlign = 2962 (IsDest ? II.getSourceAlign() : II.getDestAlign()).valueOrOne(); 2963 OtherAlign = 2964 commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue()); 2965 2966 if (EmitMemCpy) { 2967 // Compute the other pointer, folding as much as possible to produce 2968 // a single, simple GEP in most cases. 
2969 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2970 OtherPtr->getName() + "."); 2971 2972 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2973 Type *SizeTy = II.getLength()->getType(); 2974 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2975 2976 Value *DestPtr, *SrcPtr; 2977 MaybeAlign DestAlign, SrcAlign; 2978 // Note: IsDest is true iff we're copying into the new alloca slice 2979 if (IsDest) { 2980 DestPtr = OurPtr; 2981 DestAlign = SliceAlign; 2982 SrcPtr = OtherPtr; 2983 SrcAlign = OtherAlign; 2984 } else { 2985 DestPtr = OtherPtr; 2986 DestAlign = OtherAlign; 2987 SrcPtr = OurPtr; 2988 SrcAlign = SliceAlign; 2989 } 2990 CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign, 2991 Size, II.isVolatile()); 2992 if (AATags) 2993 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2994 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 2995 return false; 2996 } 2997 2998 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && 2999 NewEndOffset == NewAllocaEndOffset; 3000 uint64_t Size = NewEndOffset - NewBeginOffset; 3001 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; 3002 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; 3003 unsigned NumElements = EndIndex - BeginIndex; 3004 IntegerType *SubIntTy = 3005 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; 3006 3007 // Reset the other pointer type to match the register type we're going to 3008 // use, but using the address space of the original other pointer. 3009 Type *OtherTy; 3010 if (VecTy && !IsWholeAlloca) { 3011 if (NumElements == 1) 3012 OtherTy = VecTy->getElementType(); 3013 else 3014 OtherTy = FixedVectorType::get(VecTy->getElementType(), NumElements); 3015 } else if (IntTy && !IsWholeAlloca) { 3016 OtherTy = SubIntTy; 3017 } else { 3018 OtherTy = NewAllocaTy; 3019 } 3020 OtherPtrTy = OtherTy->getPointerTo(OtherAS); 3021 3022 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 3023 OtherPtr->getName() + "."); 3024 MaybeAlign SrcAlign = OtherAlign; 3025 Value *DstPtr = &NewAI; 3026 MaybeAlign DstAlign = SliceAlign; 3027 if (!IsDest) { 3028 std::swap(SrcPtr, DstPtr); 3029 std::swap(SrcAlign, DstAlign); 3030 } 3031 3032 Value *Src; 3033 if (VecTy && !IsWholeAlloca && !IsDest) { 3034 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3035 NewAI.getAlign(), "load"); 3036 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); 3037 } else if (IntTy && !IsWholeAlloca && !IsDest) { 3038 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3039 NewAI.getAlign(), "load"); 3040 Src = convertValue(DL, IRB, Src, IntTy); 3041 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3042 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); 3043 } else { 3044 LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign, 3045 II.isVolatile(), "copyload"); 3046 Load->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access, 3047 LLVMContext::MD_access_group}); 3048 if (AATags) 3049 Load->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3050 Src = Load; 3051 } 3052 3053 if (VecTy && !IsWholeAlloca && IsDest) { 3054 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3055 NewAI.getAlign(), "oldload"); 3056 Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); 3057 } else if (IntTy && !IsWholeAlloca && IsDest) { 3058 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3059 NewAI.getAlign(), "oldload"); 3060 Old 
= convertValue(DL, IRB, Old, IntTy); 3061 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3062 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); 3063 Src = convertValue(DL, IRB, Src, NewAllocaTy); 3064 } 3065 3066 StoreInst *Store = cast<StoreInst>( 3067 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); 3068 Store->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access, 3069 LLVMContext::MD_access_group}); 3070 if (AATags) 3071 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3072 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3073 return !II.isVolatile(); 3074 } 3075 3076 bool visitIntrinsicInst(IntrinsicInst &II) { 3077 assert((II.isLifetimeStartOrEnd() || II.isDroppable()) && 3078 "Unexpected intrinsic!"); 3079 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 3080 3081 // Record this instruction for deletion. 3082 Pass.DeadInsts.push_back(&II); 3083 3084 if (II.isDroppable()) { 3085 assert(II.getIntrinsicID() == Intrinsic::assume && "Expected assume"); 3086 // TODO For now we forget assumed information, this can be improved. 3087 OldPtr->dropDroppableUsesIn(II); 3088 return true; 3089 } 3090 3091 assert(II.getArgOperand(1) == OldPtr); 3092 // Lifetime intrinsics are only promotable if they cover the whole alloca. 3093 // Therefore, we drop lifetime intrinsics which don't cover the whole 3094 // alloca. 3095 // (In theory, intrinsics which partially cover an alloca could be 3096 // promoted, but PromoteMemToReg doesn't handle that case.) 3097 // FIXME: Check whether the alloca is promotable before dropping the 3098 // lifetime intrinsics? 3099 if (NewBeginOffset != NewAllocaBeginOffset || 3100 NewEndOffset != NewAllocaEndOffset) 3101 return true; 3102 3103 ConstantInt *Size = 3104 ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), 3105 NewEndOffset - NewBeginOffset); 3106 // Lifetime intrinsics always expect an i8* so directly get such a pointer 3107 // for the new alloca slice. 3108 Type *PointerTy = IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace()); 3109 Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy); 3110 Value *New; 3111 if (II.getIntrinsicID() == Intrinsic::lifetime_start) 3112 New = IRB.CreateLifetimeStart(Ptr, Size); 3113 else 3114 New = IRB.CreateLifetimeEnd(Ptr, Size); 3115 3116 (void)New; 3117 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3118 3119 return true; 3120 } 3121 3122 void fixLoadStoreAlign(Instruction &Root) { 3123 // This algorithm implements the same visitor loop as 3124 // hasUnsafePHIOrSelectUse, and fixes the alignment of each load 3125 // or store found. 
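// For example (assumed alignments, purely illustrative): if the slice only
// guarantees 4-byte alignment, a "load i64, ... align 8" reached through a
// PHI or select of slice pointers is clamped to "align 4" by the std::min
// below; alignments are only ever lowered here, never raised.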
3126 SmallPtrSet<Instruction *, 4> Visited; 3127 SmallVector<Instruction *, 4> Uses; 3128 Visited.insert(&Root); 3129 Uses.push_back(&Root); 3130 do { 3131 Instruction *I = Uses.pop_back_val(); 3132 3133 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 3134 LI->setAlignment(std::min(LI->getAlign(), getSliceAlign())); 3135 continue; 3136 } 3137 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 3138 SI->setAlignment(std::min(SI->getAlign(), getSliceAlign())); 3139 continue; 3140 } 3141 3142 assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) || 3143 isa<PHINode>(I) || isa<SelectInst>(I) || 3144 isa<GetElementPtrInst>(I)); 3145 for (User *U : I->users()) 3146 if (Visited.insert(cast<Instruction>(U)).second) 3147 Uses.push_back(cast<Instruction>(U)); 3148 } while (!Uses.empty()); 3149 } 3150 3151 bool visitPHINode(PHINode &PN) { 3152 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 3153 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); 3154 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); 3155 3156 // We would like to compute a new pointer in only one place, but have it be 3157 // as local as possible to the PHI. To do that, we re-use the location of 3158 // the old pointer, which necessarily must be in the right position to 3159 // dominate the PHI. 3160 IRBuilderBase::InsertPointGuard Guard(IRB); 3161 if (isa<PHINode>(OldPtr)) 3162 IRB.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt()); 3163 else 3164 IRB.SetInsertPoint(OldPtr); 3165 IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc()); 3166 3167 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3168 // Replace the operands which were using the old pointer. 3169 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); 3170 3171 LLVM_DEBUG(dbgs() << " to: " << PN << "\n"); 3172 deleteIfTriviallyDead(OldPtr); 3173 3174 // Fix the alignment of any loads or stores using this PHI node. 3175 fixLoadStoreAlign(PN); 3176 3177 // PHIs can't be promoted on their own, but often can be speculated. We 3178 // check the speculation outside of the rewriter so that we see the 3179 // fully-rewritten alloca. 3180 PHIUsers.insert(&PN); 3181 return true; 3182 } 3183 3184 bool visitSelectInst(SelectInst &SI) { 3185 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3186 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && 3187 "Pointer isn't an operand!"); 3188 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); 3189 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); 3190 3191 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3192 // Replace the operands which were using the old pointer. 3193 if (SI.getOperand(1) == OldPtr) 3194 SI.setOperand(1, NewPtr); 3195 if (SI.getOperand(2) == OldPtr) 3196 SI.setOperand(2, NewPtr); 3197 3198 LLVM_DEBUG(dbgs() << " to: " << SI << "\n"); 3199 deleteIfTriviallyDead(OldPtr); 3200 3201 // Fix the alignment of any loads or stores using this select. 3202 fixLoadStoreAlign(SI); 3203 3204 // Selects can't be promoted on their own, but often can be speculated. We 3205 // check the speculation outside of the rewriter so that we see the 3206 // fully-rewritten alloca. 3207 SelectUsers.insert(&SI); 3208 return true; 3209 } 3210 }; 3211 3212 namespace { 3213 3214 /// Visitor to rewrite aggregate loads and stores as scalar. 
3215 /// 3216 /// This pass aggressively rewrites all aggregate loads and stores on 3217 /// a particular pointer (or any pointer derived from it which we can identify) 3218 /// with scalar loads and stores. 3219 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { 3220 // Befriend the base class so it can delegate to private visit methods. 3221 friend class InstVisitor<AggLoadStoreRewriter, bool>; 3222 3223 /// Queue of pointer uses to analyze and potentially rewrite. 3224 SmallVector<Use *, 8> Queue; 3225 3226 /// Set to prevent us from cycling with phi nodes and loops. 3227 SmallPtrSet<User *, 8> Visited; 3228 3229 /// The current pointer use being rewritten. This is used to dig up the used 3230 /// value (as opposed to the user). 3231 Use *U = nullptr; 3232 3233 /// Used to calculate offsets, and hence alignment, of subobjects. 3234 const DataLayout &DL; 3235 3236 IRBuilderTy &IRB; 3237 3238 public: 3239 AggLoadStoreRewriter(const DataLayout &DL, IRBuilderTy &IRB) 3240 : DL(DL), IRB(IRB) {} 3241 3242 /// Rewrite loads and stores through a pointer and all pointers derived from 3243 /// it. 3244 bool rewrite(Instruction &I) { 3245 LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); 3246 enqueueUsers(I); 3247 bool Changed = false; 3248 while (!Queue.empty()) { 3249 U = Queue.pop_back_val(); 3250 Changed |= visit(cast<Instruction>(U->getUser())); 3251 } 3252 return Changed; 3253 } 3254 3255 private: 3256 /// Enqueue all the users of the given instruction for further processing. 3257 /// This uses a set to de-duplicate users. 3258 void enqueueUsers(Instruction &I) { 3259 for (Use &U : I.uses()) 3260 if (Visited.insert(U.getUser()).second) 3261 Queue.push_back(&U); 3262 } 3263 3264 // Conservative default is to not rewrite anything. 3265 bool visitInstruction(Instruction &I) { return false; } 3266 3267 /// Generic recursive split emission class. 3268 template <typename Derived> class OpSplitter { 3269 protected: 3270 /// The builder used to form new instructions. 3271 IRBuilderTy &IRB; 3272 3273 /// The indices which to be used with insert- or extractvalue to select the 3274 /// appropriate value within the aggregate. 3275 SmallVector<unsigned, 4> Indices; 3276 3277 /// The indices to a GEP instruction which will move Ptr to the correct slot 3278 /// within the aggregate. 3279 SmallVector<Value *, 4> GEPIndices; 3280 3281 /// The base pointer of the original op, used as a base for GEPing the 3282 /// split operations. 3283 Value *Ptr; 3284 3285 /// The base pointee type being GEPed into. 3286 Type *BaseTy; 3287 3288 /// Known alignment of the base pointer. 3289 Align BaseAlign; 3290 3291 /// To calculate offset of each component so we can correctly deduce 3292 /// alignments. 3293 const DataLayout &DL; 3294 3295 /// Initialize the splitter with an insertion point, Ptr and start with a 3296 /// single zero GEP index. 3297 OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3298 Align BaseAlign, const DataLayout &DL, IRBuilderTy &IRB) 3299 : IRB(IRB), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), BaseTy(BaseTy), 3300 BaseAlign(BaseAlign), DL(DL) { 3301 IRB.SetInsertPoint(InsertionPoint); 3302 } 3303 3304 public: 3305 /// Generic recursive split emission routine. 3306 /// 3307 /// This method recursively splits an aggregate op (load or store) into 3308 /// scalar or vector ops. It splits recursively until it hits a single value 3309 /// and emits that single value operation via the template argument. 
3310 /// 3311 /// The logic of this routine relies on GEPs and insertvalue and 3312 /// extractvalue all operating with the same fundamental index list, merely 3313 /// formatted differently (GEPs need actual values). 3314 /// 3315 /// \param Ty The type being split recursively into smaller ops. 3316 /// \param Agg The aggregate value being built up or stored, depending on 3317 /// whether this is splitting a load or a store respectively. 3318 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { 3319 if (Ty->isSingleValueType()) { 3320 unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices); 3321 return static_cast<Derived *>(this)->emitFunc( 3322 Ty, Agg, commonAlignment(BaseAlign, Offset), Name); 3323 } 3324 3325 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 3326 unsigned OldSize = Indices.size(); 3327 (void)OldSize; 3328 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; 3329 ++Idx) { 3330 assert(Indices.size() == OldSize && "Did not return to the old size"); 3331 Indices.push_back(Idx); 3332 GEPIndices.push_back(IRB.getInt32(Idx)); 3333 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); 3334 GEPIndices.pop_back(); 3335 Indices.pop_back(); 3336 } 3337 return; 3338 } 3339 3340 if (StructType *STy = dyn_cast<StructType>(Ty)) { 3341 unsigned OldSize = Indices.size(); 3342 (void)OldSize; 3343 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; 3344 ++Idx) { 3345 assert(Indices.size() == OldSize && "Did not return to the old size"); 3346 Indices.push_back(Idx); 3347 GEPIndices.push_back(IRB.getInt32(Idx)); 3348 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); 3349 GEPIndices.pop_back(); 3350 Indices.pop_back(); 3351 } 3352 return; 3353 } 3354 3355 llvm_unreachable("Only arrays and structs are aggregate loadable types"); 3356 } 3357 }; 3358 3359 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { 3360 AAMDNodes AATags; 3361 3362 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3363 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL, 3364 IRBuilderTy &IRB) 3365 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, DL, 3366 IRB), 3367 AATags(AATags) {} 3368 3369 /// Emit a leaf load of a single value. This is called at the leaves of the 3370 /// recursive emission to actually load values. 3371 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3372 assert(Ty->isSingleValueType()); 3373 // Load the single value and insert it using the indices. 3374 Value *GEP = 3375 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3376 LoadInst *Load = 3377 IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load"); 3378 3379 APInt Offset( 3380 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0); 3381 if (AATags && 3382 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset)) 3383 Load->setAAMetadata(AATags.shift(Offset.getZExtValue())); 3384 3385 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); 3386 LLVM_DEBUG(dbgs() << " to: " << *Load << "\n"); 3387 } 3388 }; 3389 3390 bool visitLoadInst(LoadInst &LI) { 3391 assert(LI.getPointerOperand() == *U); 3392 if (!LI.isSimple() || LI.getType()->isSingleValueType()) 3393 return false; 3394 3395 // We have an aggregate being loaded, split it apart. 
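// For illustration only (hypothetical IR): a first-class aggregate load such
// as
//   %v = load { i32, float }, { i32, float }* %p
// is rebuilt below as roughly
//   %v.fca.0.gep = getelementptr inbounds { i32, float }, { i32, float }* %p, i32 0, i32 0
//   %v.fca.0.load = load i32, i32* %v.fca.0.gep
//   %v.fca.0.insert = insertvalue { i32, float } poison, i32 %v.fca.0.load, 0
// followed by the same sequence for field 1, after which %v is replaced.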
3396 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 3397 LoadOpSplitter Splitter(&LI, *U, LI.getType(), LI.getAAMetadata(), 3398 getAdjustedAlignment(&LI, 0), DL, IRB); 3399 Value *V = PoisonValue::get(LI.getType()); 3400 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); 3401 Visited.erase(&LI); 3402 LI.replaceAllUsesWith(V); 3403 LI.eraseFromParent(); 3404 return true; 3405 } 3406 3407 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { 3408 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3409 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL, 3410 IRBuilderTy &IRB) 3411 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, 3412 DL, IRB), 3413 AATags(AATags) {} 3414 AAMDNodes AATags; 3415 /// Emit a leaf store of a single value. This is called at the leaves of the 3416 /// recursive emission to actually produce stores. 3417 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3418 assert(Ty->isSingleValueType()); 3419 // Extract the single value and store it using the indices. 3420 // 3421 // The gep and extractvalue values are factored out of the CreateStore 3422 // call to make the output independent of the argument evaluation order. 3423 Value *ExtractValue = 3424 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"); 3425 Value *InBoundsGEP = 3426 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3427 StoreInst *Store = 3428 IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment); 3429 3430 APInt Offset( 3431 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0); 3432 if (AATags && 3433 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset)) 3434 Store->setAAMetadata(AATags.shift(Offset.getZExtValue())); 3435 3436 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3437 } 3438 }; 3439 3440 bool visitStoreInst(StoreInst &SI) { 3441 if (!SI.isSimple() || SI.getPointerOperand() != *U) 3442 return false; 3443 Value *V = SI.getValueOperand(); 3444 if (V->getType()->isSingleValueType()) 3445 return false; 3446 3447 // We have an aggregate being stored, split it apart. 3448 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3449 StoreOpSplitter Splitter(&SI, *U, V->getType(), SI.getAAMetadata(), 3450 getAdjustedAlignment(&SI, 0), DL, IRB); 3451 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); 3452 Visited.erase(&SI); 3453 SI.eraseFromParent(); 3454 return true; 3455 } 3456 3457 bool visitBitCastInst(BitCastInst &BC) { 3458 enqueueUsers(BC); 3459 return false; 3460 } 3461 3462 bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 3463 enqueueUsers(ASC); 3464 return false; 3465 } 3466 3467 // Fold gep (select cond, ptr1, ptr2) => select cond, gep(ptr1), gep(ptr2) 3468 bool foldGEPSelect(GetElementPtrInst &GEPI) { 3469 if (!GEPI.hasAllConstantIndices()) 3470 return false; 3471 3472 SelectInst *Sel = cast<SelectInst>(GEPI.getPointerOperand()); 3473 3474 LLVM_DEBUG(dbgs() << " Rewriting gep(select) -> select(gep):" 3475 << "\n original: " << *Sel 3476 << "\n " << GEPI); 3477 3478 IRB.SetInsertPoint(&GEPI); 3479 SmallVector<Value *, 4> Index(GEPI.indices()); 3480 bool IsInBounds = GEPI.isInBounds(); 3481 3482 Type *Ty = GEPI.getSourceElementType(); 3483 Value *True = Sel->getTrueValue(); 3484 Value *NTrue = 3485 IsInBounds 3486 ? 
IRB.CreateInBoundsGEP(Ty, True, Index, 3487 True->getName() + ".sroa.gep") 3488 : IRB.CreateGEP(Ty, True, Index, True->getName() + ".sroa.gep"); 3489 3490 Value *False = Sel->getFalseValue(); 3491 3492 Value *NFalse = 3493 IsInBounds 3494 ? IRB.CreateInBoundsGEP(Ty, False, Index, 3495 False->getName() + ".sroa.gep") 3496 : IRB.CreateGEP(Ty, False, Index, False->getName() + ".sroa.gep"); 3497 3498 Value *NSel = IRB.CreateSelect(Sel->getCondition(), NTrue, NFalse, 3499 Sel->getName() + ".sroa.sel"); 3500 Visited.erase(&GEPI); 3501 GEPI.replaceAllUsesWith(NSel); 3502 GEPI.eraseFromParent(); 3503 Instruction *NSelI = cast<Instruction>(NSel); 3504 Visited.insert(NSelI); 3505 enqueueUsers(*NSelI); 3506 3507 LLVM_DEBUG(dbgs() << "\n to: " << *NTrue 3508 << "\n " << *NFalse 3509 << "\n " << *NSel << '\n'); 3510 3511 return true; 3512 } 3513 3514 // Fold gep (phi ptr1, ptr2) => phi gep(ptr1), gep(ptr2) 3515 bool foldGEPPhi(GetElementPtrInst &GEPI) { 3516 if (!GEPI.hasAllConstantIndices()) 3517 return false; 3518 3519 PHINode *PHI = cast<PHINode>(GEPI.getPointerOperand()); 3520 if (GEPI.getParent() != PHI->getParent() || 3521 llvm::any_of(PHI->incoming_values(), [](Value *In) 3522 { Instruction *I = dyn_cast<Instruction>(In); 3523 return !I || isa<GetElementPtrInst>(I) || isa<PHINode>(I) || 3524 succ_empty(I->getParent()) || 3525 !I->getParent()->isLegalToHoistInto(); 3526 })) 3527 return false; 3528 3529 LLVM_DEBUG(dbgs() << " Rewriting gep(phi) -> phi(gep):" 3530 << "\n original: " << *PHI 3531 << "\n " << GEPI 3532 << "\n to: "); 3533 3534 SmallVector<Value *, 4> Index(GEPI.indices()); 3535 bool IsInBounds = GEPI.isInBounds(); 3536 IRB.SetInsertPoint(GEPI.getParent()->getFirstNonPHI()); 3537 PHINode *NewPN = IRB.CreatePHI(GEPI.getType(), PHI->getNumIncomingValues(), 3538 PHI->getName() + ".sroa.phi"); 3539 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) { 3540 BasicBlock *B = PHI->getIncomingBlock(I); 3541 Value *NewVal = nullptr; 3542 int Idx = NewPN->getBasicBlockIndex(B); 3543 if (Idx >= 0) { 3544 NewVal = NewPN->getIncomingValue(Idx); 3545 } else { 3546 Instruction *In = cast<Instruction>(PHI->getIncomingValue(I)); 3547 3548 IRB.SetInsertPoint(In->getParent(), std::next(In->getIterator())); 3549 Type *Ty = GEPI.getSourceElementType(); 3550 NewVal = IsInBounds ? IRB.CreateInBoundsGEP(Ty, In, Index, 3551 In->getName() + ".sroa.gep") 3552 : IRB.CreateGEP(Ty, In, Index, 3553 In->getName() + ".sroa.gep"); 3554 } 3555 NewPN->addIncoming(NewVal, B); 3556 } 3557 3558 Visited.erase(&GEPI); 3559 GEPI.replaceAllUsesWith(NewPN); 3560 GEPI.eraseFromParent(); 3561 Visited.insert(NewPN); 3562 enqueueUsers(*NewPN); 3563 3564 LLVM_DEBUG(for (Value *In : NewPN->incoming_values()) 3565 dbgs() << "\n " << *In; 3566 dbgs() << "\n " << *NewPN << '\n'); 3567 3568 return true; 3569 } 3570 3571 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { 3572 if (isa<SelectInst>(GEPI.getPointerOperand()) && 3573 foldGEPSelect(GEPI)) 3574 return true; 3575 3576 if (isa<PHINode>(GEPI.getPointerOperand()) && 3577 foldGEPPhi(GEPI)) 3578 return true; 3579 3580 enqueueUsers(GEPI); 3581 return false; 3582 } 3583 3584 bool visitPHINode(PHINode &PN) { 3585 enqueueUsers(PN); 3586 return false; 3587 } 3588 3589 bool visitSelectInst(SelectInst &SI) { 3590 enqueueUsers(SI); 3591 return false; 3592 } 3593 }; 3594 3595 } // end anonymous namespace 3596 3597 /// Strip aggregate type wrapping. 3598 /// 3599 /// This removes no-op aggregate types wrapping an underlying type. 
It will 3600 /// strip as many layers of types as it can without changing either the type 3601 /// size or the allocated size. 3602 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { 3603 if (Ty->isSingleValueType()) 3604 return Ty; 3605 3606 uint64_t AllocSize = DL.getTypeAllocSize(Ty).getFixedSize(); 3607 uint64_t TypeSize = DL.getTypeSizeInBits(Ty).getFixedSize(); 3608 3609 Type *InnerTy; 3610 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 3611 InnerTy = ArrTy->getElementType(); 3612 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 3613 const StructLayout *SL = DL.getStructLayout(STy); 3614 unsigned Index = SL->getElementContainingOffset(0); 3615 InnerTy = STy->getElementType(Index); 3616 } else { 3617 return Ty; 3618 } 3619 3620 if (AllocSize > DL.getTypeAllocSize(InnerTy).getFixedSize() || 3621 TypeSize > DL.getTypeSizeInBits(InnerTy).getFixedSize()) 3622 return Ty; 3623 3624 return stripAggregateTypeWrapping(DL, InnerTy); 3625 } 3626 3627 /// Try to find a partition of the aggregate type passed in for a given 3628 /// offset and size. 3629 /// 3630 /// This recurses through the aggregate type and tries to compute a subtype 3631 /// based on the offset and size. When the offset and size span a sub-section 3632 /// of an array, it will even compute a new array type for that sub-section, 3633 /// and the same for structs. 3634 /// 3635 /// Note that this routine is very strict and tries to find a partition of the 3636 /// type which produces the *exact* right offset and size. It is not forgiving 3637 /// when the size or offset cause either end of type-based partition to be off. 3638 /// Also, this is a best-effort routine. It is reasonable to give up and not 3639 /// return a type if necessary. 3640 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, 3641 uint64_t Size) { 3642 if (Offset == 0 && DL.getTypeAllocSize(Ty).getFixedSize() == Size) 3643 return stripAggregateTypeWrapping(DL, Ty); 3644 if (Offset > DL.getTypeAllocSize(Ty).getFixedSize() || 3645 (DL.getTypeAllocSize(Ty).getFixedSize() - Offset) < Size) 3646 return nullptr; 3647 3648 if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) { 3649 Type *ElementTy; 3650 uint64_t TyNumElements; 3651 if (auto *AT = dyn_cast<ArrayType>(Ty)) { 3652 ElementTy = AT->getElementType(); 3653 TyNumElements = AT->getNumElements(); 3654 } else { 3655 // FIXME: This isn't right for vectors with non-byte-sized or 3656 // non-power-of-two sized elements. 3657 auto *VT = cast<FixedVectorType>(Ty); 3658 ElementTy = VT->getElementType(); 3659 TyNumElements = VT->getNumElements(); 3660 } 3661 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize(); 3662 uint64_t NumSkippedElements = Offset / ElementSize; 3663 if (NumSkippedElements >= TyNumElements) 3664 return nullptr; 3665 Offset -= NumSkippedElements * ElementSize; 3666 3667 // First check if we need to recurse. 3668 if (Offset > 0 || Size < ElementSize) { 3669 // Bail if the partition ends in a different array element. 3670 if ((Offset + Size) > ElementSize) 3671 return nullptr; 3672 // Recurse through the element type trying to peel off offset bytes. 
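// Worked example (assumed layout, for illustration): with
// Ty = [2 x { i32, i32 }], Offset = 12 and Size = 4, one 8-byte struct
// element is skipped, leaving Offset = 4 within the second element, so we
// recurse into { i32, i32 } and ultimately return the trailing i32.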
3673 return getTypePartition(DL, ElementTy, Offset, Size); 3674 } 3675 assert(Offset == 0); 3676 3677 if (Size == ElementSize) 3678 return stripAggregateTypeWrapping(DL, ElementTy); 3679 assert(Size > ElementSize); 3680 uint64_t NumElements = Size / ElementSize; 3681 if (NumElements * ElementSize != Size) 3682 return nullptr; 3683 return ArrayType::get(ElementTy, NumElements); 3684 } 3685 3686 StructType *STy = dyn_cast<StructType>(Ty); 3687 if (!STy) 3688 return nullptr; 3689 3690 const StructLayout *SL = DL.getStructLayout(STy); 3691 if (Offset >= SL->getSizeInBytes()) 3692 return nullptr; 3693 uint64_t EndOffset = Offset + Size; 3694 if (EndOffset > SL->getSizeInBytes()) 3695 return nullptr; 3696 3697 unsigned Index = SL->getElementContainingOffset(Offset); 3698 Offset -= SL->getElementOffset(Index); 3699 3700 Type *ElementTy = STy->getElementType(Index); 3701 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize(); 3702 if (Offset >= ElementSize) 3703 return nullptr; // The offset points into alignment padding. 3704 3705 // See if any partition must be contained by the element. 3706 if (Offset > 0 || Size < ElementSize) { 3707 if ((Offset + Size) > ElementSize) 3708 return nullptr; 3709 return getTypePartition(DL, ElementTy, Offset, Size); 3710 } 3711 assert(Offset == 0); 3712 3713 if (Size == ElementSize) 3714 return stripAggregateTypeWrapping(DL, ElementTy); 3715 3716 StructType::element_iterator EI = STy->element_begin() + Index, 3717 EE = STy->element_end(); 3718 if (EndOffset < SL->getSizeInBytes()) { 3719 unsigned EndIndex = SL->getElementContainingOffset(EndOffset); 3720 if (Index == EndIndex) 3721 return nullptr; // Within a single element and its padding. 3722 3723 // Don't try to form "natural" types if the elements don't line up with the 3724 // expected size. 3725 // FIXME: We could potentially recurse down through the last element in the 3726 // sub-struct to find a natural end point. 3727 if (SL->getElementOffset(EndIndex) != EndOffset) 3728 return nullptr; 3729 3730 assert(Index < EndIndex); 3731 EE = STy->element_begin() + EndIndex; 3732 } 3733 3734 // Try to build up a sub-structure. 3735 StructType *SubTy = 3736 StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); 3737 const StructLayout *SubSL = DL.getStructLayout(SubTy); 3738 if (Size != SubSL->getSizeInBytes()) 3739 return nullptr; // The sub-struct doesn't have quite the size needed. 3740 3741 return SubTy; 3742 } 3743 3744 /// Pre-split loads and stores to simplify rewriting. 3745 /// 3746 /// We want to break up the splittable load+store pairs as much as 3747 /// possible. This is important to do as a preprocessing step, as once we 3748 /// start rewriting the accesses to partitions of the alloca we lose the 3749 /// necessary information to correctly split apart paired loads and stores 3750 /// which both point into this alloca. 
The case to consider is something like 3751 /// the following: 3752 /// 3753 /// %a = alloca [12 x i8] 3754 /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0 3755 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4 3756 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8 3757 /// %iptr1 = bitcast i8* %gep1 to i64* 3758 /// %iptr2 = bitcast i8* %gep2 to i64* 3759 /// %fptr1 = bitcast i8* %gep1 to float* 3760 /// %fptr2 = bitcast i8* %gep2 to float* 3761 /// %fptr3 = bitcast i8* %gep3 to float* 3762 /// store float 0.0, float* %fptr1 3763 /// store float 1.0, float* %fptr2 3764 /// %v = load i64* %iptr1 3765 /// store i64 %v, i64* %iptr2 3766 /// %f1 = load float* %fptr2 3767 /// %f2 = load float* %fptr3 3768 /// 3769 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and 3770 /// promote everything so we recover the 2 SSA values that should have been 3771 /// there all along. 3772 /// 3773 /// \returns true if any changes are made. 3774 bool SROAPass::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { 3775 LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n"); 3776 3777 // Track the loads and stores which are candidates for pre-splitting here, in 3778 // the order they first appear during the partition scan. These give stable 3779 // iteration order and a basis for tracking which loads and stores we 3780 // actually split. 3781 SmallVector<LoadInst *, 4> Loads; 3782 SmallVector<StoreInst *, 4> Stores; 3783 3784 // We need to accumulate the splits required of each load or store where we 3785 // can find them via a direct lookup. This is important to cross-check loads 3786 // and stores against each other. We also track the slice so that we can kill 3787 // all the slices that end up split. 3788 struct SplitOffsets { 3789 Slice *S; 3790 std::vector<uint64_t> Splits; 3791 }; 3792 SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap; 3793 3794 // Track loads out of this alloca which cannot, for any reason, be pre-split. 3795 // This is important as we also cannot pre-split stores of those loads! 3796 // FIXME: This is all pretty gross. It means that we can be more aggressive 3797 // in pre-splitting when the load feeding the store happens to come from 3798 // a separate alloca. Put another way, the effectiveness of SROA would be 3799 // decreased by a frontend which just concatenated all of its local allocas 3800 // into one big flat alloca. But defeating such patterns is exactly the job 3801 // SROA is tasked with! Sadly, to not have this discrepancy we would have 3802 // change store pre-splitting to actually force pre-splitting of the load 3803 // that feeds it *and all stores*. That makes pre-splitting much harder, but 3804 // maybe it would make it more principled? 3805 SmallPtrSet<LoadInst *, 8> UnsplittableLoads; 3806 3807 LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n"); 3808 for (auto &P : AS.partitions()) { 3809 for (Slice &S : P) { 3810 Instruction *I = cast<Instruction>(S.getUse()->getUser()); 3811 if (!S.isSplittable() || S.endOffset() <= P.endOffset()) { 3812 // If this is a load we have to track that it can't participate in any 3813 // pre-splitting. If this is a store of a load we have to track that 3814 // that load also can't participate in any pre-splitting. 
3815 if (auto *LI = dyn_cast<LoadInst>(I)) 3816 UnsplittableLoads.insert(LI); 3817 else if (auto *SI = dyn_cast<StoreInst>(I)) 3818 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand())) 3819 UnsplittableLoads.insert(LI); 3820 continue; 3821 } 3822 assert(P.endOffset() > S.beginOffset() && 3823 "Empty or backwards partition!"); 3824 3825 // Determine if this is a pre-splittable slice. 3826 if (auto *LI = dyn_cast<LoadInst>(I)) { 3827 assert(!LI->isVolatile() && "Cannot split volatile loads!"); 3828 3829 // The load must be used exclusively to store into other pointers for 3830 // us to be able to arbitrarily pre-split it. The stores must also be 3831 // simple to avoid changing semantics. 3832 auto IsLoadSimplyStored = [](LoadInst *LI) { 3833 for (User *LU : LI->users()) { 3834 auto *SI = dyn_cast<StoreInst>(LU); 3835 if (!SI || !SI->isSimple()) 3836 return false; 3837 } 3838 return true; 3839 }; 3840 if (!IsLoadSimplyStored(LI)) { 3841 UnsplittableLoads.insert(LI); 3842 continue; 3843 } 3844 3845 Loads.push_back(LI); 3846 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 3847 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) 3848 // Skip stores *of* pointers. FIXME: This shouldn't even be possible! 3849 continue; 3850 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); 3851 if (!StoredLoad || !StoredLoad->isSimple()) 3852 continue; 3853 assert(!SI->isVolatile() && "Cannot split volatile stores!"); 3854 3855 Stores.push_back(SI); 3856 } else { 3857 // Other uses cannot be pre-split. 3858 continue; 3859 } 3860 3861 // Record the initial split. 3862 LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n"); 3863 auto &Offsets = SplitOffsetsMap[I]; 3864 assert(Offsets.Splits.empty() && 3865 "Should not have splits the first time we see an instruction!"); 3866 Offsets.S = &S; 3867 Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); 3868 } 3869 3870 // Now scan the already split slices, and add a split for any of them which 3871 // we're going to pre-split. 3872 for (Slice *S : P.splitSliceTails()) { 3873 auto SplitOffsetsMapI = 3874 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); 3875 if (SplitOffsetsMapI == SplitOffsetsMap.end()) 3876 continue; 3877 auto &Offsets = SplitOffsetsMapI->second; 3878 3879 assert(Offsets.S == S && "Found a mismatched slice!"); 3880 assert(!Offsets.Splits.empty() && 3881 "Cannot have an empty set of splits on the second partition!"); 3882 assert(Offsets.Splits.back() == 3883 P.beginOffset() - Offsets.S->beginOffset() && 3884 "Previous split does not end where this one begins!"); 3885 3886 // Record each split. The last partition's end isn't needed as the size 3887 // of the slice dictates that. 3888 if (S->endOffset() > P.endOffset()) 3889 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); 3890 } 3891 } 3892 3893 // We may have split loads where some of their stores are split stores. For 3894 // such loads and stores, we can only pre-split them if their splits exactly 3895 // match relative to their starting offset. We have to verify this prior to 3896 // any rewriting. 3897 llvm::erase_if(Stores, [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { 3898 // Lookup the load we are storing in our map of split 3899 // offsets. 3900 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3901 // If it was completely unsplittable, then we're done, 3902 // and this store can't be pre-split. 
3903 if (UnsplittableLoads.count(LI)) 3904 return true; 3905 3906 auto LoadOffsetsI = SplitOffsetsMap.find(LI); 3907 if (LoadOffsetsI == SplitOffsetsMap.end()) 3908 return false; // Unrelated loads are definitely safe. 3909 auto &LoadOffsets = LoadOffsetsI->second; 3910 3911 // Now lookup the store's offsets. 3912 auto &StoreOffsets = SplitOffsetsMap[SI]; 3913 3914 // If the relative offsets of each split in the load and 3915 // store match exactly, then we can split them and we 3916 // don't need to remove them here. 3917 if (LoadOffsets.Splits == StoreOffsets.Splits) 3918 return false; 3919 3920 LLVM_DEBUG(dbgs() << " Mismatched splits for load and store:\n" 3921 << " " << *LI << "\n" 3922 << " " << *SI << "\n"); 3923 3924 // We've found a store and load that we need to split 3925 // with mismatched relative splits. Just give up on them 3926 // and remove both instructions from our list of 3927 // candidates. 3928 UnsplittableLoads.insert(LI); 3929 return true; 3930 }); 3931 // Now we have to go *back* through all the stores, because a later store may 3932 // have caused an earlier store's load to become unsplittable and if it is 3933 // unsplittable for the later store, then we can't rely on it being split in 3934 // the earlier store either. 3935 llvm::erase_if(Stores, [&UnsplittableLoads](StoreInst *SI) { 3936 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3937 return UnsplittableLoads.count(LI); 3938 }); 3939 // Once we've established all the loads that can't be split for some reason, 3940 // filter any that made it into our list out. 3941 llvm::erase_if(Loads, [&UnsplittableLoads](LoadInst *LI) { 3942 return UnsplittableLoads.count(LI); 3943 }); 3944 3945 // If no loads or stores are left, there is no pre-splitting to be done for 3946 // this alloca. 3947 if (Loads.empty() && Stores.empty()) 3948 return false; 3949 3950 // From here on, we can't fail and will be building new accesses, so rig up 3951 // an IR builder. 3952 IRBuilderTy IRB(&AI); 3953 3954 // Collect the new slices which we will merge into the alloca slices. 3955 SmallVector<Slice, 4> NewSlices; 3956 3957 // Track any allocas we end up splitting loads and stores for so we iterate 3958 // on them. 3959 SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas; 3960 3961 // At this point, we have collected all of the loads and stores we can 3962 // pre-split, and the specific splits needed for them. We actually do the 3963 // splitting in a specific order in order to handle when one of the loads in 3964 // the value operand to one of the stores. 3965 // 3966 // First, we rewrite all of the split loads, and just accumulate each split 3967 // load in a parallel structure. We also build the slices for them and append 3968 // them to the alloca slices. 
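// For intuition (hypothetical example): an i64 load whose slice is crossed by
// a partition boundary 4 bytes in gets rewritten by the loop below into two
// i32 loads at relative offsets 0 and 4; each part is recorded as a new,
// unsplittable slice, and the parts are remembered so that stores of the
// original i64 value can later be rewritten against the same pieces.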
3969 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; 3970 std::vector<LoadInst *> SplitLoads; 3971 const DataLayout &DL = AI.getModule()->getDataLayout(); 3972 for (LoadInst *LI : Loads) { 3973 SplitLoads.clear(); 3974 3975 IntegerType *Ty = cast<IntegerType>(LI->getType()); 3976 assert(Ty->getBitWidth() % 8 == 0); 3977 uint64_t LoadSize = Ty->getBitWidth() / 8; 3978 assert(LoadSize > 0 && "Cannot have a zero-sized integer load!"); 3979 3980 auto &Offsets = SplitOffsetsMap[LI]; 3981 assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 3982 "Slice size should always match load size exactly!"); 3983 uint64_t BaseOffset = Offsets.S->beginOffset(); 3984 assert(BaseOffset + LoadSize > BaseOffset && 3985 "Cannot represent alloca access size using 64-bit integers!"); 3986 3987 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); 3988 IRB.SetInsertPoint(LI); 3989 3990 LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); 3991 3992 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3993 int Idx = 0, Size = Offsets.Splits.size(); 3994 for (;;) { 3995 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 3996 auto AS = LI->getPointerAddressSpace(); 3997 auto *PartPtrTy = PartTy->getPointerTo(AS); 3998 LoadInst *PLoad = IRB.CreateAlignedLoad( 3999 PartTy, 4000 getAdjustedPtr(IRB, DL, BasePtr, 4001 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4002 PartPtrTy, BasePtr->getName() + "."), 4003 getAdjustedAlignment(LI, PartOffset), 4004 /*IsVolatile*/ false, LI->getName()); 4005 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 4006 LLVMContext::MD_access_group}); 4007 4008 // Append this load onto the list of split loads so we can find it later 4009 // to rewrite the stores. 4010 SplitLoads.push_back(PLoad); 4011 4012 // Now build a new slice for the alloca. 4013 NewSlices.push_back( 4014 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 4015 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), 4016 /*IsSplittable*/ false)); 4017 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 4018 << ", " << NewSlices.back().endOffset() 4019 << "): " << *PLoad << "\n"); 4020 4021 // See if we've handled all the splits. 4022 if (Idx >= Size) 4023 break; 4024 4025 // Setup the next partition. 4026 PartOffset = Offsets.Splits[Idx]; 4027 ++Idx; 4028 PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset; 4029 } 4030 4031 // Now that we have the split loads, do the slow walk over all uses of the 4032 // load and rewrite them as split stores, or save the split loads to use 4033 // below if the store is going to be split there anyways. 4034 bool DeferredStores = false; 4035 for (User *LU : LI->users()) { 4036 StoreInst *SI = cast<StoreInst>(LU); 4037 if (!Stores.empty() && SplitOffsetsMap.count(SI)) { 4038 DeferredStores = true; 4039 LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI 4040 << "\n"); 4041 continue; 4042 } 4043 4044 Value *StoreBasePtr = SI->getPointerOperand(); 4045 IRB.SetInsertPoint(SI); 4046 4047 LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); 4048 4049 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { 4050 LoadInst *PLoad = SplitLoads[Idx]; 4051 uint64_t PartOffset = Idx == 0 ? 
0 : Offsets.Splits[Idx - 1]; 4052 auto *PartPtrTy = 4053 PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); 4054 4055 auto AS = SI->getPointerAddressSpace(); 4056 StoreInst *PStore = IRB.CreateAlignedStore( 4057 PLoad, 4058 getAdjustedPtr(IRB, DL, StoreBasePtr, 4059 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4060 PartPtrTy, StoreBasePtr->getName() + "."), 4061 getAdjustedAlignment(SI, PartOffset), 4062 /*IsVolatile*/ false); 4063 PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access, 4064 LLVMContext::MD_access_group}); 4065 LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); 4066 } 4067 4068 // We want to immediately iterate on any allocas impacted by splitting 4069 // this store, and we have to track any promotable alloca (indicated by 4070 // a direct store) as needing to be resplit because it is no longer 4071 // promotable. 4072 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) { 4073 ResplitPromotableAllocas.insert(OtherAI); 4074 Worklist.insert(OtherAI); 4075 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 4076 StoreBasePtr->stripInBoundsOffsets())) { 4077 Worklist.insert(OtherAI); 4078 } 4079 4080 // Mark the original store as dead. 4081 DeadInsts.push_back(SI); 4082 } 4083 4084 // Save the split loads if there are deferred stores among the users. 4085 if (DeferredStores) 4086 SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); 4087 4088 // Mark the original load as dead and kill the original slice. 4089 DeadInsts.push_back(LI); 4090 Offsets.S->kill(); 4091 } 4092 4093 // Second, we rewrite all of the split stores. At this point, we know that 4094 // all loads from this alloca have been split already. For stores of such 4095 // loads, we can simply look up the pre-existing split loads. For stores of 4096 // other loads, we split those loads first and then write split stores of 4097 // them. 4098 for (StoreInst *SI : Stores) { 4099 auto *LI = cast<LoadInst>(SI->getValueOperand()); 4100 IntegerType *Ty = cast<IntegerType>(LI->getType()); 4101 assert(Ty->getBitWidth() % 8 == 0); 4102 uint64_t StoreSize = Ty->getBitWidth() / 8; 4103 assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); 4104 4105 auto &Offsets = SplitOffsetsMap[SI]; 4106 assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 4107 "Slice size should always match load size exactly!"); 4108 uint64_t BaseOffset = Offsets.S->beginOffset(); 4109 assert(BaseOffset + StoreSize > BaseOffset && 4110 "Cannot represent alloca access size using 64-bit integers!"); 4111 4112 Value *LoadBasePtr = LI->getPointerOperand(); 4113 Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); 4114 4115 LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); 4116 4117 // Check whether we have an already split load. 
4118 auto SplitLoadsMapI = SplitLoadsMap.find(LI); 4119 std::vector<LoadInst *> *SplitLoads = nullptr; 4120 if (SplitLoadsMapI != SplitLoadsMap.end()) { 4121 SplitLoads = &SplitLoadsMapI->second; 4122 assert(SplitLoads->size() == Offsets.Splits.size() + 1 && 4123 "Too few split loads for the number of splits in the store!"); 4124 } else { 4125 LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n"); 4126 } 4127 4128 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 4129 int Idx = 0, Size = Offsets.Splits.size(); 4130 for (;;) { 4131 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 4132 auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); 4133 auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); 4134 4135 // Either lookup a split load or create one. 4136 LoadInst *PLoad; 4137 if (SplitLoads) { 4138 PLoad = (*SplitLoads)[Idx]; 4139 } else { 4140 IRB.SetInsertPoint(LI); 4141 auto AS = LI->getPointerAddressSpace(); 4142 PLoad = IRB.CreateAlignedLoad( 4143 PartTy, 4144 getAdjustedPtr(IRB, DL, LoadBasePtr, 4145 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4146 LoadPartPtrTy, LoadBasePtr->getName() + "."), 4147 getAdjustedAlignment(LI, PartOffset), 4148 /*IsVolatile*/ false, LI->getName()); 4149 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 4150 LLVMContext::MD_access_group}); 4151 } 4152 4153 // And store this partition. 4154 IRB.SetInsertPoint(SI); 4155 auto AS = SI->getPointerAddressSpace(); 4156 StoreInst *PStore = IRB.CreateAlignedStore( 4157 PLoad, 4158 getAdjustedPtr(IRB, DL, StoreBasePtr, 4159 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4160 StorePartPtrTy, StoreBasePtr->getName() + "."), 4161 getAdjustedAlignment(SI, PartOffset), 4162 /*IsVolatile*/ false); 4163 PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access, 4164 LLVMContext::MD_access_group}); 4165 4166 // Now build a new slice for the alloca. 4167 NewSlices.push_back( 4168 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 4169 &PStore->getOperandUse(PStore->getPointerOperandIndex()), 4170 /*IsSplittable*/ false)); 4171 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 4172 << ", " << NewSlices.back().endOffset() 4173 << "): " << *PStore << "\n"); 4174 if (!SplitLoads) { 4175 LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); 4176 } 4177 4178 // See if we've finished all the splits. 4179 if (Idx >= Size) 4180 break; 4181 4182 // Setup the next partition. 4183 PartOffset = Offsets.Splits[Idx]; 4184 ++Idx; 4185 PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; 4186 } 4187 4188 // We want to immediately iterate on any allocas impacted by splitting 4189 // this load, which is only relevant if it isn't a load of this alloca and 4190 // thus we didn't already split the loads above. We also have to keep track 4191 // of any promotable allocas we split loads on as they can no longer be 4192 // promoted. 4193 if (!SplitLoads) { 4194 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) { 4195 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 4196 ResplitPromotableAllocas.insert(OtherAI); 4197 Worklist.insert(OtherAI); 4198 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 4199 LoadBasePtr->stripInBoundsOffsets())) { 4200 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 4201 Worklist.insert(OtherAI); 4202 } 4203 } 4204 4205 // Mark the original store as dead now that we've split it up and kill its 4206 // slice. 
Note that we leave the original load in place unless this store
4207 // was its only use. It may in turn be split up if it is an alloca load
4208 // for some other alloca, but it may be a normal load. This may introduce
4209 // redundant loads, but where those can be merged the rest of the optimizer
4210 // should handle the merging, and this uncovers SSA splits which is more
4211 // important. In practice, the original loads will almost always be fully
4212 // split and removed eventually, and the splits will be merged by any
4213 // trivial CSE, including instcombine.
4214 if (LI->hasOneUse()) {
4215 assert(*LI->user_begin() == SI && "Single use isn't this store!");
4216 DeadInsts.push_back(LI);
4217 }
4218 DeadInsts.push_back(SI);
4219 Offsets.S->kill();
4220 }
4221
4222 // Remove the killed slices that have been pre-split.
4223 llvm::erase_if(AS, [](const Slice &S) { return S.isDead(); });
4224
4225 // Insert our new slices. This will sort and merge them into the sorted
4226 // sequence.
4227 AS.insert(NewSlices);
4228
4229 LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
4230 #ifndef NDEBUG
4231 for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
4232 LLVM_DEBUG(AS.print(dbgs(), I, " "));
4233 #endif
4234
4235 // Finally, don't try to promote any allocas that now require re-splitting.
4236 // They have already been added to the worklist above.
4237 llvm::erase_if(PromotableAllocas, [&](AllocaInst *AI) {
4238 return ResplitPromotableAllocas.count(AI);
4239 });
4240
4241 return true;
4242 }
4243
4244 /// Rewrite an alloca partition's users.
4245 ///
4246 /// This routine drives both of the rewriting goals of the SROA pass. It tries
4247 /// to rewrite uses of an alloca partition to be conducive for SSA value
4248 /// promotion. If the partition needs a new, more refined alloca, this will
4249 /// build that new alloca, preserving as much type information as possible, and
4250 /// rewrite the uses of the old alloca to point at the new one and have the
4251 /// appropriate new offsets. It also evaluates how successful the rewrite was
4252 /// at enabling promotion and if it was successful queues the alloca to be
4253 /// promoted.
4254 AllocaInst *SROAPass::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
4255 Partition &P) {
4256 // Try to compute a friendly type for this partition of the alloca. This
4257 // won't always succeed, in which case we fall back to a legal integer type
4258 // or an i8 array of an appropriate size.
4259 Type *SliceTy = nullptr;
4260 const DataLayout &DL = AI.getModule()->getDataLayout();
4261 std::pair<Type *, IntegerType *> CommonUseTy =
4262 findCommonType(P.begin(), P.end(), P.endOffset());
4263 // Do all uses operate on the same type?
4264 if (CommonUseTy.first)
4265 if (DL.getTypeAllocSize(CommonUseTy.first).getFixedSize() >= P.size())
4266 SliceTy = CommonUseTy.first;
4267 // If not, can we find an appropriate subtype in the original allocated type?
4268 if (!SliceTy)
4269 if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
4270 P.beginOffset(), P.size()))
4271 SliceTy = TypePartitionTy;
4272 // If still not, can we use the largest bitwidth integer type used?
4273 if (!SliceTy && CommonUseTy.second) 4274 if (DL.getTypeAllocSize(CommonUseTy.second).getFixedSize() >= P.size()) 4275 SliceTy = CommonUseTy.second; 4276 if ((!SliceTy || (SliceTy->isArrayTy() && 4277 SliceTy->getArrayElementType()->isIntegerTy())) && 4278 DL.isLegalInteger(P.size() * 8)) 4279 SliceTy = Type::getIntNTy(*C, P.size() * 8); 4280 if (!SliceTy) 4281 SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size()); 4282 assert(DL.getTypeAllocSize(SliceTy).getFixedSize() >= P.size()); 4283 4284 bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL); 4285 4286 VectorType *VecTy = 4287 IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL); 4288 if (VecTy) 4289 SliceTy = VecTy; 4290 4291 // Check for the case where we're going to rewrite to a new alloca of the 4292 // exact same type as the original, and with the same access offsets. In that 4293 // case, re-use the existing alloca, but still run through the rewriter to 4294 // perform phi and select speculation. 4295 // P.beginOffset() can be non-zero even with the same type in a case with 4296 // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll). 4297 AllocaInst *NewAI; 4298 if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) { 4299 NewAI = &AI; 4300 // FIXME: We should be able to bail at this point with "nothing changed". 4301 // FIXME: We might want to defer PHI speculation until after here. 4302 // FIXME: return nullptr; 4303 } else { 4304 // Make sure the alignment is compatible with P.beginOffset(). 4305 const Align Alignment = commonAlignment(AI.getAlign(), P.beginOffset()); 4306 // If we will get at least this much alignment from the type alone, leave 4307 // the alloca's alignment unconstrained. 4308 const bool IsUnconstrained = Alignment <= DL.getABITypeAlign(SliceTy); 4309 NewAI = new AllocaInst( 4310 SliceTy, AI.getType()->getAddressSpace(), nullptr, 4311 IsUnconstrained ? DL.getPrefTypeAlign(SliceTy) : Alignment, 4312 AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI); 4313 // Copy the old AI debug location over to the new one. 4314 NewAI->setDebugLoc(AI.getDebugLoc()); 4315 ++NumNewAllocas; 4316 } 4317 4318 LLVM_DEBUG(dbgs() << "Rewriting alloca partition " 4319 << "[" << P.beginOffset() << "," << P.endOffset() 4320 << ") to: " << *NewAI << "\n"); 4321 4322 // Track the high watermark on the worklist as it is only relevant for 4323 // promoted allocas. We will reset it to this point if the alloca is not in 4324 // fact scheduled for promotion. 4325 unsigned PPWOldSize = PostPromotionWorklist.size(); 4326 unsigned NumUses = 0; 4327 SmallSetVector<PHINode *, 8> PHIUsers; 4328 SmallSetVector<SelectInst *, 8> SelectUsers; 4329 4330 AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(), 4331 P.endOffset(), IsIntegerPromotable, VecTy, 4332 PHIUsers, SelectUsers); 4333 bool Promotable = true; 4334 for (Slice *S : P.splitSliceTails()) { 4335 Promotable &= Rewriter.visit(S); 4336 ++NumUses; 4337 } 4338 for (Slice &S : P) { 4339 Promotable &= Rewriter.visit(&S); 4340 ++NumUses; 4341 } 4342 4343 NumAllocaPartitionUses += NumUses; 4344 MaxUsesPerAllocaPartition.updateMax(NumUses); 4345 4346 // Now that we've processed all the slices in the new partition, check if any 4347 // PHIs or Selects would block promotion. 
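// For example (hypothetical IR), a rewritten pointer PHI such as
//   %p = phi i8* [ %new.slice.ptr, %bb0 ], [ %other.ptr, %bb1 ]
// only permits promotion if, roughly speaking, loads through every incoming
// pointer can be safely speculated; otherwise Promotable is cleared and both
// speculation worklists are dropped below.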
4348 for (PHINode *PHI : PHIUsers)
4349 if (!isSafePHIToSpeculate(*PHI)) {
4350 Promotable = false;
4351 PHIUsers.clear();
4352 SelectUsers.clear();
4353 break;
4354 }
4355
4356 for (SelectInst *Sel : SelectUsers)
4357 if (!isSafeSelectToSpeculate(*Sel)) {
4358 Promotable = false;
4359 PHIUsers.clear();
4360 SelectUsers.clear();
4361 break;
4362 }
4363
4364 if (Promotable) {
4365 for (Use *U : AS.getDeadUsesIfPromotable()) {
4366 auto *OldInst = dyn_cast<Instruction>(U->get());
4367 Value::dropDroppableUse(*U);
4368 if (OldInst)
4369 if (isInstructionTriviallyDead(OldInst))
4370 DeadInsts.push_back(OldInst);
4371 }
4372 if (PHIUsers.empty() && SelectUsers.empty()) {
4373 // Promote the alloca.
4374 PromotableAllocas.push_back(NewAI);
4375 } else {
4376 // If we have either PHIs or Selects to speculate, add them to those
4377 // worklists and re-queue the new alloca so that we promote it on the
4378 // next iteration.
4379 for (PHINode *PHIUser : PHIUsers)
4380 SpeculatablePHIs.insert(PHIUser);
4381 for (SelectInst *SelectUser : SelectUsers)
4382 SpeculatableSelects.insert(SelectUser);
4383 Worklist.insert(NewAI);
4384 }
4385 } else {
4386 // Drop any post-promotion work items if promotion didn't happen.
4387 while (PostPromotionWorklist.size() > PPWOldSize)
4388 PostPromotionWorklist.pop_back();
4389
4390 // We couldn't promote and we didn't create a new partition, nothing
4391 // happened.
4392 if (NewAI == &AI)
4393 return nullptr;
4394
4395 // If we can't promote the alloca, iterate on it to check for new
4396 // refinements exposed by splitting the current alloca. Don't iterate on an
4397 // alloca which didn't actually change and didn't get promoted.
4398 Worklist.insert(NewAI);
4399 }
4400
4401 return NewAI;
4402 }
4403
4404 /// Walks the slices of an alloca and forms partitions based on them,
4405 /// rewriting each of their uses.
4406 bool SROAPass::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
4407 if (AS.begin() == AS.end())
4408 return false;
4409
4410 unsigned NumPartitions = 0;
4411 bool Changed = false;
4412 const DataLayout &DL = AI.getModule()->getDataLayout();
4413
4414 // First try to pre-split loads and stores.
4415 Changed |= presplitLoadsAndStores(AI, AS);
4416
4417 // Now that we have identified any pre-splitting opportunities,
4418 // mark loads and stores unsplittable except for the following case.
4419 // We leave a slice splittable if all other slices are disjoint or fully
4420 // included in the slice, such as whole-alloca loads and stores.
4421 // If we fail to split these during pre-splitting, we want to force them
4422 // to be rewritten into a partition.
4423 bool IsSorted = true;
4424
4425 uint64_t AllocaSize =
4426 DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize();
4427 const uint64_t MaxBitVectorSize = 1024;
4428 if (AllocaSize <= MaxBitVectorSize) {
4429 // If a byte boundary is included in any load or store, a slice starting or
4430 // ending at the boundary is not splittable.
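// Worked example (made-up offsets): in a 16-byte alloca where some access
// covers bytes [4, 12), every interior offset 5..11 is cleared in the bit
// vector below, so any splittable load or store slice whose begin or end
// falls on one of those offsets is forced to be unsplittable, while slices
// whose ends line up exactly with offsets 4 or 12 may remain splittable.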
    SmallBitVector SplittableOffset(AllocaSize + 1, true);
    for (Slice &S : AS)
      for (unsigned O = S.beginOffset() + 1;
           O < S.endOffset() && O < AllocaSize; O++)
        SplittableOffset.reset(O);

    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
          (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  } else {
    // We only allow whole-alloca splittable loads and stores
    // for a large alloca to avoid creating too large a BitVector.
    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  }

  if (!IsSorted)
    llvm::sort(AS);

  /// Describes the allocas introduced by rewritePartition in order to migrate
  /// the debug info.
  struct Fragment {
    AllocaInst *Alloca;
    uint64_t Offset;
    uint64_t Size;
    Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
        : Alloca(AI), Offset(O), Size(S) {}
  };
  SmallVector<Fragment, 4> Fragments;

  // Rewrite each partition.
  for (auto &P : AS.partitions()) {
    if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
      Changed = true;
      if (NewAI != &AI) {
        uint64_t SizeOfByte = 8;
        uint64_t AllocaSize =
            DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedSize();
        // Don't include any padding.
        uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
        Fragments.push_back(
            Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
      }
    }
    ++NumPartitions;
  }

  NumAllocaPartitions += NumPartitions;
  MaxPartitionsPerAlloca.updateMax(NumPartitions);

  // Migrate debug information from the old alloca to the new alloca(s)
  // and the individual partitions.
  TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
  for (DbgVariableIntrinsic *DbgDeclare : DbgDeclares) {
    auto *Expr = DbgDeclare->getExpression();
    DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
    uint64_t AllocaSize =
        DL.getTypeSizeInBits(AI.getAllocatedType()).getFixedSize();
    for (auto Fragment : Fragments) {
      // Create a fragment expression describing the new partition or reuse
      // AI's expression if there is only one partition.
      auto *FragmentExpr = Expr;
      if (Fragment.Size < AllocaSize || Expr->isFragment()) {
        // If this alloca is already a scalar replacement of a larger
        // aggregate, Fragment.Offset describes the offset inside the scalar.
        auto ExprFragment = Expr->getFragmentInfo();
        uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
        uint64_t Start = Offset + Fragment.Offset;
        uint64_t Size = Fragment.Size;
        if (ExprFragment) {
          uint64_t AbsEnd =
              ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
          if (Start >= AbsEnd)
            // No need to describe a SROAed padding.
            continue;
          Size = std::min(Size, AbsEnd - Start);
        }
        // The new, smaller fragment is stenciled out from the old fragment.
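        // createFragmentExpression interprets the offset relative to any
        // fragment already present in Expr, so rebase Start onto the original
        // fragment before building the new expression.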
        if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
          assert(Start >= OrigFragment->OffsetInBits &&
                 "new fragment is outside of original fragment");
          Start -= OrigFragment->OffsetInBits;
        }

        // The alloca may be larger than the variable.
        auto VarSize = DbgDeclare->getVariable()->getSizeInBits();
        if (VarSize) {
          if (Size > *VarSize)
            Size = *VarSize;
          if (Size == 0 || Start + Size > *VarSize)
            continue;
        }

        // Avoid creating a fragment expression that covers the entire
        // variable.
        if (!VarSize || *VarSize != Size) {
          if (auto E =
                  DIExpression::createFragmentExpression(Expr, Start, Size))
            FragmentExpr = *E;
          else
            continue;
        }
      }

      // Remove any existing intrinsics on the new alloca describing
      // the variable fragment.
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca)) {
        auto SameVariableFragment = [](const DbgVariableIntrinsic *LHS,
                                       const DbgVariableIntrinsic *RHS) {
          return LHS->getVariable() == RHS->getVariable() &&
                 LHS->getDebugLoc()->getInlinedAt() ==
                     RHS->getDebugLoc()->getInlinedAt();
        };
        if (SameVariableFragment(OldDII, DbgDeclare))
          OldDII->eraseFromParent();
      }

      DIB.insertDeclare(Fragment.Alloca, DbgDeclare->getVariable(),
                        FragmentExpr, DbgDeclare->getDebugLoc(), &AI);
    }
  }
  return Changed;
}

/// Clobber a use with poison, deleting the used value if it becomes dead.
void SROAPass::clobberUse(Use &U) {
  Value *OldV = U;
  // Replace the use with a poison value.
  U = PoisonValue::get(OldV->getType());

  // Check for this making an instruction dead. We have to garbage collect
  // all the dead instructions to ensure the uses of any alloca end up being
  // minimal.
  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
    if (isInstructionTriviallyDead(OldI)) {
      DeadInsts.push_back(OldI);
    }
}

/// Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROAPass::runOnAlloca(AllocaInst &AI) {
  LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
  ++NumAllocasAnalyzed;

  // Special case dead allocas, as they're trivial.
  if (AI.use_empty()) {
    AI.eraseFromParent();
    return true;
  }
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // Skip alloca forms that this analysis can't handle.
  auto *AT = AI.getAllocatedType();
  if (AI.isArrayAllocation() || !AT->isSized() || isa<ScalableVectorType>(AT) ||
      DL.getTypeAllocSize(AT).getFixedSize() == 0)
    return false;

  bool Changed = false;

  // First, split any FCA loads and stores touching this alloca to promote
  // better splitting and promotion opportunities.
  IRBuilderTy IRB(&AI);
  AggLoadStoreRewriter AggRewriter(DL, IRB);
  Changed |= AggRewriter.rewrite(AI);

  // Build the slices using a recursive instruction-visiting builder.
  AllocaSlices AS(DL, AI);
  LLVM_DEBUG(AS.print(dbgs()));
  if (AS.isEscaped())
    return Changed;

  // Delete all the dead users of this alloca before splitting and rewriting it.
  for (Instruction *DeadUser : AS.getDeadUsers()) {
    // Free up everything used by this instruction.
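    // Clobbering each operand with poison lets operand instructions that
    // become trivially dead be queued for deletion as well.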
    for (Use &DeadOp : DeadUser->operands())
      clobberUse(DeadOp);

    // Now replace the uses of this instruction.
    DeadUser->replaceAllUsesWith(PoisonValue::get(DeadUser->getType()));

    // And mark it for deletion.
    DeadInsts.push_back(DeadUser);
    Changed = true;
  }
  for (Use *DeadOp : AS.getDeadOperands()) {
    clobberUse(*DeadOp);
    Changed = true;
  }

  // No slices to split. Leave the dead alloca for a later pass to clean up.
  if (AS.begin() == AS.end())
    return Changed;

  Changed |= splitAlloca(AI, AS);

  LLVM_DEBUG(dbgs() << "  Speculating PHIs\n");
  while (!SpeculatablePHIs.empty())
    speculatePHINodeLoads(IRB, *SpeculatablePHIs.pop_back_val());

  LLVM_DEBUG(dbgs() << "  Speculating Selects\n");
  while (!SpeculatableSelects.empty())
    speculateSelectInstLoads(IRB, *SpeculatableSelects.pop_back_val());

  return Changed;
}

/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROAPass::deleteDeadInstructions(
    SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
  bool Changed = false;
  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
    if (!I)
      continue;
    LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

    // If the instruction is an alloca, find the possible dbg.declare connected
    // to it, and remove it too. We must do this before calling RAUW or we will
    // not be able to find it.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      DeletedAllocas.insert(AI);
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
        OldDII->eraseFromParent();
    }

    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (Use &Operand : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.push_back(U);
      }

    ++NumDeleted;
    I->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
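/// At present this delegates to mem2reg (PromoteMemToReg) using the cached
/// dominator tree and assumption cache.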
bool SROAPass::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
  PromoteMemToReg(PromotableAllocas, *DT, AC);
  PromotableAllocas.clear();
  return true;
}

PreservedAnalyses SROAPass::runImpl(Function &F, DominatorTree &RunDT,
                                    AssumptionCache &RunAC) {
  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DT = &RunDT;
  AC = &RunAC;

  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      if (isa<ScalableVectorType>(AI->getAllocatedType())) {
        if (isAllocaPromotable(AI))
          PromotableAllocas.push_back(AI);
      } else {
        Worklist.insert(AI);
      }
    }
  }

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      Changed |= deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        llvm::erase_if(PromotableAllocas, IsInSet);
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

PreservedAnalyses SROAPass::run(Function &F, FunctionAnalysisManager &AM) {
  return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
                 AM.getResult<AssumptionAnalysis>(F));
}

/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
///
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
class llvm::sroa::SROALegacyPass : public FunctionPass {
  /// The SROA implementation.
  SROAPass Impl;

public:
  static char ID;

  SROALegacyPass() : FunctionPass(ID) {
    initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto PA = Impl.runImpl(
        F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
    return !PA.areAllPreserved();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override { return "SROA"; }
};

char SROALegacyPass::ID = 0;

FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }

INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
                      "Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)