//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/SROA.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
"llvm/Transforms/Utils/Local.h" 86 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 87 #include <algorithm> 88 #include <cassert> 89 #include <cstddef> 90 #include <cstdint> 91 #include <cstring> 92 #include <iterator> 93 #include <string> 94 #include <tuple> 95 #include <utility> 96 #include <variant> 97 #include <vector> 98 99 using namespace llvm; 100 101 #define DEBUG_TYPE "sroa" 102 103 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); 104 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); 105 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca"); 106 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten"); 107 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition"); 108 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); 109 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); 110 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); 111 STATISTIC(NumLoadsPredicated, 112 "Number of loads rewritten into predicated loads to allow promotion"); 113 STATISTIC( 114 NumStoresPredicated, 115 "Number of stores rewritten into predicated loads to allow promotion"); 116 STATISTIC(NumDeleted, "Number of instructions deleted"); 117 STATISTIC(NumVectorized, "Number of vectorized aggregates"); 118 119 /// Hidden option to experiment with completely strict handling of inbounds 120 /// GEPs. 121 static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false), 122 cl::Hidden); 123 /// Disable running mem2reg during SROA in order to test or debug SROA. 124 static cl::opt<bool> SROASkipMem2Reg("sroa-skip-mem2reg", cl::init(false), 125 cl::Hidden); 126 namespace { 127 128 class AllocaSliceRewriter; 129 class AllocaSlices; 130 class Partition; 131 132 class SelectHandSpeculativity { 133 unsigned char Storage = 0; // None are speculatable by default. 134 using TrueVal = Bitfield::Element<bool, 0, 1>; // Low 0'th bit. 135 using FalseVal = Bitfield::Element<bool, 1, 1>; // Low 1'th bit. 136 public: 137 SelectHandSpeculativity() = default; 138 SelectHandSpeculativity &setAsSpeculatable(bool isTrueVal); 139 bool isSpeculatable(bool isTrueVal) const; 140 bool areAllSpeculatable() const; 141 bool areAnySpeculatable() const; 142 bool areNoneSpeculatable() const; 143 // For interop as int half of PointerIntPair. 144 explicit operator intptr_t() const { return static_cast<intptr_t>(Storage); } 145 explicit SelectHandSpeculativity(intptr_t Storage_) : Storage(Storage_) {} 146 }; 147 static_assert(sizeof(SelectHandSpeculativity) == sizeof(unsigned char)); 148 149 using PossiblySpeculatableLoad = 150 PointerIntPair<LoadInst *, 2, SelectHandSpeculativity>; 151 using UnspeculatableStore = StoreInst *; 152 using RewriteableMemOp = 153 std::variant<PossiblySpeculatableLoad, UnspeculatableStore>; 154 using RewriteableMemOps = SmallVector<RewriteableMemOp, 2>; 155 156 /// An optimization pass providing Scalar Replacement of Aggregates. 157 /// 158 /// This pass takes allocations which can be completely analyzed (that is, they 159 /// don't escape) and tries to turn them into scalar SSA values. There are 160 /// a few steps to this process. 161 /// 162 /// 1) It takes allocations of aggregates and analyzes the ways in which they 163 /// are used to try to split them into smaller allocations, ideally of 164 /// a single scalar data type. 
///    It will split up memcpy and memset accesses
///    as necessary and try to isolate individual scalar accesses.
/// 2) It will transform accesses into forms which are suitable for SSA value
///    promotion. This can be replacing a memset with a scalar store of an
///    integer value, or it can involve speculating operations on a PHI or
///    select to be a PHI or select of the results.
/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
///    onto insert and extract operations on a vector value, and convert them
///    to this form. By doing so, it will enable promotion of vector aggregates
///    to SSA vector values.
class SROA {
  LLVMContext *const C;
  DomTreeUpdater *const DTU;
  AssumptionCache *const AC;
  const bool PreserveCFG;

  /// Worklist of alloca instructions to simplify.
  ///
  /// Each alloca in the function is added to this. Each new alloca formed gets
  /// added to it as well to recursively simplify unless that alloca can be
  /// directly promoted. Finally, each time we rewrite a use of an alloca other
  /// than the one being actively rewritten, we add it back onto the list if
  /// not already present to ensure it is re-visited.
  SmallSetVector<AllocaInst *, 16> Worklist;

  /// A collection of instructions to delete.
  /// We try to batch deletions to simplify code and make things a bit more
  /// efficient. We also make sure there are no dangling pointers.
  SmallVector<WeakVH, 8> DeadInsts;

  /// Post-promotion worklist.
  ///
  /// Sometimes we discover an alloca which has a high probability of becoming
  /// viable for SROA after a round of promotion takes place. In those cases,
  /// the alloca is enqueued here for re-processing.
  ///
  /// Note that we have to be very careful to clear allocas out of this list in
  /// the event they are deleted.
  SmallSetVector<AllocaInst *, 16> PostPromotionWorklist;

  /// A collection of alloca instructions we can directly promote.
  std::vector<AllocaInst *> PromotableAllocas;

  /// A worklist of PHIs to speculate prior to promoting allocas.
  ///
  /// All of these PHIs have been checked for the safety of speculation and by
  /// being speculated will allow promoting allocas currently in the promotable
  /// queue.
  SmallSetVector<PHINode *, 8> SpeculatablePHIs;

  /// A worklist of select instructions to rewrite prior to promoting
  /// allocas.
  SmallMapVector<SelectInst *, RewriteableMemOps, 8> SelectsToRewrite;

  /// Select instructions that use an alloca and are subsequently loaded can be
  /// rewritten to load both input pointers and then select between the
  /// results, allowing the load of the alloca to be promoted.
  /// From this:
  ///   %P2 = select i1 %cond, ptr %Alloca, ptr %Other
  ///   %V = load <type>, ptr %P2
  /// to:
  ///   %V1 = load <type>, ptr %Alloca      -> will be mem2reg'd
  ///   %V2 = load <type>, ptr %Other
  ///   %V = select i1 %cond, <type> %V1, <type> %V2
  ///
  /// We can do this to a select if its only uses are loads and if either
  /// operand of the select can be loaded unconditionally, or if we are
  /// allowed to perform CFG modifications.
  /// If there is an intervening bitcast with a single use of the load, we
  /// also allow the promotion.
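  ///
  /// Illustrative example: when neither operand is known to be safe to load
  /// unconditionally and CFG changes are permitted, the load is instead
  /// rewritten into a predicated form that branches on %cond and loads from
  /// only the selected pointer (counted by the NumLoadsPredicated and
  /// NumStoresPredicated statistics above).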
  static std::optional<RewriteableMemOps>
  isSafeSelectToSpeculate(SelectInst &SI, bool PreserveCFG);

public:
  SROA(LLVMContext *C, DomTreeUpdater *DTU, AssumptionCache *AC,
       SROAOptions PreserveCFG_)
      : C(C), DTU(DTU), AC(AC),
        PreserveCFG(PreserveCFG_ == SROAOptions::PreserveCFG) {}

  /// Main run method used by both the SROAPass and by the legacy pass.
  std::pair<bool /*Changed*/, bool /*CFGChanged*/> runSROA(Function &F);

private:
  friend class AllocaSliceRewriter;

  bool presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS);
  AllocaInst *rewritePartition(AllocaInst &AI, AllocaSlices &AS, Partition &P);
  bool splitAlloca(AllocaInst &AI, AllocaSlices &AS);
  std::pair<bool /*Changed*/, bool /*CFGChanged*/> runOnAlloca(AllocaInst &AI);
  void clobberUse(Use &U);
  bool deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
  bool promoteAllocas(Function &F);
};

} // end anonymous namespace

/// Calculate the fragment of a variable to use when slicing a store
/// based on the slice dimensions, existing fragment, and base storage
/// fragment.
/// Results:
///   UseFrag   - Use Target as the new fragment.
///   UseNoFrag - The new slice already covers the whole variable.
///   Skip      - The new alloca slice doesn't include this variable.
/// FIXME: Can we use calculateFragmentIntersect instead?
namespace {
enum FragCalcResult { UseFrag, UseNoFrag, Skip };
}
static FragCalcResult
calculateFragment(DILocalVariable *Variable,
                  uint64_t NewStorageSliceOffsetInBits,
                  uint64_t NewStorageSliceSizeInBits,
                  std::optional<DIExpression::FragmentInfo> StorageFragment,
                  std::optional<DIExpression::FragmentInfo> CurrentFragment,
                  DIExpression::FragmentInfo &Target) {
  // If the base storage describes part of the variable, apply the offset and
  // the size constraint.
  if (StorageFragment) {
    Target.SizeInBits =
        std::min(NewStorageSliceSizeInBits, StorageFragment->SizeInBits);
    Target.OffsetInBits =
        NewStorageSliceOffsetInBits + StorageFragment->OffsetInBits;
  } else {
    Target.SizeInBits = NewStorageSliceSizeInBits;
    Target.OffsetInBits = NewStorageSliceOffsetInBits;
  }

  // If this slice extracts the entirety of an independent variable from a
  // larger alloca, do not produce a fragment expression, as the variable is
  // not fragmented.
  if (!CurrentFragment) {
    if (auto Size = Variable->getSizeInBits()) {
      // Treat the current fragment as covering the whole variable.
      CurrentFragment = DIExpression::FragmentInfo(*Size, 0);
      if (Target == CurrentFragment)
        return UseNoFrag;
    }
  }

  // No additional work to do if there isn't a fragment already, or there is
  // but it already exactly describes the new assignment.
  if (!CurrentFragment || *CurrentFragment == Target)
    return UseFrag;

  // Reject the target fragment if it doesn't fit wholly within the current
  // fragment. TODO: We could instead chop up the target to fit in the case of
  // a partial overlap.
  if (Target.startInBits() < CurrentFragment->startInBits() ||
      Target.endInBits() > CurrentFragment->endInBits())
    return Skip;

  // Target fits within the current fragment, return it.
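  // Illustrative example: slicing bits [0, 32) out of storage covering a
  // 64-bit variable with no pre-existing fragment yields
  // Target = {OffsetInBits: 0, SizeInBits: 32} and the result UseFrag;
  // slicing all 64 bits instead returns UseNoFrag above.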
  return UseFrag;
}

static DebugVariable getAggregateVariable(DbgVariableIntrinsic *DVI) {
  return DebugVariable(DVI->getVariable(), std::nullopt,
                       DVI->getDebugLoc().getInlinedAt());
}

/// Find linked dbg.assign and generate a new one with the correct
/// FragmentInfo. Link Inst to the new dbg.assign. If Value is nullptr the
/// value component is copied from the old dbg.assign to the new.
/// \param OldAlloca             Alloca for the variable before splitting.
/// \param IsSplit               True if the store (not necessarily alloca)
///                              is being split.
/// \param OldAllocaOffsetInBits Offset of the slice taken from OldAlloca.
/// \param SliceSizeInBits       New number of bits being written to.
/// \param OldInst               Instruction that is being split.
/// \param Inst                  New instruction performing this part of the
///                              split store.
/// \param Dest                  Store destination.
/// \param Value                 Stored value.
/// \param DL                    Data layout.
static void migrateDebugInfo(AllocaInst *OldAlloca, bool IsSplit,
                             uint64_t OldAllocaOffsetInBits,
                             uint64_t SliceSizeInBits, Instruction *OldInst,
                             Instruction *Inst, Value *Dest, Value *Value,
                             const DataLayout &DL) {
  auto MarkerRange = at::getAssignmentMarkers(OldInst);
  // Nothing to do if OldInst has no linked dbg.assign intrinsics.
  if (MarkerRange.empty())
    return;

  LLVM_DEBUG(dbgs() << "  migrateDebugInfo\n");
  LLVM_DEBUG(dbgs() << "    OldAlloca: " << *OldAlloca << "\n");
  LLVM_DEBUG(dbgs() << "    IsSplit: " << IsSplit << "\n");
  LLVM_DEBUG(dbgs() << "    OldAllocaOffsetInBits: " << OldAllocaOffsetInBits
                    << "\n");
  LLVM_DEBUG(dbgs() << "    SliceSizeInBits: " << SliceSizeInBits << "\n");
  LLVM_DEBUG(dbgs() << "    OldInst: " << *OldInst << "\n");
  LLVM_DEBUG(dbgs() << "    Inst: " << *Inst << "\n");
  LLVM_DEBUG(dbgs() << "    Dest: " << *Dest << "\n");
  if (Value)
    LLVM_DEBUG(dbgs() << "    Value: " << *Value << "\n");

  /// Map of aggregate variables to their fragment associated with OldAlloca.
  DenseMap<DebugVariable, std::optional<DIExpression::FragmentInfo>>
      BaseFragments;
  for (auto *DAI : at::getAssignmentMarkers(OldAlloca))
    BaseFragments[getAggregateVariable(DAI)] =
        DAI->getExpression()->getFragmentInfo();

  // The new inst needs a DIAssignID unique metadata tag (if OldInst has
  // one). It shouldn't already have one: assert this assumption.
  assert(!Inst->getMetadata(LLVMContext::MD_DIAssignID));
  DIAssignID *NewID = nullptr;
  auto &Ctx = Inst->getContext();
  DIBuilder DIB(*OldInst->getModule(), /*AllowUnresolved*/ false);
  assert(OldAlloca->isStaticAlloca());

  for (DbgAssignIntrinsic *DbgAssign : MarkerRange) {
    LLVM_DEBUG(dbgs() << "      existing dbg.assign is: " << *DbgAssign
                      << "\n");
    auto *Expr = DbgAssign->getExpression();
    bool SetKillLocation = false;

    if (IsSplit) {
      std::optional<DIExpression::FragmentInfo> BaseFragment;
      {
        auto R = BaseFragments.find(getAggregateVariable(DbgAssign));
        if (R == BaseFragments.end())
          continue;
        BaseFragment = R->second;
      }
      std::optional<DIExpression::FragmentInfo> CurrentFragment =
          Expr->getFragmentInfo();
      DIExpression::FragmentInfo NewFragment;
      FragCalcResult Result = calculateFragment(
          DbgAssign->getVariable(), OldAllocaOffsetInBits, SliceSizeInBits,
          BaseFragment, CurrentFragment, NewFragment);

      if (Result == Skip)
        continue;
      if (Result == UseFrag && !(NewFragment == CurrentFragment)) {
        if (CurrentFragment) {
          // Rewrite NewFragment to be relative to the existing one (this is
          // what createFragmentExpression wants). CalculateFragment has
          // already resolved the size for us. FIXME: Should it return the
          // relative fragment too?
          NewFragment.OffsetInBits -= CurrentFragment->OffsetInBits;
        }
        // Add the new fragment info to the existing expression if possible.
        if (auto E = DIExpression::createFragmentExpression(
                Expr, NewFragment.OffsetInBits, NewFragment.SizeInBits)) {
          Expr = *E;
        } else {
          // Otherwise, add the new fragment info to an empty expression and
          // discard the value component of this dbg.assign as the value cannot
          // be computed with the new fragment.
          Expr = *DIExpression::createFragmentExpression(
              DIExpression::get(Expr->getContext(), std::nullopt),
              NewFragment.OffsetInBits, NewFragment.SizeInBits);
          SetKillLocation = true;
        }
      }
    }

    // If we haven't created a DIAssignID ID do that now and attach it to Inst.
    if (!NewID) {
      NewID = DIAssignID::getDistinct(Ctx);
      Inst->setMetadata(LLVMContext::MD_DIAssignID, NewID);
    }

    ::Value *NewValue = Value ? Value : DbgAssign->getValue();
    auto *NewAssign = DIB.insertDbgAssign(
        Inst, NewValue, DbgAssign->getVariable(), Expr, Dest,
        DIExpression::get(Ctx, std::nullopt), DbgAssign->getDebugLoc());

    // If we've updated the value but the original dbg.assign has an arglist
    // then kill it now - we can't use the requested new value.
    // We can't replace the DIArgList with the new value as it'd leave
    // the DIExpression in an invalid state (DW_OP_LLVM_arg operands without
    // an arglist). And we can't keep the DIArgList in case the linked store
    // is being split - in which case the DIArgList + expression may no longer
    // be computing the correct value.
    // This should be a very rare situation as it requires the value being
    // stored to differ from the dbg.assign (i.e., the value has been
    // represented differently in the debug intrinsic for some reason).
    SetKillLocation |=
        Value && (DbgAssign->hasArgList() ||
                  !DbgAssign->getExpression()->isSingleLocationExpression());
    if (SetKillLocation)
      NewAssign->setKillLocation();

    // We could use more precision here at the cost of some additional (code)
    // complexity - if the original dbg.assign was adjacent to its store, we
    // could position this new dbg.assign adjacent to its store rather than the
    // old dbg.assign. That would result in interleaved dbg.assigns rather than
    // what we get now:
    //   split store !1
    //   split store !2
    //   dbg.assign !1
    //   dbg.assign !2
    // This (current behaviour) results in debug assignments being noted as
    // slightly offset (in code) from the store. In practice this should have
    // little effect on the debugging experience due to the fact that all the
    // split stores should get the same line number.
    NewAssign->moveBefore(DbgAssign);

    NewAssign->setDebugLoc(DbgAssign->getDebugLoc());
    LLVM_DEBUG(dbgs() << "Created new assign intrinsic: " << *NewAssign
                      << "\n");
  }
}

namespace {

/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter final : public IRBuilderDefaultInserter {
  std::string Prefix;

  Twine getNameWithPrefix(const Twine &Name) const {
    return Name.isTriviallyEmpty() ? Name : Prefix + Name;
  }

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const override {
    IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
                                           InsertPt);
  }
};

/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;

/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// The beginning offset of the range.
  uint64_t BeginOffset = 0;

  /// The ending offset, not included in the range.
  uint64_t EndOffset = 0;

  /// Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() = default;

  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == nullptr; }
  void kill() { UseAndIsSplittable.setPointer(nullptr); }

  /// Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing.
  /// Thus the spanning range comes first in a cluster with the
  /// same start position.
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset())
      return true;
    if (beginOffset() > RHS.beginOffset())
      return false;
    if (isSplittable() != RHS.isSplittable())
      return !isSplittable();
    if (endOffset() > RHS.endOffset())
      return true;
    return false;
  }

  /// Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};

/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class AllocaSlices {
public:
  /// Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// Support for iterating over the slices.
  /// @{
  using iterator = SmallVectorImpl<Slice>::iterator;
  using range = iterator_range<iterator>;

  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  using const_iterator = SmallVectorImpl<Slice>::const_iterator;
  using const_range = iterator_range<const_iterator>;

  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// Erase a range of slices.
  void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }

  /// Insert new slices for this alloca.
  ///
  /// This moves the slices into the alloca's slices collection, and re-sorts
  /// everything so that the usual ordering properties of the alloca's slices
  /// hold.
  void insert(ArrayRef<Slice> NewSlices) {
    int OldSize = Slices.size();
    Slices.append(NewSlices.begin(), NewSlices.end());
    auto SliceI = Slices.begin() + OldSize;
    llvm::sort(SliceI, Slices.end());
    std::inplace_merge(Slices.begin(), SliceI, Slices.end());
  }

  // Forward declare the iterator and range accessor for walking the
  // partitions.
  class partition_iterator;
  iterator_range<partition_iterator> partitions();

  /// Access the dead users for this alloca.
  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }

  /// Access Uses that should be dropped if the alloca is promotable.
  ArrayRef<Use *> getDeadUsesIfPromotable() const {
    return DeadUseIfPromotable;
  }

  /// Access the dead operands referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;

  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with poison as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// Uses which will become dead if we can promote the alloca.
  SmallVector<Use *, 8> DeadUseIfPromotable;

  /// Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// poison when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
  /// want to swap this particular input for poison to simplify the use lists
  /// of the alloca.
  SmallVector<Use *, 8> DeadOperands;
};

/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
///
/// Objects of this type are produced by traversing the alloca's slices, but
/// are only ephemeral and not persistent.
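///
/// Illustrative example: given an unsplittable slice [0, 8) and a splittable
/// slice [4, 16), the iterator below first forms the partition [0, 8)
/// containing both slices, and then an "empty" partition [8, 16) whose only
/// content is the split tail of the second slice.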
class Partition {
private:
  friend class AllocaSlices;
  friend class AllocaSlices::partition_iterator;

  using iterator = AllocaSlices::iterator;

  /// The beginning and ending offsets of the alloca for this
  /// partition.
  uint64_t BeginOffset = 0, EndOffset = 0;

  /// The start and end iterators of this partition.
  iterator SI, SJ;

  /// A collection of split slice tails overlapping the partition.
  SmallVector<Slice *, 4> SplitTails;

  /// Raw constructor builds an empty partition starting and ending at
  /// the given iterator.
  Partition(iterator SI) : SI(SI), SJ(SI) {}

public:
  /// The start offset of this partition.
  ///
  /// All of the contained slices start at or after this offset.
  uint64_t beginOffset() const { return BeginOffset; }

  /// The end offset of this partition.
  ///
  /// All of the contained slices end at or before this offset.
  uint64_t endOffset() const { return EndOffset; }

  /// The size of the partition.
  ///
  /// Note that this can never be zero.
  uint64_t size() const {
    assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
    return EndOffset - BeginOffset;
  }

  /// Test whether this partition contains no slices, and merely spans
  /// a region occupied by split slices.
  bool empty() const { return SI == SJ; }

  /// \name Iterate slices that start within the partition.
  /// These may be splittable or unsplittable. They have a begin offset >= the
  /// partition begin offset.
  /// @{
  // FIXME: We should probably define a "concat_iterator" helper and use that
  // to stitch together pointee_iterators over the split tails and the
  // contiguous iterators of the partition. That would give a much nicer
  // interface here. We could then additionally expose filtered iterators for
  // split, unsplit, and unsplittable slices based on the usage patterns.
  iterator begin() const { return SI; }
  iterator end() const { return SJ; }
  /// @}

  /// Get the sequence of split slice tails.
  ///
  /// These tails are of slices which start before this partition but are
  /// split and overlap into the partition. We accumulate these while forming
  /// partitions.
  ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
};

} // end anonymous namespace

/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
/// efficiency reasons, and re-use a single storage area to maintain the
/// current set of split slices.
///
/// It is templated on the slice iterator type to use so that it can operate
/// with either const or non-const slice iterators.
class AllocaSlices::partition_iterator
    : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
                                  Partition> {
  friend class AllocaSlices;

  /// Most of the state for walking the partitions is held in a class
  /// with a nice interface for examining them.
  Partition P;

  /// We need to keep the end of the slices to know when to stop.
  AllocaSlices::iterator SE;

  /// We also need to keep track of the maximum split end offset seen.
  /// FIXME: Do we really?
  uint64_t MaxSplitSliceEndOffset = 0;

  /// Sets the partition to be empty at given iterator, and sets the
  /// end iterator.
  partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
      : P(SI), SE(SE) {
    // If not already at the end, advance our state to form the initial
    // partition.
    if (SI != SE)
      advance();
  }

  /// Advance the iterator to the next partition.
  ///
  /// Requires that the iterator not be at the end of the slices.
  void advance() {
    assert((P.SI != SE || !P.SplitTails.empty()) &&
           "Cannot advance past the end of the slices!");

    // Clear out any split uses which have ended.
    if (!P.SplitTails.empty()) {
      if (P.EndOffset >= MaxSplitSliceEndOffset) {
        // If we've finished all splits, this is easy.
        P.SplitTails.clear();
        MaxSplitSliceEndOffset = 0;
      } else {
        // Remove the uses which have ended in the prior partition. This
        // cannot change the max split slice end because we just checked that
        // the prior partition ended prior to that max.
        llvm::erase_if(P.SplitTails,
                       [&](Slice *S) { return S->endOffset() <= P.EndOffset; });
        assert(llvm::any_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() == MaxSplitSliceEndOffset;
                            }) &&
               "Could not find the current max split slice offset!");
        assert(llvm::all_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() <= MaxSplitSliceEndOffset;
                            }) &&
               "Max split slice end offset is not actually the max!");
      }
    }

    // If P.SI is already at the end, then we've cleared the split tail and
    // now have an end iterator.
    if (P.SI == SE) {
      assert(P.SplitTails.empty() && "Failed to clear the split slices!");
      return;
    }

    // If we had a non-empty partition previously, set up the state for
    // subsequent partitions.
    if (P.SI != P.SJ) {
      // Accumulate all the splittable slices which started in the old
      // partition into the split list.
      for (Slice &S : P)
        if (S.isSplittable() && S.endOffset() > P.EndOffset) {
          P.SplitTails.push_back(&S);
          MaxSplitSliceEndOffset =
              std::max(S.endOffset(), MaxSplitSliceEndOffset);
        }

      // Start from the end of the previous partition.
      P.SI = P.SJ;

      // If P.SI is now at the end, we at most have a tail of split slices.
      if (P.SI == SE) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = MaxSplitSliceEndOffset;
        return;
      }

      // If we have split slices and the next slice is after a gap and is
      // not splittable, immediately form an empty partition for the split
      // slices up until the next slice begins.
      if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
          !P.SI->isSplittable()) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = P.SI->beginOffset();
        return;
      }
    }

    // OK, we need to consume new slices. Set the end offset based on the
    // current slice, and step SJ past it. The beginning offset of the
    // partition is the beginning offset of the next slice unless we have
    // pre-existing split slices that are continuing, in which case we begin
    // at the prior end offset.
    P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
    P.EndOffset = P.SI->endOffset();
    ++P.SJ;

    // There are two strategies to form a partition based on whether the
    // partition starts with an unsplittable slice or a splittable slice.
    if (!P.SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at
      // the first slice and will extend through its end.
      assert(P.BeginOffset == P.SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
        if (!P.SJ->isSplittable())
          P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
        ++P.SJ;
      }

      // We have a partition across a set of overlapping unsplittable
      // slices.
      return;
    }

    // If we're starting with a splittable slice, then we need to form
    // a synthetic partition spanning it and any other overlapping splittable
    // slices.
    assert(P.SI->isSplittable() && "Forming a splittable partition!");

    // Collect all of the overlapping splittable slices.
    while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
           P.SJ->isSplittable()) {
      P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      ++P.SJ;
    }

    // Back up P.EndOffset if we ended the span early when encountering an
    // unsplittable slice. This synthesizes the early end offset of
    // a partition spanning only splittable slices.
    if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
      assert(!P.SJ->isSplittable());
      P.EndOffset = P.SJ->beginOffset();
    }
  }

public:
  bool operator==(const partition_iterator &RHS) const {
    assert(SE == RHS.SE &&
           "End iterators don't match between compared partition iterators!");

    // The observed positions of partitions are marked by the P.SI iterator
    // and the emptiness of the split slices. The latter is only relevant when
    // P.SI == SE, as the end iterator will additionally have an empty split
    // slices list, but the prior may have the same P.SI and a tail of split
    // slices.
    if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
      assert(P.SJ == RHS.P.SJ &&
             "Same set of slices formed two different sized partitions!");
      assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
             "Same slice position with differently sized non-empty split "
             "slice tails!");
      return true;
    }
    return false;
  }

  partition_iterator &operator++() {
    advance();
    return *this;
  }

  Partition &operator*() { return P; }
};

/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
  return make_range(partition_iterator(begin(), end()),
                    partition_iterator(end(), end()));
}

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}

/// A helper that folds a PHI node or a select.
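/// For example, %p = phi ptr [ %a, %bb1 ], [ %a, %bb2 ] folds to %a, and
/// %p = select i1 %c, ptr %a, ptr %a likewise folds to %a.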
static Value *foldPHINodeOrSelectInst(Instruction &I) {
  if (PHINode *PN = dyn_cast<PHINode>(&I)) {
    // If PN merges together the same value, return that value.
    return PN->hasConstantValue();
  }
  return foldSelectInst(cast<SelectInst>(I));
}

/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;

  using Base = PtrUseVisitor<SliceBuilder>;

  const uint64_t AllocSize;
  AllocaSlices &AS;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType()).getFixedValue()),
        AS(AS) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I).second)
      AS.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.uge(AllocSize)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
                        << Offset
                        << " which has zero size or starts outside of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
                        << Offset << " to remain within the " << AllocSize
                        << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    if (ASC.use_empty())
      return markAsDead(ASC);

    return Base::visitAddrSpaceCastInst(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    if (SROAStrictInbounds && GEPI.isInBounds()) {
      // FIXME: This is a manually un-factored variant of the basic code inside
      // of GEPs with checking of the inbounds invariant specified in the
      // langref in a very strict sense. If we ever want to enable
      // SROAStrictInbounds, this code should be factored cleanly into
      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
      // by writing out the code here where we have the underlying allocation
      // size readily available.
      APInt GEPOffset = Offset;
      const DataLayout &DL = GEPI.getModule()->getDataLayout();
      for (gep_type_iterator GTI = gep_type_begin(GEPI),
                             GTE = gep_type_end(GEPI);
           GTI != GTE; ++GTI) {
        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!OpC)
          break;

        // Handle a struct index, which adds its field offset to the pointer.
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = OpC->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          GEPOffset +=
              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
        } else {
          // For array or vector indices, scale the index by the size of the
          // type.
          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
          GEPOffset += Index * APInt(Offset.getBitWidth(),
                                     GTI.getSequentialElementStride(DL));
        }

        // If this index has computed an intermediate pointer which is not
        // inbounds, then the result of the GEP is a poison value and we can
        // delete it and all uses.
        if (GEPOffset.ugt(AllocSize))
          return markAsDead(GEPI);
      }
    }

    return Base::visitGetElementPtrInst(GEPI);
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of non-volatile loads and stores where the type is an
    // integer type. These may be used to implement 'memcpy' or other "transfer
    // of bits" patterns.
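    // Illustrative example: an i64 load used purely to shuttle bits can later
    // be presplit into smaller integer loads along partition boundaries; a
    // volatile access, or one whose type size differs from its store size,
    // must instead be kept whole.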
    bool IsSplittable =
        Ty->isIntegerTy() && !IsVolatile && DL.typeSizeEqualsStoreSize(Ty);

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    TypeSize Size = DL.getTypeStoreSize(LI.getType());
    if (Size.isScalable())
      return PI.setAborted(&LI);

    return handleLoadOrStore(LI.getType(), LI, Offset, Size.getFixedValue(),
                             LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    TypeSize StoreSize = DL.getTypeStoreSize(ValOp->getType());
    if (StoreSize.isScalable())
      return PI.setAborted(&SI);

    uint64_t Size = StoreSize.getFixedValue();

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
                        << Offset << " which extends past the end of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    insertUse(II, Offset,
              Length ? Length->getLimitedValue()
                     : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if already added to our partitions.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        AS.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = AS.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }

  // Disable SROA for any intrinsics except for lifetime invariants and
  // invariant group.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (II.isDroppable()) {
      AS.DeadUseIfPromotable.push_back(U);
      return;
    }

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.isLifetimeStartOrEnd()) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    if (II.isLaunderOrStripInvariantGroup()) {
      insertUse(II, Offset, AllocSize, true);
      enqueueUsers(II);
      return;
    }

    Base::visitIntrinsicInst(II);
  }

  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    const DataLayout &DL = Root->getModule()->getDataLayout();
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      std::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        TypeSize LoadSize = DL.getTypeStoreSize(LI->getType());
        if (LoadSize.isScalable()) {
          PI.setAborted(LI);
          return nullptr;
        }
        Size = std::max(Size, LoadSize.getFixedValue());
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        TypeSize StoreSize = DL.getTypeStoreSize(Op->getType());
        if (StoreSize.isScalable()) {
          PI.setAborted(SI);
          return nullptr;
        }
        Size = std::max(Size, StoreSize.getFixedValue());
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) {
        return I;
      }

      for (User *U : I->users())
        if (Visited.insert(cast<Instruction>(U)).second)
          Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
    } while (!Uses.empty());

    return nullptr;
  }

  void visitPHINodeOrSelectInst(Instruction &I) {
    assert(isa<PHINode>(I) || isa<SelectInst>(I));
    if (I.use_empty())
      return markAsDead(I);

    // If this is a PHI node before a catchswitch, we cannot insert any non-PHI
    // instructions in this BB, which may be required during rewriting. Bail
    // out on these cases.
    if (isa<PHINode>(I) &&
        I.getParent()->getFirstInsertionPt() == I.getParent()->end())
      return PI.setAborted(&I);

    // TODO: We could use simplifyInstruction here to fold PHINodes and
    // SelectInsts. However, doing so requires changing the current
    // dead-operand-tracking mechanism. For instance, suppose neither loading
    // from %U nor %other traps. Then "load (select undef, %U, %other)" does
    // not trap either. However, if we simply replace %U with undef using the
    // current dead-operand-tracking mechanism, "load (select undef, undef,
    // %other)" may trap because the select may return the first operand
    // "undef".
    if (Value *Result = foldPHINodeOrSelectInst(I)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the PHI/select as if we had RAUW'ed it.
        enqueueUsers(I);
      else
        // Otherwise the operand to the PHI/select is dead, and we can replace
        // it with poison.
        AS.DeadOperands.push_back(U);

      return;
    }

    if (!IsOffsetKnown)
      return PI.setAborted(&I);

    // See if we already have computed info on this node.
    uint64_t &Size = PHIOrSelectSizes[&I];
    if (!Size) {
      // This is a new PHI/Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with poison.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      AS.DeadOperands.push_back(U);
      return;
    }

    insertUse(I, Offset, Size);
  }

  void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }

  void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }

  /// Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(nullptr) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  llvm::erase_if(Slices, [](const Slice &S) { return S.isDead(); });

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  llvm::stable_sort(Slices);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  OS << "\n";
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "");
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static std::pair<Type *, IntegerType *>
findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E,
               uint64_t EndOffset) {
  Type *Ty = nullptr;
  bool TyIsCommon = true;
  IntegerType *ITy = nullptr;

  // Note that we need to look at *every* alloca slice's Use to ensure we
  // always get consistent results regardless of the order of slices.
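  // Illustrative example: if every slice spanning the whole partition is
  // loaded or stored as i32, the common type is i32; if the uses mix i32 and
  // float, no common type is found, but ITy is still set to i32 as the
  // largest byte-width integer type used.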
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = nullptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split. Also skip if the type is not a byte width
      // multiple.
      if (UserITy->getBitWidth() % 8 != 0 ||
          UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // Track the largest bitwidth integer type used in this way in case there
      // is no common type.
      if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
        ITy = UserITy;
    }

    // To avoid depending on the order of slices, Ty and TyIsCommon must not
    // depend on types skipped above.
    if (!UserTy || (Ty && Ty != UserTy))
      TyIsCommon = false; // Give up on anything but an iN type.
    else
      Ty = UserTy;
  }

  return {TyIsCommon ? Ty : nullptr, ITy};
}
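// For example (illustrative): if every slice spanning the whole partition
// loads or stores an i32, the common type is i32. If one such slice uses i32
// and another uses float, no common type is found, but i32 is still returned
// as the integer fallback for a bitfield-style rewrite.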
/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Utils/Local.h
static bool isSafePHIToSpeculate(PHINode &PN) {
  const DataLayout &DL = PN.getModule()->getDataLayout();

  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  Align MaxAlign;
  uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
  Type *LoadType = nullptr;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI. This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    if (LoadType) {
      if (LoadType != LI->getType())
        return false;
    } else {
      LoadType = LI->getType();
    }

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    MaxAlign = std::max(MaxAlign, LI->getAlign());
  }

  if (!LoadType)
    return false;

  APInt LoadSize =
      APInt(APWidth, DL.getTypeStoreSize(LoadType).getFixedValue());

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (isSafeToLoadUnconditionally(InVal, MaxAlign, LoadSize, DL, TI))
      continue;

    return false;
  }

  return true;
}

static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) {
  LLVM_DEBUG(dbgs() << " original: " << PN << "\n");

  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
  Type *LoadTy = SomeLoad->getType();
  IRB.SetInsertPoint(&PN);
  PHINode *NewPN = IRB.CreatePHI(LoadTy, PN.getNumIncomingValues(),
                                 PN.getName() + ".sroa.speculated");

  // Get the AA tags and alignment to use from one of the loads. It does not
  // matter which one we get and if any differ.
  AAMDNodes AATags = SomeLoad->getAAMetadata();
  Align Alignment = SomeLoad->getAlign();

  // Rewrite all loads of the PN to use the new PHI.
  while (!PN.use_empty()) {
    LoadInst *LI = cast<LoadInst>(PN.user_back());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }

  // Inject loads into all of the pred blocks.
  DenseMap<BasicBlock *, Value *> InjectedLoads;
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    Value *InVal = PN.getIncomingValue(Idx);

    // A PHI node is allowed to have multiple (duplicated) entries for the same
    // basic block, as long as the value is the same. So if we already injected
    // a load in the predecessor, then we should reuse the same load for all
    // duplicated entries.
    if (Value *V = InjectedLoads.lookup(Pred)) {
      NewPN->addIncoming(V, Pred);
      continue;
    }

    Instruction *TI = Pred->getTerminator();
    IRB.SetInsertPoint(TI);

    LoadInst *Load = IRB.CreateAlignedLoad(
        LoadTy, InVal, Alignment,
        (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
    ++NumLoadsSpeculated;
    if (AATags)
      Load->setAAMetadata(AATags);
    NewPN->addIncoming(Load, Pred);
    InjectedLoads[Pred] = Load;
  }

  LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
  PN.eraseFromParent();
}
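// For example (illustrative): given a phi with duplicate incoming entries,
//   %p = phi ptr [ %a, %bb ], [ %a, %bb ]
// a single "%p.sroa.speculate.load.bb" load of %a is injected into %bb and
// reused for both incoming entries of the new phi.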
SelectHandSpeculativity &
SelectHandSpeculativity::setAsSpeculatable(bool isTrueVal) {
  if (isTrueVal)
    Bitfield::set<SelectHandSpeculativity::TrueVal>(Storage, true);
  else
    Bitfield::set<SelectHandSpeculativity::FalseVal>(Storage, true);
  return *this;
}

bool SelectHandSpeculativity::isSpeculatable(bool isTrueVal) const {
  return isTrueVal ? Bitfield::get<SelectHandSpeculativity::TrueVal>(Storage)
                   : Bitfield::get<SelectHandSpeculativity::FalseVal>(Storage);
}

bool SelectHandSpeculativity::areAllSpeculatable() const {
  return isSpeculatable(/*isTrueVal=*/true) &&
         isSpeculatable(/*isTrueVal=*/false);
}

bool SelectHandSpeculativity::areAnySpeculatable() const {
  return isSpeculatable(/*isTrueVal=*/true) ||
         isSpeculatable(/*isTrueVal=*/false);
}
bool SelectHandSpeculativity::areNoneSpeculatable() const {
  return !areAnySpeculatable();
}

static SelectHandSpeculativity
isSafeLoadOfSelectToSpeculate(LoadInst &LI, SelectInst &SI, bool PreserveCFG) {
  assert(LI.isSimple() && "Only for simple loads");
  SelectHandSpeculativity Spec;

  const DataLayout &DL = SI.getModule()->getDataLayout();
  for (Value *Value : {SI.getTrueValue(), SI.getFalseValue()})
    if (isSafeToLoadUnconditionally(Value, LI.getType(), LI.getAlign(), DL,
                                    &LI))
      Spec.setAsSpeculatable(/*isTrueVal=*/Value == SI.getTrueValue());
    else if (PreserveCFG)
      return Spec;

  return Spec;
}

std::optional<RewriteableMemOps>
SROA::isSafeSelectToSpeculate(SelectInst &SI, bool PreserveCFG) {
  RewriteableMemOps Ops;

  for (User *U : SI.users()) {
    if (auto *BC = dyn_cast<BitCastInst>(U); BC && BC->hasOneUse())
      U = *BC->user_begin();

    if (auto *Store = dyn_cast<StoreInst>(U)) {
      // Note that atomic stores can be transformed; atomic semantics do not
      // have any meaning for a local alloca. Stores are not speculatable,
      // however, so if we can't turn it into a predicated store, we are done.
      if (Store->isVolatile() || PreserveCFG)
        return {}; // Give up on this `select`.
      Ops.emplace_back(Store);
      continue;
    }

    auto *LI = dyn_cast<LoadInst>(U);

    // Note that atomic loads can be transformed;
    // atomic semantics do not have any meaning for a local alloca.
    if (!LI || LI->isVolatile())
      return {}; // Give up on this `select`.

    PossiblySpeculatableLoad Load(LI);
    if (!LI->isSimple()) {
      // If the `load` is not simple, we can't speculatively execute it, but
      // we can still rewrite it as a predicated load, provided we are allowed
      // to modify the CFG.
      if (PreserveCFG)
        return {}; // Give up on this `select`.
      Ops.emplace_back(Load);
      continue;
    }

    SelectHandSpeculativity Spec =
        isSafeLoadOfSelectToSpeculate(*LI, SI, PreserveCFG);
    if (PreserveCFG && !Spec.areAllSpeculatable())
      return {}; // Give up on this `select`.
    Load.setInt(Spec);
    Ops.emplace_back(Load);
  }

  return Ops;
}

static void speculateSelectInstLoads(SelectInst &SI, LoadInst &LI,
                                     IRBuilderTy &IRB) {
  LLVM_DEBUG(dbgs() << " original load: " << SI << "\n");

  Value *TV = SI.getTrueValue();
  Value *FV = SI.getFalseValue();
  // Replace the given load of the select with a select of two loads.

  assert(LI.isSimple() && "We only speculate simple loads");

  IRB.SetInsertPoint(&LI);

  LoadInst *TL =
      IRB.CreateAlignedLoad(LI.getType(), TV, LI.getAlign(),
                            LI.getName() + ".sroa.speculate.load.true");
  LoadInst *FL =
      IRB.CreateAlignedLoad(LI.getType(), FV, LI.getAlign(),
                            LI.getName() + ".sroa.speculate.load.false");
  NumLoadsSpeculated += 2;

  // Transfer alignment and AA info if present.
  TL->setAlignment(LI.getAlign());
  FL->setAlignment(LI.getAlign());

  AAMDNodes Tags = LI.getAAMetadata();
  if (Tags) {
    TL->setAAMetadata(Tags);
    FL->setAAMetadata(Tags);
  }

  Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
                              LI.getName() + ".sroa.speculated");

  LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n");
  LI.replaceAllUsesWith(V);
}
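// For example (illustrative IR): a speculated load of a select such as
//   %p = select i1 %c, ptr %a, ptr %b
//   %v = load i32, ptr %p
// becomes
//   %v.sroa.speculate.load.true = load i32, ptr %a
//   %v.sroa.speculate.load.false = load i32, ptr %b
//   %v.sroa.speculated = select i1 %c, i32 %v.sroa.speculate.load.true,
//                               i32 %v.sroa.speculate.load.false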
".then" : ".else") + ".val"); 1846 PN->addIncoming(&CondMemOp, NewMemOpBB); 1847 } else 1848 LLVM_DEBUG(dbgs() << " to: " << CondMemOp << "\n"); 1849 } 1850 if (isa<LoadInst>(I)) { 1851 PN->takeName(&I); 1852 LLVM_DEBUG(dbgs() << " to: " << *PN << "\n"); 1853 I.replaceAllUsesWith(PN); 1854 } 1855 } 1856 1857 static void rewriteMemOpOfSelect(SelectInst &SelInst, Instruction &I, 1858 SelectHandSpeculativity Spec, 1859 DomTreeUpdater &DTU) { 1860 if (auto *LI = dyn_cast<LoadInst>(&I)) 1861 rewriteMemOpOfSelect(SelInst, *LI, Spec, DTU); 1862 else if (auto *SI = dyn_cast<StoreInst>(&I)) 1863 rewriteMemOpOfSelect(SelInst, *SI, Spec, DTU); 1864 else 1865 llvm_unreachable_internal("Only for load and store."); 1866 } 1867 1868 static bool rewriteSelectInstMemOps(SelectInst &SI, 1869 const RewriteableMemOps &Ops, 1870 IRBuilderTy &IRB, DomTreeUpdater *DTU) { 1871 bool CFGChanged = false; 1872 LLVM_DEBUG(dbgs() << " original select: " << SI << "\n"); 1873 1874 for (const RewriteableMemOp &Op : Ops) { 1875 SelectHandSpeculativity Spec; 1876 Instruction *I; 1877 if (auto *const *US = std::get_if<UnspeculatableStore>(&Op)) { 1878 I = *US; 1879 } else { 1880 auto PSL = std::get<PossiblySpeculatableLoad>(Op); 1881 I = PSL.getPointer(); 1882 Spec = PSL.getInt(); 1883 } 1884 if (Spec.areAllSpeculatable()) { 1885 speculateSelectInstLoads(SI, cast<LoadInst>(*I), IRB); 1886 } else { 1887 assert(DTU && "Should not get here when not allowed to modify the CFG!"); 1888 rewriteMemOpOfSelect(SI, *I, Spec, *DTU); 1889 CFGChanged = true; 1890 } 1891 I->eraseFromParent(); 1892 } 1893 1894 for (User *U : make_early_inc_range(SI.users())) 1895 cast<BitCastInst>(U)->eraseFromParent(); 1896 SI.eraseFromParent(); 1897 return CFGChanged; 1898 } 1899 1900 /// Compute an adjusted pointer from Ptr by Offset bytes where the 1901 /// resulting pointer has PointerTy. 1902 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, 1903 APInt Offset, Type *PointerTy, 1904 const Twine &NamePrefix) { 1905 if (Offset != 0) 1906 Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(Offset), 1907 NamePrefix + "sroa_idx"); 1908 return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, PointerTy, 1909 NamePrefix + "sroa_cast"); 1910 } 1911 1912 /// Compute the adjusted alignment for a load or store from an offset. 1913 static Align getAdjustedAlignment(Instruction *I, uint64_t Offset) { 1914 return commonAlignment(getLoadStoreAlignment(I), Offset); 1915 } 1916 1917 /// Test whether we can convert a value from the old to the new type. 1918 /// 1919 /// This predicate should be used to guard calls to convertValue in order to 1920 /// ensure that we only try to convert viable values. The strategy is that we 1921 /// will peel off single element struct and array wrappings to get to an 1922 /// underlying value, and convert that value. 1923 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { 1924 if (OldTy == NewTy) 1925 return true; 1926 1927 // For integer types, we can't handle any bit-width differences. This would 1928 // break both vector conversions with extension and introduce endianness 1929 // issues when in conjunction with loads and stores. 
static void rewriteMemOpOfSelect(SelectInst &SelInst, Instruction &I,
                                 SelectHandSpeculativity Spec,
                                 DomTreeUpdater &DTU) {
  if (auto *LI = dyn_cast<LoadInst>(&I))
    rewriteMemOpOfSelect(SelInst, *LI, Spec, DTU);
  else if (auto *SI = dyn_cast<StoreInst>(&I))
    rewriteMemOpOfSelect(SelInst, *SI, Spec, DTU);
  else
    llvm_unreachable_internal("Only for load and store.");
}

static bool rewriteSelectInstMemOps(SelectInst &SI,
                                    const RewriteableMemOps &Ops,
                                    IRBuilderTy &IRB, DomTreeUpdater *DTU) {
  bool CFGChanged = false;
  LLVM_DEBUG(dbgs() << " original select: " << SI << "\n");

  for (const RewriteableMemOp &Op : Ops) {
    SelectHandSpeculativity Spec;
    Instruction *I;
    if (auto *const *US = std::get_if<UnspeculatableStore>(&Op)) {
      I = *US;
    } else {
      auto PSL = std::get<PossiblySpeculatableLoad>(Op);
      I = PSL.getPointer();
      Spec = PSL.getInt();
    }
    if (Spec.areAllSpeculatable()) {
      speculateSelectInstLoads(SI, cast<LoadInst>(*I), IRB);
    } else {
      assert(DTU && "Should not get here when not allowed to modify the CFG!");
      rewriteMemOpOfSelect(SI, *I, Spec, *DTU);
      CFGChanged = true;
    }
    I->eraseFromParent();
  }

  for (User *U : make_early_inc_range(SI.users()))
    cast<BitCastInst>(U)->eraseFromParent();
  SI.eraseFromParent();
  return CFGChanged;
}

/// Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
                             APInt Offset, Type *PointerTy,
                             const Twine &NamePrefix) {
  if (Offset != 0)
    Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(Offset),
                                NamePrefix + "sroa_idx");
  return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, PointerTy,
                                                 NamePrefix + "sroa_cast");
}

/// Compute the adjusted alignment for a load or store from an offset.
static Align getAdjustedAlignment(Instruction *I, uint64_t Offset) {
  return commonAlignment(getLoadStoreAlignment(I), Offset);
}

/// Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. The strategy is that we
/// will peel off single element struct and array wrappings to get to an
/// underlying value, and convert that value.
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
  if (OldTy == NewTy)
    return true;

  // For integer types, we can't handle any bit-width differences. This would
  // break both vector conversions with extension and introduce endianness
  // issues when in conjunction with loads and stores.
  if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
    assert(cast<IntegerType>(OldTy)->getBitWidth() !=
               cast<IntegerType>(NewTy)->getBitWidth() &&
           "We can't have the same bitwidth for different int types");
    return false;
  }

  if (DL.getTypeSizeInBits(NewTy).getFixedValue() !=
      DL.getTypeSizeInBits(OldTy).getFixedValue())
    return false;
  if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
    return false;

  // We can convert pointers to integers and vice-versa. Same for vectors
  // of pointers and integers.
  OldTy = OldTy->getScalarType();
  NewTy = NewTy->getScalarType();
  if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
    if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
      unsigned OldAS = OldTy->getPointerAddressSpace();
      unsigned NewAS = NewTy->getPointerAddressSpace();
      // Convert pointers if they are pointers from the same address space or
      // different integral (not non-integral) address spaces with the same
      // pointer size.
      return OldAS == NewAS ||
             (!DL.isNonIntegralAddressSpace(OldAS) &&
              !DL.isNonIntegralAddressSpace(NewAS) &&
              DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS));
    }

    // We can convert integers to integral pointers, but not to non-integral
    // pointers.
    if (OldTy->isIntegerTy())
      return !DL.isNonIntegralPointerType(NewTy);

    // We can convert integral pointers to integers, but non-integral pointers
    // need to remain pointers.
    if (!DL.isNonIntegralPointerType(OldTy))
      return NewTy->isIntegerTy();

    return false;
  }

  if (OldTy->isTargetExtTy() || NewTy->isTargetExtTy())
    return false;

  return true;
}
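// For example (illustrative, assuming 64-bit integral pointers): i64 <->
// double and i64 <-> ptr are convertible since the bit widths match, while
// i32 -> i64 is not (differing widths), and neither is i64 -> ptr for a
// non-integral address space.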
/// Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                           Type *NewTy) {
  Type *OldTy = V->getType();
  assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");

  if (OldTy == NewTy)
    return V;

  assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
         "Integer types must be the exact same to convert.");

  // See if we need inttoptr for this type pair. May require additional
  // bitcast.
  if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
    // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
    // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
    // Expand <4 x i32> to <2 x i8*> --> <4 x i32> to <2 x i64> to <2 x i8*>
    // Directly handle i64 to i8*
    return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
                              NewTy);
  }

  // See if we need ptrtoint for this type pair. May require additional
  // bitcast.
  if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
    // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
    // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
    // Expand <2 x i8*> to <4 x i32> --> <2 x i8*> to <2 x i64> to <4 x i32>
    // Expand i8* to i64 --> i8* to i64 to i64
    return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                             NewTy);
  }

  if (OldTy->isPtrOrPtrVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
    unsigned OldAS = OldTy->getPointerAddressSpace();
    unsigned NewAS = NewTy->getPointerAddressSpace();
    // To convert pointers with different address spaces (already verified as
    // convertible, i.e. they have the same pointer size), we cannot use
    // `bitcast` (which requires the address spaces to match) or
    // `addrspacecast` (which is not always a no-op cast). Instead, use a pair
    // of no-op `ptrtoint`/`inttoptr` casts through an integer with the same
    // bit size.
    if (OldAS != NewAS) {
      assert(DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS));
      return IRB.CreateIntToPtr(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                                NewTy);
    }
  }

  return IRB.CreateBitCast(V, NewTy);
}

/// Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partition which is slated
/// for a single slice.
static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
                                            VectorType *Ty,
                                            uint64_t ElementSize,
                                            const DataLayout &DL) {
  // First validate the slice offsets.
  uint64_t BeginOffset =
      std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
  uint64_t BeginIndex = BeginOffset / ElementSize;
  if (BeginIndex * ElementSize != BeginOffset ||
      BeginIndex >= cast<FixedVectorType>(Ty)->getNumElements())
    return false;
  uint64_t EndOffset =
      std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
  uint64_t EndIndex = EndOffset / ElementSize;
  if (EndIndex * ElementSize != EndOffset ||
      EndIndex > cast<FixedVectorType>(Ty)->getNumElements())
    return false;

  assert(EndIndex > BeginIndex && "Empty vector!");
  uint64_t NumElements = EndIndex - BeginIndex;
  Type *SliceTy = (NumElements == 1)
                      ? Ty->getElementType()
                      : FixedVectorType::get(Ty->getElementType(), NumElements);

  Type *SplitIntTy =
      Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);

  Use *U = S.getUse();

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile())
      return false;
    if (!S.isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (!II->isLifetimeStartOrEnd() && !II->isDroppable())
      return false;
  } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    Type *LTy = LI->getType();
    // Disable vector promotion when there are loads or stores of an FCA.
    if (LTy->isStructTy())
      return false;
    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(LTy->isIntegerTy());
      LTy = SplitIntTy;
    }
    if (!canConvertValue(DL, SliceTy, LTy))
      return false;
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    if (SI->isVolatile())
      return false;
    Type *STy = SI->getValueOperand()->getType();
    // Disable vector promotion when there are loads or stores of an FCA.
    if (STy->isStructTy())
      return false;
    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(STy->isIntegerTy());
      STy = SplitIntTy;
    }
    if (!canConvertValue(DL, STy, SliceTy))
      return false;
  } else {
    return false;
  }

  return true;
}
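// For example (illustrative): with Ty = <4 x i32> and ElementSize = 4, a
// slice covering partition bytes [8, 12) maps to BeginIndex 2 and EndIndex 3,
// i.e. a single-element access of lane 2, while a slice covering [0, 16)
// maps to the whole vector.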
/// Test whether a vector type is viable for promotion.
///
/// This implements the necessary checking for \c isVectorPromotionViable over
/// all slices of the alloca for the given VectorType.
static bool checkVectorTypeForPromotion(Partition &P, VectorType *VTy,
                                        const DataLayout &DL) {
  uint64_t ElementSize =
      DL.getTypeSizeInBits(VTy->getElementType()).getFixedValue();

  // While the definition of LLVM vectors is bitpacked, we don't support sizes
  // that aren't byte sized.
  if (ElementSize % 8)
    return false;
  assert((DL.getTypeSizeInBits(VTy).getFixedValue() % 8) == 0 &&
         "vector size not a multiple of element size?");
  ElementSize /= 8;

  for (const Slice &S : P)
    if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
      return false;

  for (const Slice *S : P.splitSliceTails())
    if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
      return false;

  return true;
}

/// Test whether the given alloca partitioning and range of slices can be
/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// whole-vector loads and stores such that it could be promoted to a vector
/// SSA value. We only can ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
  // Collect the candidate types for vector-based promotion. Also track
  // whether we have different element types.
  SmallVector<VectorType *, 4> CandidateTys;
  SetVector<Type *> LoadStoreTys;
  Type *CommonEltTy = nullptr;
  VectorType *CommonVecPtrTy = nullptr;
  bool HaveVecPtrTy = false;
  bool HaveCommonEltTy = true;
  bool HaveCommonVecPtrTy = true;
  auto CheckCandidateType = [&](Type *Ty) {
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      // Bail out on candidate vector types whose total size in bits differs
      // from the existing candidates'; we cannot bitcast between them.
      if (!CandidateTys.empty()) {
        VectorType *V = CandidateTys[0];
        if (DL.getTypeSizeInBits(VTy).getFixedValue() !=
            DL.getTypeSizeInBits(V).getFixedValue()) {
          CandidateTys.clear();
          return;
        }
      }
      CandidateTys.push_back(VTy);
      Type *EltTy = VTy->getElementType();

      if (!CommonEltTy)
        CommonEltTy = EltTy;
      else if (CommonEltTy != EltTy)
        HaveCommonEltTy = false;

      if (EltTy->isPointerTy()) {
        HaveVecPtrTy = true;
        if (!CommonVecPtrTy)
          CommonVecPtrTy = VTy;
        else if (CommonVecPtrTy != VTy)
          HaveCommonVecPtrTy = false;
      }
    }
  };
  // Put load and store types into a set for de-duplication.
  for (const Slice &S : P) {
    Type *Ty;
    if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
      Ty = LI->getType();
    else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
      Ty = SI->getValueOperand()->getType();
    else
      continue;
    LoadStoreTys.insert(Ty);
    // Consider any loads or stores that are the exact size of the slice.
    if (S.beginOffset() == P.beginOffset() && S.endOffset() == P.endOffset())
      CheckCandidateType(Ty);
  }
  // Consider additional vector types where the element type size is a
  // multiple of load/store element size.
  for (Type *Ty : LoadStoreTys) {
    if (!VectorType::isValidElementType(Ty))
      continue;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty).getFixedValue();
    // Make a copy of CandidateTys and iterate through it, because we might
    // append to CandidateTys in the loop.
    SmallVector<VectorType *, 4> CandidateTysCopy = CandidateTys;
    for (VectorType *&VTy : CandidateTysCopy) {
      unsigned VectorSize = DL.getTypeSizeInBits(VTy).getFixedValue();
      unsigned ElementSize =
          DL.getTypeSizeInBits(VTy->getElementType()).getFixedValue();
      if (TypeSize != VectorSize && TypeSize != ElementSize &&
          VectorSize % TypeSize == 0) {
        VectorType *NewVTy = VectorType::get(Ty, VectorSize / TypeSize, false);
        CheckCandidateType(NewVTy);
      }
    }
  }

  // If we didn't find a vector type, nothing to do here.
  if (CandidateTys.empty())
    return nullptr;

  // Pointer-ness is sticky, if we had a vector-of-pointers candidate type,
  // then we should choose it, not some other alternative.
  // But, we can't perform a no-op pointer address space change via bitcast,
  // so if we didn't have a common pointer element type, bail.
  if (HaveVecPtrTy && !HaveCommonVecPtrTy)
    return nullptr;

  // Try to pick the "best" element type out of the choices.
  if (!HaveCommonEltTy && HaveVecPtrTy) {
    // If there was a pointer element type, there's really only one choice.
    CandidateTys.clear();
    CandidateTys.push_back(CommonVecPtrTy);
  } else if (!HaveCommonEltTy && !HaveVecPtrTy) {
    // Integer-ify vector types.
    for (VectorType *&VTy : CandidateTys) {
      if (!VTy->getElementType()->isIntegerTy())
        VTy = cast<VectorType>(VTy->getWithNewType(IntegerType::getIntNTy(
            VTy->getContext(), VTy->getScalarSizeInBits())));
    }

    // Rank the remaining candidate vector types. This is easy because we know
    // they're all integer vectors. We sort by ascending number of elements.
    auto RankVectorTypesComp = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
      (void)DL;
      assert(DL.getTypeSizeInBits(RHSTy).getFixedValue() ==
                 DL.getTypeSizeInBits(LHSTy).getFixedValue() &&
             "Cannot have vector types of different sizes!");
      assert(RHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      assert(LHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      return cast<FixedVectorType>(RHSTy)->getNumElements() <
             cast<FixedVectorType>(LHSTy)->getNumElements();
    };
    auto RankVectorTypesEq = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
      (void)DL;
      assert(DL.getTypeSizeInBits(RHSTy).getFixedValue() ==
                 DL.getTypeSizeInBits(LHSTy).getFixedValue() &&
             "Cannot have vector types of different sizes!");
      assert(RHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      assert(LHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      return cast<FixedVectorType>(RHSTy)->getNumElements() ==
             cast<FixedVectorType>(LHSTy)->getNumElements();
    };
    llvm::sort(CandidateTys, RankVectorTypesComp);
    CandidateTys.erase(std::unique(CandidateTys.begin(), CandidateTys.end(),
                                   RankVectorTypesEq),
                       CandidateTys.end());
  } else {
    // The only way to have the same element type in every vector type is to
    // have the same vector type. Check that and remove all but one.
#ifndef NDEBUG
    for (VectorType *VTy : CandidateTys) {
      assert(VTy->getElementType() == CommonEltTy &&
             "Unaccounted for element type!");
      assert(VTy == CandidateTys[0] &&
             "Different vector types with the same element type!");
    }
#endif
    CandidateTys.resize(1);
  }

  // FIXME: hack. Do we have a named constant for this?
  // SDAG SDNode can't have more than 65535 operands.
  llvm::erase_if(CandidateTys, [](VectorType *VTy) {
    return cast<FixedVectorType>(VTy)->getNumElements() >
           std::numeric_limits<unsigned short>::max();
  });

  for (VectorType *VTy : CandidateTys)
    if (checkVectorTypeForPromotion(P, VTy, DL))
      return VTy;

  return nullptr;
}
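// For example (illustrative): a 16-byte partition accessed through
// whole-partition <4 x i32> loads and stores plus an i32 store of bytes
// [4, 8) is viable as <4 x i32>: the i32 access maps to lane 1 and every
// other access maps to the full vector.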
/// Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
/// test below on a single slice of the alloca.
static bool isIntegerWideningViableForSlice(const Slice &S,
                                            uint64_t AllocBeginOffset,
                                            Type *AllocaTy,
                                            const DataLayout &DL,
                                            bool &WholeAllocaOp) {
  uint64_t Size = DL.getTypeStoreSize(AllocaTy).getFixedValue();

  uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
  uint64_t RelEnd = S.endOffset() - AllocBeginOffset;

  Use *U = S.getUse();

  // Lifetime intrinsics operate over the whole alloca whose sizes are usually
  // larger than other load/store slices (RelEnd > Size). But lifetime
  // intrinsics are always promotable and should not impact other slices'
  // promotability of the partition.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (II->isLifetimeStartOrEnd() || II->isDroppable())
      return true;
  }

  // We can't reasonably handle cases where the load or store extends past
  // the end of the alloca's type and into its padding.
  if (RelEnd > Size)
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    // We can't handle loads that extend past the allocated memory.
    if (DL.getTypeStoreSize(LI->getType()).getFixedValue() > Size)
      return false;
    // So far, AllocaSliceRewriter does not support widening split slice tails
    // in rewriteIntegerLoad.
    if (S.beginOffset() < AllocBeginOffset)
      return false;
    // Note that we don't count vector loads or stores as whole-alloca
    // operations which enable integer widening because we would prefer to use
    // vector widening instead.
    if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedValue())
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, AllocaTy, LI->getType())) {
      // Non-integer loads need to be convertible from the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    Type *ValueTy = SI->getValueOperand()->getType();
    if (SI->isVolatile())
      return false;
    // We can't handle stores that extend past the allocated memory.
    if (DL.getTypeStoreSize(ValueTy).getFixedValue() > Size)
      return false;
    // So far, AllocaSliceRewriter does not support widening split slice tails
    // in rewriteIntegerStore.
    if (S.beginOffset() < AllocBeginOffset)
      return false;
    // Note that we don't count vector loads or stores as whole-alloca
    // operations which enable integer widening because we would prefer to use
    // vector widening instead.
    if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedValue())
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, ValueTy, AllocaTy)) {
      // Non-integer stores need to be convertible to the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
      return false;
    if (!S.isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else {
    return false;
  }

  return true;
}

/// Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
                                    const DataLayout &DL) {
  uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy).getFixedValue();
  // Don't create integer types larger than the maximum bitwidth.
  if (SizeInBits > IntegerType::MAX_INT_BITS)
    return false;

  // Don't try to handle allocas with bit-padding.
  if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy).getFixedValue())
    return false;

  // We need to ensure that an integer type with the appropriate bitwidth can
  // be converted to the alloca type, whatever that is. We don't want to force
  // the alloca itself to have an integer type if there is a more suitable one.
  Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
  if (!canConvertValue(DL, AllocaTy, IntTy) ||
      !canConvertValue(DL, IntTy, AllocaTy))
    return false;

  // While examining uses, we ensure that the alloca has a covering load or
  // store. We don't want to widen the integer operations only to fail to
  // promote due to some other unsplittable entry (which we may make splittable
  // later). However, if there are only splittable uses, go ahead and assume
  // that we cover the alloca.
  // FIXME: We shouldn't consider split slices that happen to start in the
  // partition here...
  bool WholeAllocaOp = P.empty() && DL.isLegalInteger(SizeInBits);

  for (const Slice &S : P)
    if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
                                         WholeAllocaOp))
      return false;

  for (const Slice *S : P.splitSliceTails())
    if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
                                         WholeAllocaOp))
      return false;

  return WholeAllocaOp;
}
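// For example (illustrative): an i64 alloca accessed through one whole i64
// load plus i32 stores at offsets 0 and 4 is viable: the i64 load covers the
// alloca, and each i32 store can be rewritten as a read-modify-write of a
// single i64 (see insertInteger below).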
static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                             IntegerType *Ty, uint64_t Offset,
                             const Twine &Name) {
  LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
  IntegerType *IntTy = cast<IntegerType>(V->getType());
  assert(DL.getTypeStoreSize(Ty).getFixedValue() + Offset <=
             DL.getTypeStoreSize(IntTy).getFixedValue() &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedValue() -
                 DL.getTypeStoreSize(Ty).getFixedValue() - Offset);
  if (ShAmt) {
    V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
    LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
  }
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (Ty != IntTy) {
    V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
    LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n");
  }
  return V;
}

static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
                            Value *V, uint64_t Offset, const Twine &Name) {
  IntegerType *IntTy = cast<IntegerType>(Old->getType());
  IntegerType *Ty = cast<IntegerType>(V->getType());
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot insert a larger integer!");
  LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
  if (Ty != IntTy) {
    V = IRB.CreateZExt(V, IntTy, Name + ".ext");
    LLVM_DEBUG(dbgs() << " extended: " << *V << "\n");
  }
  assert(DL.getTypeStoreSize(Ty).getFixedValue() + Offset <=
             DL.getTypeStoreSize(IntTy).getFixedValue() &&
         "Element store outside of alloca store");
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedValue() -
                 DL.getTypeStoreSize(Ty).getFixedValue() - Offset);
  if (ShAmt) {
    V = IRB.CreateShl(V, ShAmt, Name + ".shift");
    LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
  }

  if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
    APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
    Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
    LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n");
    V = IRB.CreateOr(Old, V, Name + ".insert");
    LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n");
  }
  return V;
}
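// Worked example (illustrative, little-endian): extracting an i16 at byte
// offset 2 from an i64 value V computes trunc(V >> 16) to i16; re-inserting
// an i16 W at the same offset computes
//   (V & ~(0xFFFF << 16)) | (zext(W) << 16)
// i.e. the old bytes are masked out before the shifted new value is OR'd in.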
static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
                            unsigned EndIndex, const Twine &Name) {
  auto *VecTy = cast<FixedVectorType>(V->getType());
  unsigned NumElements = EndIndex - BeginIndex;
  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

  if (NumElements == VecTy->getNumElements())
    return V;

  if (NumElements == 1) {
    V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
                                 Name + ".extract");
    LLVM_DEBUG(dbgs() << " extract: " << *V << "\n");
    return V;
  }

  auto Mask = llvm::to_vector<8>(llvm::seq<int>(BeginIndex, EndIndex));
  V = IRB.CreateShuffleVector(V, Mask, Name + ".extract");
  LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
  return V;
}

static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
                           unsigned BeginIndex, const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(Old->getType());
  assert(VecTy && "Can only insert a vector into a vector");

  VectorType *Ty = dyn_cast<VectorType>(V->getType());
  if (!Ty) {
    // Single element to insert.
    V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
                                Name + ".insert");
    LLVM_DEBUG(dbgs() << " insert: " << *V << "\n");
    return V;
  }

  assert(cast<FixedVectorType>(Ty)->getNumElements() <=
             cast<FixedVectorType>(VecTy)->getNumElements() &&
         "Too many elements!");
  if (cast<FixedVectorType>(Ty)->getNumElements() ==
      cast<FixedVectorType>(VecTy)->getNumElements()) {
    assert(V->getType() == VecTy && "Vector type mismatch");
    return V;
  }
  unsigned EndIndex = BeginIndex + cast<FixedVectorType>(Ty)->getNumElements();

  // When inserting a smaller vector into the larger one for a store, we first
  // use a shuffle vector to widen it with undef elements, and then a second
  // shuffle vector to select between the loaded vector and the incoming
  // vector.
  SmallVector<int, 8> Mask;
  Mask.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
  for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
    if (i >= BeginIndex && i < EndIndex)
      Mask.push_back(i - BeginIndex);
    else
      Mask.push_back(-1);
  V = IRB.CreateShuffleVector(V, Mask, Name + ".expand");
  LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");

  SmallVector<Constant *, 8> Mask2;
  Mask2.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
  for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
    Mask2.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));

  V = IRB.CreateSelect(ConstantVector::get(Mask2), V, Old, Name + "blend");

  LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
  return V;
}
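// Worked example (illustrative): inserting a <2 x i32> value V at BeginIndex
// 1 of a <4 x i32> value Old first widens V with the shuffle mask
// <-1, 0, 1, -1> (poison in the untouched lanes) and then selects lanes with
// the condition vector <i1 0, 1, 1, 0>, keeping lanes 0 and 3 of Old.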
namespace {

/// Visitor to rewrite instructions using a particular slice of an alloca
/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
  // Befriend the base class so it can delegate to private visit methods.
  friend class InstVisitor<AllocaSliceRewriter, bool>;

  using Base = InstVisitor<AllocaSliceRewriter, bool>;

  const DataLayout &DL;
  AllocaSlices &AS;
  SROA &Pass;
  AllocaInst &OldAI, &NewAI;
  const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
  Type *NewAllocaTy;

  // This is a convenience and flag variable that will be null unless the new
  // alloca's integer operations should be widened to this integer type due to
  // passing isIntegerWideningViable above. If it is non-null, the desired
  // integer type will be stored here for easy access during rewriting.
  IntegerType *IntTy;

  // If we are rewriting an alloca partition which can be written as pure
  // vector operations, we stash extra information here. When VecTy is
  // non-null, we have some strict guarantees about the rewritten alloca:
  //   - The new alloca is exactly the size of the vector type here.
  //   - The accesses all either map to the entire vector or to a single
  //     element.
  //   - The set of accessing instructions is only one of those handled above
  //     in isVectorPromotionViable. Generally these are the same access kinds
  //     which are promotable via mem2reg.
  VectorType *VecTy;
  Type *ElementTy;
  uint64_t ElementSize;

  // The original offset of the slice currently being rewritten relative to
  // the original alloca.
  uint64_t BeginOffset = 0;
  uint64_t EndOffset = 0;

  // The new offsets of the slice currently being rewritten relative to the
  // original alloca.
  uint64_t NewBeginOffset = 0, NewEndOffset = 0;

  uint64_t SliceSize = 0;
  bool IsSplittable = false;
  bool IsSplit = false;
  Use *OldUse = nullptr;
  Instruction *OldPtr = nullptr;

  // Track post-rewrite users which are PHI nodes and Selects.
  SmallSetVector<PHINode *, 8> &PHIUsers;
  SmallSetVector<SelectInst *, 8> &SelectUsers;

  // Utility IR builder, whose name prefix is set up for each visited use, and
  // the insertion point is set to point to the user.
  IRBuilderTy IRB;

  // Return the new alloca, addrspacecasted if required to avoid changing the
  // addrspace of a volatile access.
  Value *getPtrToNewAI(unsigned AddrSpace, bool IsVolatile) {
    if (!IsVolatile || AddrSpace == NewAI.getType()->getPointerAddressSpace())
      return &NewAI;

    Type *AccessTy = IRB.getPtrTy(AddrSpace);
    return IRB.CreateAddrSpaceCast(&NewAI, AccessTy);
  }

public:
  AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass,
                      AllocaInst &OldAI, AllocaInst &NewAI,
                      uint64_t NewAllocaBeginOffset,
                      uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
                      VectorType *PromotableVecTy,
                      SmallSetVector<PHINode *, 8> &PHIUsers,
                      SmallSetVector<SelectInst *, 8> &SelectUsers)
      : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
        NewAllocaBeginOffset(NewAllocaBeginOffset),
        NewAllocaEndOffset(NewAllocaEndOffset),
        NewAllocaTy(NewAI.getAllocatedType()),
        IntTy(
            IsIntegerPromotable
                ? Type::getIntNTy(NewAI.getContext(),
                                  DL.getTypeSizeInBits(NewAI.getAllocatedType())
                                      .getFixedValue())
                : nullptr),
        VecTy(PromotableVecTy),
        ElementTy(VecTy ? VecTy->getElementType() : nullptr),
        ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy).getFixedValue() / 8
                          : 0),
        PHIUsers(PHIUsers), SelectUsers(SelectUsers),
        IRB(NewAI.getContext(), ConstantFolder()) {
    if (VecTy) {
      assert((DL.getTypeSizeInBits(ElementTy).getFixedValue() % 8) == 0 &&
             "Only multiple-of-8 sized vector elements are viable");
      ++NumVectorized;
    }
    assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
  }

  bool visit(AllocaSlices::const_iterator I) {
    bool CanSROA = true;
    BeginOffset = I->beginOffset();
    EndOffset = I->endOffset();
    IsSplittable = I->isSplittable();
    IsSplit =
        BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
    LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : ""));
    LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
    LLVM_DEBUG(dbgs() << "\n");

    // Compute the intersecting offset range.
    assert(BeginOffset < NewAllocaEndOffset);
    assert(EndOffset > NewAllocaBeginOffset);
    NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
    NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);

    SliceSize = NewEndOffset - NewBeginOffset;
    LLVM_DEBUG(dbgs() << " Begin:(" << BeginOffset << ", " << EndOffset
                      << ") NewBegin:(" << NewBeginOffset << ", "
                      << NewEndOffset << ") NewAllocaBegin:("
                      << NewAllocaBeginOffset << ", " << NewAllocaEndOffset
                      << ")\n");
    assert(IsSplit || NewBeginOffset == BeginOffset);
    OldUse = I->getUse();
    OldPtr = cast<Instruction>(OldUse->get());

    Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
    IRB.SetInsertPoint(OldUserI);
    IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
    IRB.getInserter().SetNamePrefix(
        Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");

    CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
    if (VecTy || IntTy)
      assert(CanSROA);
    return CanSROA;
  }

private:
  // Make sure the other visit overloads are visible.
  using Base::visit;

  // Every instruction which can end up as a user must have a rewrite rule.
  bool visitInstruction(Instruction &I) {
    LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
    llvm_unreachable("No rewrite rule for this instruction!");
  }

  Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
    // Note that the offset computation can use BeginOffset or NewBeginOffset
    // interchangeably for unsplit slices.
    assert(IsSplit || BeginOffset == NewBeginOffset);
    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;

#ifndef NDEBUG
    StringRef OldName = OldPtr->getName();
    // Skip through the last '.sroa.' component of the name.
    size_t LastSROAPrefix = OldName.rfind(".sroa.");
    if (LastSROAPrefix != StringRef::npos) {
      OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
      // Look for an SROA slice index.
      size_t IndexEnd = OldName.find_first_not_of("0123456789");
      if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
        // Strip the index and look for the offset.
        OldName = OldName.substr(IndexEnd + 1);
        size_t OffsetEnd = OldName.find_first_not_of("0123456789");
        if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
          // Strip the offset.
          OldName = OldName.substr(OffsetEnd + 1);
      }
    }
    // Strip any SROA suffixes as well.
    OldName = OldName.substr(0, OldName.find(".sroa_"));
#endif

    return getAdjustedPtr(IRB, DL, &NewAI,
                          APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
                          PointerTy,
#ifndef NDEBUG
                          Twine(OldName) + "."
#else
                          Twine()
#endif
    );
  }
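  // For example (illustrative): a pointer named "%x.sroa_idx" produced by an
  // earlier rewrite yields the debug-name prefix "x.", keeping rewritten
  // pointers traceable to the original value across iterations.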
  /// Compute suitable alignment to access this slice of the *new*
  /// alloca.
  Align getSliceAlign() {
    return commonAlignment(NewAI.getAlign(),
                           NewBeginOffset - NewAllocaBeginOffset);
  }

  unsigned getIndex(uint64_t Offset) {
    assert(VecTy && "Can only call getIndex when rewriting a vector");
    uint64_t RelOffset = Offset - NewAllocaBeginOffset;
    assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
    uint32_t Index = RelOffset / ElementSize;
    assert(Index * ElementSize == RelOffset);
    return Index;
  }

  void deleteIfTriviallyDead(Value *V) {
    Instruction *I = cast<Instruction>(V);
    if (isInstructionTriviallyDead(I))
      Pass.DeadInsts.push_back(I);
  }

  Value *rewriteVectorizedLoadInst(LoadInst &LI) {
    unsigned BeginIndex = getIndex(NewBeginOffset);
    unsigned EndIndex = getIndex(NewEndOffset);
    assert(EndIndex > BeginIndex && "Empty vector!");

    LoadInst *Load = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                           NewAI.getAlign(), "load");

    Load->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access,
                            LLVMContext::MD_access_group});
    return extractVector(IRB, Load, BeginIndex, EndIndex, "vec");
  }

  Value *rewriteIntegerLoad(LoadInst &LI) {
    assert(IntTy && "We cannot insert an integer to the alloca");
    assert(!LI.isVolatile());
    Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                     NewAI.getAlign(), "load");
    V = convertValue(DL, IRB, V, IntTy);
    assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
    if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) {
      IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8);
      V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract");
    }
    // It is possible that the extracted type is not the load type. This
    // happens if there is a load past the end of the alloca, and as
    // a consequence the slice is narrower but still a candidate for integer
    // lowering. To handle this case, we just zero extend the extracted
    // integer.
    assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
           "Can only handle an extract for an overly wide load");
    if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
      V = IRB.CreateZExt(V, LI.getType());
    return V;
  }
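  // For example (illustrative): with an i64-widened alloca, a load of i16 at
  // slice offset 2 becomes a whole i64 load followed by extractInteger, i.e.
  // a lshr by 16 (on little-endian targets) and a trunc to i16.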
  bool visitLoadInst(LoadInst &LI) {
    LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
    Value *OldOp = LI.getOperand(0);
    assert(OldOp == OldPtr);

    AAMDNodes AATags = LI.getAAMetadata();

    unsigned AS = LI.getPointerAddressSpace();

    Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
                             : LI.getType();
    const bool IsLoadPastEnd =
        DL.getTypeStoreSize(TargetTy).getFixedValue() > SliceSize;
    bool IsPtrAdjusted = false;
    Value *V;
    if (VecTy) {
      V = rewriteVectorizedLoadInst(LI);
    } else if (IntTy && LI.getType()->isIntegerTy()) {
      V = rewriteIntegerLoad(LI);
    } else if (NewBeginOffset == NewAllocaBeginOffset &&
               NewEndOffset == NewAllocaEndOffset &&
               (canConvertValue(DL, NewAllocaTy, TargetTy) ||
                (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
                 TargetTy->isIntegerTy() && !LI.isVolatile()))) {
      Value *NewPtr =
          getPtrToNewAI(LI.getPointerAddressSpace(), LI.isVolatile());
      LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), NewPtr,
                                              NewAI.getAlign(), LI.isVolatile(),
                                              LI.getName());
      if (LI.isVolatile())
        NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
      if (NewLI->isAtomic())
        NewLI->setAlignment(LI.getAlign());

      // Copy any metadata that is valid for the new load. This may require
      // conversion to a different kind of metadata, e.g. !nonnull might change
      // to !range or vice versa.
      copyMetadataForLoad(*NewLI, LI);

      // Do this after copyMetadataForLoad() to preserve the TBAA shift.
      if (AATags)
        NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));

      // Try to preserve nonnull metadata
      V = NewLI;

      // If this is an integer load past the end of the slice (which means the
      // bytes outside the slice are undef or this load is dead) just forcibly
      // fix the integer size with correct handling of endianness.
      if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
        if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
          if (AITy->getBitWidth() < TITy->getBitWidth()) {
            V = IRB.CreateZExt(V, TITy, "load.ext");
            if (DL.isBigEndian())
              V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
                                "endian_shift");
          }
    } else {
      Type *LTy = IRB.getPtrTy(AS);
      LoadInst *NewLI =
          IRB.CreateAlignedLoad(TargetTy, getNewAllocaSlicePtr(IRB, LTy),
                                getSliceAlign(), LI.isVolatile(), LI.getName());
      if (AATags)
        NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
      if (LI.isVolatile())
        NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
      NewLI->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access,
                               LLVMContext::MD_access_group});

      V = NewLI;
      IsPtrAdjusted = true;
    }
    V = convertValue(DL, IRB, V, TargetTy);

    if (IsSplit) {
      assert(!LI.isVolatile());
      assert(LI.getType()->isIntegerTy() &&
             "Only integer type loads and stores are split");
      assert(SliceSize < DL.getTypeStoreSize(LI.getType()).getFixedValue() &&
             "Split load isn't smaller than original load");
      assert(DL.typeSizeEqualsStoreSize(LI.getType()) &&
             "Non-byte-multiple bit width");
      // Move the insertion point just past the load so that we can refer to
      // it.
      IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
      // Create a placeholder value with the same type as LI to use as the
      // basis for the new value. This allows us to replace the uses of LI with
      // the computed value, and then replace the placeholder with LI, leaving
      // LI only used for this computation.
      Value *Placeholder =
          new LoadInst(LI.getType(), PoisonValue::get(IRB.getPtrTy(AS)), "",
                       false, Align(1));
      V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
                        "insert");
      LI.replaceAllUsesWith(V);
      Placeholder->replaceAllUsesWith(&LI);
      Placeholder->deleteValue();
    } else {
      LI.replaceAllUsesWith(V);
    }

    Pass.DeadInsts.push_back(&LI);
    deleteIfTriviallyDead(OldOp);
    LLVM_DEBUG(dbgs() << " to: " << *V << "\n");
    return !LI.isVolatile() && !IsPtrAdjusted;
  }

  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
                                  AAMDNodes AATags) {
    // Capture V for the purpose of debug-info accounting once it's converted
    // to a vector store.
    Value *OrigV = V;
    if (V->getType() != VecTy) {
      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
             "Too many elements!");
      Type *SliceTy = (NumElements == 1)
                          ? ElementTy
                          : FixedVectorType::get(ElementTy, NumElements);
      if (V->getType() != SliceTy)
        V = convertValue(DL, IRB, V, SliceTy);

      // Mix in the existing elements.
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlign(), "load");
      V = insertVector(IRB, Old, V, BeginIndex, "vec");
    }
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign());
    Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
                             LLVMContext::MD_access_group});
    if (AATags)
      Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
    Pass.DeadInsts.push_back(&SI);

    // NOTE: Careful to use OrigV rather than V.
2959 migrateDebugInfo(&OldAI, IsSplit, NewBeginOffset * 8, SliceSize * 8, &SI, 2960 Store, Store->getPointerOperand(), OrigV, DL); 2961 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2962 return true; 2963 } 2964 2965 bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) { 2966 assert(IntTy && "We cannot extract an integer from the alloca"); 2967 assert(!SI.isVolatile()); 2968 if (DL.getTypeSizeInBits(V->getType()).getFixedValue() != 2969 IntTy->getBitWidth()) { 2970 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2971 NewAI.getAlign(), "oldload"); 2972 Old = convertValue(DL, IRB, Old, IntTy); 2973 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2974 uint64_t Offset = BeginOffset - NewAllocaBeginOffset; 2975 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); 2976 } 2977 V = convertValue(DL, IRB, V, NewAllocaTy); 2978 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); 2979 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2980 LLVMContext::MD_access_group}); 2981 if (AATags) 2982 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2983 2984 migrateDebugInfo(&OldAI, IsSplit, NewBeginOffset * 8, SliceSize * 8, &SI, 2985 Store, Store->getPointerOperand(), 2986 Store->getValueOperand(), DL); 2987 2988 Pass.DeadInsts.push_back(&SI); 2989 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2990 return true; 2991 } 2992 2993 bool visitStoreInst(StoreInst &SI) { 2994 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 2995 Value *OldOp = SI.getOperand(1); 2996 assert(OldOp == OldPtr); 2997 2998 AAMDNodes AATags = SI.getAAMetadata(); 2999 Value *V = SI.getValueOperand(); 3000 3001 // Strip all inbounds GEPs and pointer casts to try to dig out any root 3002 // alloca that should be re-examined after promoting this alloca. 
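    // Illustrative example (hypothetical IR): given
    //   store ptr %other.ai, ptr %slot
    // stripping casts and GEPs off the stored value rediscovers %other.ai so
    // it can be revisited once this store is rewritten away.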
3003 if (V->getType()->isPointerTy()) 3004 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) 3005 Pass.PostPromotionWorklist.insert(AI); 3006 3007 if (SliceSize < DL.getTypeStoreSize(V->getType()).getFixedValue()) { 3008 assert(!SI.isVolatile()); 3009 assert(V->getType()->isIntegerTy() && 3010 "Only integer type loads and stores are split"); 3011 assert(DL.typeSizeEqualsStoreSize(V->getType()) && 3012 "Non-byte-multiple bit width"); 3013 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); 3014 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, 3015 "extract"); 3016 } 3017 3018 if (VecTy) 3019 return rewriteVectorizedStoreInst(V, SI, OldOp, AATags); 3020 if (IntTy && V->getType()->isIntegerTy()) 3021 return rewriteIntegerStore(V, SI, AATags); 3022 3023 StoreInst *NewSI; 3024 if (NewBeginOffset == NewAllocaBeginOffset && 3025 NewEndOffset == NewAllocaEndOffset && 3026 canConvertValue(DL, V->getType(), NewAllocaTy)) { 3027 V = convertValue(DL, IRB, V, NewAllocaTy); 3028 Value *NewPtr = 3029 getPtrToNewAI(SI.getPointerAddressSpace(), SI.isVolatile()); 3030 3031 NewSI = 3032 IRB.CreateAlignedStore(V, NewPtr, NewAI.getAlign(), SI.isVolatile()); 3033 } else { 3034 unsigned AS = SI.getPointerAddressSpace(); 3035 Value *NewPtr = getNewAllocaSlicePtr(IRB, IRB.getPtrTy(AS)); 3036 NewSI = 3037 IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(), SI.isVolatile()); 3038 } 3039 NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 3040 LLVMContext::MD_access_group}); 3041 if (AATags) 3042 NewSI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3043 if (SI.isVolatile()) 3044 NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID()); 3045 if (NewSI->isAtomic()) 3046 NewSI->setAlignment(SI.getAlign()); 3047 3048 migrateDebugInfo(&OldAI, IsSplit, NewBeginOffset * 8, SliceSize * 8, &SI, 3049 NewSI, NewSI->getPointerOperand(), 3050 NewSI->getValueOperand(), DL); 3051 3052 Pass.DeadInsts.push_back(&SI); 3053 deleteIfTriviallyDead(OldOp); 3054 3055 LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n"); 3056 return NewSI->getPointerOperand() == &NewAI && 3057 NewSI->getValueOperand()->getType() == NewAllocaTy && 3058 !SI.isVolatile(); 3059 } 3060 3061 /// Compute an integer value from splatting an i8 across the given 3062 /// number of bytes. 3063 /// 3064 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't 3065 /// call this routine. 3066 /// FIXME: Heed the advice above. 3067 /// 3068 /// \param V The i8 value to splat. 3069 /// \param Size The number of bytes in the output (assuming i8 is one byte) 3070 Value *getIntegerSplat(Value *V, unsigned Size) { 3071 assert(Size > 0 && "Expected a positive number of bytes."); 3072 IntegerType *VTy = cast<IntegerType>(V->getType()); 3073 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); 3074 if (Size == 1) 3075 return V; 3076 3077 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); 3078 V = IRB.CreateMul( 3079 IRB.CreateZExt(V, SplatIntTy, "zext"), 3080 IRB.CreateUDiv(Constant::getAllOnesValue(SplatIntTy), 3081 IRB.CreateZExt(Constant::getAllOnesValue(V->getType()), 3082 SplatIntTy)), 3083 "isplat"); 3084 return V; 3085 } 3086 3087 /// Compute a vector splat for a given element value. 
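  /// For example (illustrative), an i32 element with NumElements == 4 yields
  /// a <4 x i32> splat built by IRB.CreateVectorSplat.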
3088 Value *getVectorSplat(Value *V, unsigned NumElements) { 3089 V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); 3090 LLVM_DEBUG(dbgs() << " splat: " << *V << "\n"); 3091 return V; 3092 } 3093 3094 bool visitMemSetInst(MemSetInst &II) { 3095 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 3096 assert(II.getRawDest() == OldPtr); 3097 3098 AAMDNodes AATags = II.getAAMetadata(); 3099 3100 // If the memset has a variable size, it cannot be split, just adjust the 3101 // pointer to the new alloca. 3102 if (!isa<ConstantInt>(II.getLength())) { 3103 assert(!IsSplit); 3104 assert(NewBeginOffset == BeginOffset); 3105 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); 3106 II.setDestAlignment(getSliceAlign()); 3107 // In theory we should call migrateDebugInfo here. However, we do not 3108 // emit dbg.assign intrinsics for mem intrinsics storing through non- 3109 // constant geps, or storing a variable number of bytes. 3110 assert(at::getAssignmentMarkers(&II).empty() && 3111 "AT: Unexpected link to non-const GEP"); 3112 deleteIfTriviallyDead(OldPtr); 3113 return false; 3114 } 3115 3116 // Record this instruction for deletion. 3117 Pass.DeadInsts.push_back(&II); 3118 3119 Type *AllocaTy = NewAI.getAllocatedType(); 3120 Type *ScalarTy = AllocaTy->getScalarType(); 3121 3122 const bool CanContinue = [&]() { 3123 if (VecTy || IntTy) 3124 return true; 3125 if (BeginOffset > NewAllocaBeginOffset || 3126 EndOffset < NewAllocaEndOffset) 3127 return false; 3128 // Length must be in range for FixedVectorType. 3129 auto *C = cast<ConstantInt>(II.getLength()); 3130 const uint64_t Len = C->getLimitedValue(); 3131 if (Len > std::numeric_limits<unsigned>::max()) 3132 return false; 3133 auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext()); 3134 auto *SrcTy = FixedVectorType::get(Int8Ty, Len); 3135 return canConvertValue(DL, SrcTy, AllocaTy) && 3136 DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy).getFixedValue()); 3137 }(); 3138 3139 // If this doesn't map cleanly onto the alloca type, and that type isn't 3140 // a single value type, just emit a memset. 3141 if (!CanContinue) { 3142 Type *SizeTy = II.getLength()->getType(); 3143 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 3144 MemIntrinsic *New = cast<MemIntrinsic>(IRB.CreateMemSet( 3145 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, 3146 MaybeAlign(getSliceAlign()), II.isVolatile())); 3147 if (AATags) 3148 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3149 3150 migrateDebugInfo(&OldAI, IsSplit, NewBeginOffset * 8, SliceSize * 8, &II, 3151 New, New->getRawDest(), nullptr, DL); 3152 3153 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3154 return false; 3155 } 3156 3157 // If we can represent this as a simple value, we have to build the actual 3158 // value to store, which requires expanding the byte present in memset to 3159 // a sensible representation for the alloca type. This is essentially 3160 // splatting the byte to a sufficiently wide integer, splatting it across 3161 // any desired vector width, and bitcasting to the final type. 3162 Value *V; 3163 3164 if (VecTy) { 3165 // If this is a memset of a vectorized alloca, insert it. 
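      // Illustrative example (hypothetical types): a memset of the byte 0x2A
      // covering two middle elements of a <4 x i32> alloca splats each element
      // to 0x2A2A2A2A, reloads the old vector, and re-inserts just those
      // elements below.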
3166       assert(ElementTy == ScalarTy);
3167
3168       unsigned BeginIndex = getIndex(NewBeginOffset);
3169       unsigned EndIndex = getIndex(NewEndOffset);
3170       assert(EndIndex > BeginIndex && "Empty vector!");
3171       unsigned NumElements = EndIndex - BeginIndex;
3172       assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
3173              "Too many elements!");
3174
3175       Value *Splat = getIntegerSplat(
3176           II.getValue(), DL.getTypeSizeInBits(ElementTy).getFixedValue() / 8);
3177       Splat = convertValue(DL, IRB, Splat, ElementTy);
3178       if (NumElements > 1)
3179         Splat = getVectorSplat(Splat, NumElements);
3180
3181       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3182                                          NewAI.getAlign(), "oldload");
3183       V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
3184     } else if (IntTy) {
3185       // If this is a memset on an alloca where we can widen stores, insert the
3186       // set integer.
3187       assert(!II.isVolatile());
3188
3189       uint64_t Size = NewEndOffset - NewBeginOffset;
3190       V = getIntegerSplat(II.getValue(), Size);
3191
3192       if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
3193                     EndOffset != NewAllocaEndOffset)) {
3194         Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3195                                            NewAI.getAlign(), "oldload");
3196         Old = convertValue(DL, IRB, Old, IntTy);
3197         uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
3198         V = insertInteger(DL, IRB, Old, V, Offset, "insert");
3199       } else {
3200         assert(V->getType() == IntTy &&
3201                "Wrong type for an alloca wide integer!");
3202       }
3203       V = convertValue(DL, IRB, V, AllocaTy);
3204     } else {
3205       // Established these invariants above.
3206       assert(NewBeginOffset == NewAllocaBeginOffset);
3207       assert(NewEndOffset == NewAllocaEndOffset);
3208
3209       V = getIntegerSplat(II.getValue(),
3210                           DL.getTypeSizeInBits(ScalarTy).getFixedValue() / 8);
3211       if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
3212         V = getVectorSplat(
3213             V, cast<FixedVectorType>(AllocaVecTy)->getNumElements());
3214
3215       V = convertValue(DL, IRB, V, AllocaTy);
3216     }
3217
3218     Value *NewPtr = getPtrToNewAI(II.getDestAddressSpace(), II.isVolatile());
3219     StoreInst *New =
3220         IRB.CreateAlignedStore(V, NewPtr, NewAI.getAlign(), II.isVolatile());
3221     New->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
3222                            LLVMContext::MD_access_group});
3223     if (AATags)
3224       New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
3225
3226     migrateDebugInfo(&OldAI, IsSplit, NewBeginOffset * 8, SliceSize * 8, &II,
3227                      New, New->getPointerOperand(), V, DL);
3228
3229     LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
3230     return !II.isVolatile();
3231   }
3232
3233   bool visitMemTransferInst(MemTransferInst &II) {
3234     // Rewriting of memory transfer instructions can be a bit tricky. We break
3235     // them into two categories: split intrinsics and unsplit intrinsics.
3236
3237     LLVM_DEBUG(dbgs() << " original: " << II << "\n");
3238
3239     AAMDNodes AATags = II.getAAMetadata();
3240
3241     bool IsDest = &II.getRawDestUse() == OldUse;
3242     assert((IsDest && II.getRawDest() == OldPtr) ||
3243            (!IsDest && II.getRawSource() == OldPtr));
3244
3245     Align SliceAlign = getSliceAlign();
3246     // For unsplit intrinsics, we simply modify the source and destination
3247     // pointers in place. This isn't just an optimization, it is a matter of
3248     // correctness. With unsplit intrinsics we may be dealing with transfers
3249     // within a single alloca before SROA ran, or with transfers that have
3250     // a variable length. We may also be dealing with memmove instead of
3251     // memcpy, and so simply updating the pointers is necessary for us to
3252     // update both the source and dest of a single call.
3253     if (!IsSplittable) {
3254       Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
3255       if (IsDest) {
3256         // Update the address component of linked dbg.assigns.
3257         for (auto *DAI : at::getAssignmentMarkers(&II)) {
3258           if (llvm::is_contained(DAI->location_ops(), II.getDest()) ||
3259               DAI->getAddress() == II.getDest())
3260             DAI->replaceVariableLocationOp(II.getDest(), AdjustedPtr);
3261         }
3262         II.setDest(AdjustedPtr);
3263         II.setDestAlignment(SliceAlign);
3264       } else {
3265         II.setSource(AdjustedPtr);
3266         II.setSourceAlignment(SliceAlign);
3267       }
3268
3269       LLVM_DEBUG(dbgs() << " to: " << II << "\n");
3270       deleteIfTriviallyDead(OldPtr);
3271       return false;
3272     }
3273     // For split transfer intrinsics we have an incredibly useful assurance:
3274     // the source and destination do not reside within the same alloca, and at
3275     // least one of them does not escape. This means that we can replace
3276     // memmove with memcpy, and we don't need to worry about all manner of
3277     // downsides to splitting and transforming the operations.
3278
3279     // If this doesn't map cleanly onto the alloca type, and that type isn't
3280     // a single value type, just emit a memcpy.
3281     bool EmitMemCpy =
3282         !VecTy && !IntTy &&
3283         (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
3284          SliceSize !=
3285              DL.getTypeStoreSize(NewAI.getAllocatedType()).getFixedValue() ||
3286          !DL.typeSizeEqualsStoreSize(NewAI.getAllocatedType()) ||
3287          !NewAI.getAllocatedType()->isSingleValueType());
3288
3289     // If we're just going to emit a memcpy, the alloca hasn't changed, and the
3290     // size hasn't been shrunk based on analysis of the viable range, this is
3291     // a no-op.
3292     if (EmitMemCpy && &OldAI == &NewAI) {
3293       // Ensure the start lines up.
3294       assert(NewBeginOffset == BeginOffset);
3295
3296       // Rewrite the size as needed.
3297       if (NewEndOffset != EndOffset)
3298         II.setLength(ConstantInt::get(II.getLength()->getType(),
3299                                       NewEndOffset - NewBeginOffset));
3300       return false;
3301     }
3302     // Record this instruction for deletion.
3303     Pass.DeadInsts.push_back(&II);
3304
3305     // Strip all inbounds GEPs and pointer casts to try to dig out any root
3306     // alloca that should be re-examined after rewriting this instruction.
3307     Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
3308     if (AllocaInst *AI =
3309             dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
3310       assert(AI != &OldAI && AI != &NewAI &&
3311              "Splittable transfers cannot reach the same alloca on both ends.");
3312       Pass.Worklist.insert(AI);
3313     }
3314
3315     Type *OtherPtrTy = OtherPtr->getType();
3316     unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
3317
3318     // Compute the relative offset for the other pointer within the transfer.
3319     unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
3320     APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
3321     Align OtherAlign =
3322         (IsDest ? II.getSourceAlign() : II.getDestAlign()).valueOrOne();
3323     OtherAlign =
3324         commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue());
3325
3326     if (EmitMemCpy) {
3327       // Compute the other pointer, folding as much as possible to produce
3328       // a single, simple GEP in most cases.
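      // E.g. (illustrative) with a constant offset of 8 this ideally becomes
      // the single instruction
      //   %other.8 = getelementptr i8, ptr %other, i64 8
      // rather than a chain of casts and GEPs.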
3329 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 3330 OtherPtr->getName() + "."); 3331 3332 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3333 Type *SizeTy = II.getLength()->getType(); 3334 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 3335 3336 Value *DestPtr, *SrcPtr; 3337 MaybeAlign DestAlign, SrcAlign; 3338 // Note: IsDest is true iff we're copying into the new alloca slice 3339 if (IsDest) { 3340 DestPtr = OurPtr; 3341 DestAlign = SliceAlign; 3342 SrcPtr = OtherPtr; 3343 SrcAlign = OtherAlign; 3344 } else { 3345 DestPtr = OtherPtr; 3346 DestAlign = OtherAlign; 3347 SrcPtr = OurPtr; 3348 SrcAlign = SliceAlign; 3349 } 3350 CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign, 3351 Size, II.isVolatile()); 3352 if (AATags) 3353 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3354 3355 APInt Offset(DL.getIndexTypeSizeInBits(DestPtr->getType()), 0); 3356 if (IsDest) { 3357 migrateDebugInfo(&OldAI, IsSplit, NewBeginOffset * 8, SliceSize * 8, 3358 &II, New, DestPtr, nullptr, DL); 3359 } else if (AllocaInst *Base = dyn_cast<AllocaInst>( 3360 DestPtr->stripAndAccumulateConstantOffsets( 3361 DL, Offset, /*AllowNonInbounds*/ true))) { 3362 migrateDebugInfo(Base, IsSplit, Offset.getZExtValue() * 8, 3363 SliceSize * 8, &II, New, DestPtr, nullptr, DL); 3364 } 3365 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3366 return false; 3367 } 3368 3369 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && 3370 NewEndOffset == NewAllocaEndOffset; 3371 uint64_t Size = NewEndOffset - NewBeginOffset; 3372 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; 3373 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; 3374 unsigned NumElements = EndIndex - BeginIndex; 3375 IntegerType *SubIntTy = 3376 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; 3377 3378 // Reset the other pointer type to match the register type we're going to 3379 // use, but using the address space of the original other pointer. 
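    // E.g. (illustrative) for a <4 x i32> alloca where this slice covers two
    // elements, the other side of the transfer is accessed as <2 x i32>; for
    // an integer-widened alloca it is accessed as an iN spanning just the
    // slice's bytes.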
3380 Type *OtherTy; 3381 if (VecTy && !IsWholeAlloca) { 3382 if (NumElements == 1) 3383 OtherTy = VecTy->getElementType(); 3384 else 3385 OtherTy = FixedVectorType::get(VecTy->getElementType(), NumElements); 3386 } else if (IntTy && !IsWholeAlloca) { 3387 OtherTy = SubIntTy; 3388 } else { 3389 OtherTy = NewAllocaTy; 3390 } 3391 3392 Value *AdjPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 3393 OtherPtr->getName() + "."); 3394 MaybeAlign SrcAlign = OtherAlign; 3395 MaybeAlign DstAlign = SliceAlign; 3396 if (!IsDest) 3397 std::swap(SrcAlign, DstAlign); 3398 3399 Value *SrcPtr; 3400 Value *DstPtr; 3401 3402 if (IsDest) { 3403 DstPtr = getPtrToNewAI(II.getDestAddressSpace(), II.isVolatile()); 3404 SrcPtr = AdjPtr; 3405 } else { 3406 DstPtr = AdjPtr; 3407 SrcPtr = getPtrToNewAI(II.getSourceAddressSpace(), II.isVolatile()); 3408 } 3409 3410 Value *Src; 3411 if (VecTy && !IsWholeAlloca && !IsDest) { 3412 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3413 NewAI.getAlign(), "load"); 3414 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); 3415 } else if (IntTy && !IsWholeAlloca && !IsDest) { 3416 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3417 NewAI.getAlign(), "load"); 3418 Src = convertValue(DL, IRB, Src, IntTy); 3419 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3420 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); 3421 } else { 3422 LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign, 3423 II.isVolatile(), "copyload"); 3424 Load->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access, 3425 LLVMContext::MD_access_group}); 3426 if (AATags) 3427 Load->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3428 Src = Load; 3429 } 3430 3431 if (VecTy && !IsWholeAlloca && IsDest) { 3432 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3433 NewAI.getAlign(), "oldload"); 3434 Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); 3435 } else if (IntTy && !IsWholeAlloca && IsDest) { 3436 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3437 NewAI.getAlign(), "oldload"); 3438 Old = convertValue(DL, IRB, Old, IntTy); 3439 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3440 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); 3441 Src = convertValue(DL, IRB, Src, NewAllocaTy); 3442 } 3443 3444 StoreInst *Store = cast<StoreInst>( 3445 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); 3446 Store->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access, 3447 LLVMContext::MD_access_group}); 3448 if (AATags) 3449 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3450 3451 APInt Offset(DL.getIndexTypeSizeInBits(DstPtr->getType()), 0); 3452 if (IsDest) { 3453 3454 migrateDebugInfo(&OldAI, IsSplit, NewBeginOffset * 8, SliceSize * 8, &II, 3455 Store, DstPtr, Src, DL); 3456 } else if (AllocaInst *Base = dyn_cast<AllocaInst>( 3457 DstPtr->stripAndAccumulateConstantOffsets( 3458 DL, Offset, /*AllowNonInbounds*/ true))) { 3459 migrateDebugInfo(Base, IsSplit, Offset.getZExtValue() * 8, SliceSize * 8, 3460 &II, Store, DstPtr, Src, DL); 3461 } 3462 3463 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3464 return !II.isVolatile(); 3465 } 3466 3467 bool visitIntrinsicInst(IntrinsicInst &II) { 3468 assert((II.isLifetimeStartOrEnd() || II.isLaunderOrStripInvariantGroup() || 3469 II.isDroppable()) && 3470 "Unexpected intrinsic!"); 3471 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 3472 3473 // Record this instruction for 
deletion. 3474 Pass.DeadInsts.push_back(&II); 3475 3476 if (II.isDroppable()) { 3477 assert(II.getIntrinsicID() == Intrinsic::assume && "Expected assume"); 3478 // TODO For now we forget assumed information, this can be improved. 3479 OldPtr->dropDroppableUsesIn(II); 3480 return true; 3481 } 3482 3483 if (II.isLaunderOrStripInvariantGroup()) 3484 return true; 3485 3486 assert(II.getArgOperand(1) == OldPtr); 3487 // Lifetime intrinsics are only promotable if they cover the whole alloca. 3488 // Therefore, we drop lifetime intrinsics which don't cover the whole 3489 // alloca. 3490 // (In theory, intrinsics which partially cover an alloca could be 3491 // promoted, but PromoteMemToReg doesn't handle that case.) 3492 // FIXME: Check whether the alloca is promotable before dropping the 3493 // lifetime intrinsics? 3494 if (NewBeginOffset != NewAllocaBeginOffset || 3495 NewEndOffset != NewAllocaEndOffset) 3496 return true; 3497 3498 ConstantInt *Size = 3499 ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), 3500 NewEndOffset - NewBeginOffset); 3501 // Lifetime intrinsics always expect an i8* so directly get such a pointer 3502 // for the new alloca slice. 3503 Type *PointerTy = IRB.getPtrTy(OldPtr->getType()->getPointerAddressSpace()); 3504 Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy); 3505 Value *New; 3506 if (II.getIntrinsicID() == Intrinsic::lifetime_start) 3507 New = IRB.CreateLifetimeStart(Ptr, Size); 3508 else 3509 New = IRB.CreateLifetimeEnd(Ptr, Size); 3510 3511 (void)New; 3512 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3513 3514 return true; 3515 } 3516 3517 void fixLoadStoreAlign(Instruction &Root) { 3518 // This algorithm implements the same visitor loop as 3519 // hasUnsafePHIOrSelectUse, and fixes the alignment of each load 3520 // or store found. 3521 SmallPtrSet<Instruction *, 4> Visited; 3522 SmallVector<Instruction *, 4> Uses; 3523 Visited.insert(&Root); 3524 Uses.push_back(&Root); 3525 do { 3526 Instruction *I = Uses.pop_back_val(); 3527 3528 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 3529 LI->setAlignment(std::min(LI->getAlign(), getSliceAlign())); 3530 continue; 3531 } 3532 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 3533 SI->setAlignment(std::min(SI->getAlign(), getSliceAlign())); 3534 continue; 3535 } 3536 3537 assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) || 3538 isa<PHINode>(I) || isa<SelectInst>(I) || 3539 isa<GetElementPtrInst>(I)); 3540 for (User *U : I->users()) 3541 if (Visited.insert(cast<Instruction>(U)).second) 3542 Uses.push_back(cast<Instruction>(U)); 3543 } while (!Uses.empty()); 3544 } 3545 3546 bool visitPHINode(PHINode &PN) { 3547 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 3548 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); 3549 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); 3550 3551 // We would like to compute a new pointer in only one place, but have it be 3552 // as local as possible to the PHI. To do that, we re-use the location of 3553 // the old pointer, which necessarily must be in the right position to 3554 // dominate the PHI. 3555 IRBuilderBase::InsertPointGuard Guard(IRB); 3556 if (isa<PHINode>(OldPtr)) 3557 IRB.SetInsertPoint(OldPtr->getParent(), 3558 OldPtr->getParent()->getFirstInsertionPt()); 3559 else 3560 IRB.SetInsertPoint(OldPtr); 3561 IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc()); 3562 3563 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3564 // Replace the operands which were using the old pointer. 
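    // (std::replace visits every incoming operand, since OldPtr may arrive
    // from more than one predecessor block.)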
3565 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); 3566 3567 LLVM_DEBUG(dbgs() << " to: " << PN << "\n"); 3568 deleteIfTriviallyDead(OldPtr); 3569 3570 // Fix the alignment of any loads or stores using this PHI node. 3571 fixLoadStoreAlign(PN); 3572 3573 // PHIs can't be promoted on their own, but often can be speculated. We 3574 // check the speculation outside of the rewriter so that we see the 3575 // fully-rewritten alloca. 3576 PHIUsers.insert(&PN); 3577 return true; 3578 } 3579 3580 bool visitSelectInst(SelectInst &SI) { 3581 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3582 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && 3583 "Pointer isn't an operand!"); 3584 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); 3585 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); 3586 3587 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3588 // Replace the operands which were using the old pointer. 3589 if (SI.getOperand(1) == OldPtr) 3590 SI.setOperand(1, NewPtr); 3591 if (SI.getOperand(2) == OldPtr) 3592 SI.setOperand(2, NewPtr); 3593 3594 LLVM_DEBUG(dbgs() << " to: " << SI << "\n"); 3595 deleteIfTriviallyDead(OldPtr); 3596 3597 // Fix the alignment of any loads or stores using this select. 3598 fixLoadStoreAlign(SI); 3599 3600 // Selects can't be promoted on their own, but often can be speculated. We 3601 // check the speculation outside of the rewriter so that we see the 3602 // fully-rewritten alloca. 3603 SelectUsers.insert(&SI); 3604 return true; 3605 } 3606 }; 3607 3608 /// Visitor to rewrite aggregate loads and stores as scalar. 3609 /// 3610 /// This pass aggressively rewrites all aggregate loads and stores on 3611 /// a particular pointer (or any pointer derived from it which we can identify) 3612 /// with scalar loads and stores. 3613 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { 3614 // Befriend the base class so it can delegate to private visit methods. 3615 friend class InstVisitor<AggLoadStoreRewriter, bool>; 3616 3617 /// Queue of pointer uses to analyze and potentially rewrite. 3618 SmallVector<Use *, 8> Queue; 3619 3620 /// Set to prevent us from cycling with phi nodes and loops. 3621 SmallPtrSet<User *, 8> Visited; 3622 3623 /// The current pointer use being rewritten. This is used to dig up the used 3624 /// value (as opposed to the user). 3625 Use *U = nullptr; 3626 3627 /// Used to calculate offsets, and hence alignment, of subobjects. 3628 const DataLayout &DL; 3629 3630 IRBuilderTy &IRB; 3631 3632 public: 3633 AggLoadStoreRewriter(const DataLayout &DL, IRBuilderTy &IRB) 3634 : DL(DL), IRB(IRB) {} 3635 3636 /// Rewrite loads and stores through a pointer and all pointers derived from 3637 /// it. 3638 bool rewrite(Instruction &I) { 3639 LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); 3640 enqueueUsers(I); 3641 bool Changed = false; 3642 while (!Queue.empty()) { 3643 U = Queue.pop_back_val(); 3644 Changed |= visit(cast<Instruction>(U->getUser())); 3645 } 3646 return Changed; 3647 } 3648 3649 private: 3650 /// Enqueue all the users of the given instruction for further processing. 3651 /// This uses a set to de-duplicate users. 3652 void enqueueUsers(Instruction &I) { 3653 for (Use &U : I.uses()) 3654 if (Visited.insert(U.getUser()).second) 3655 Queue.push_back(&U); 3656 } 3657 3658 // Conservative default is to not rewrite anything. 
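  // Only loads, stores, and the pointer-forwarding instructions handled below
  // (casts, GEPs, PHIs, and selects) are traversed or rewritten.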
3659   bool visitInstruction(Instruction &I) { return false; }
3660
3661   /// Generic recursive split emission class.
3662   template <typename Derived> class OpSplitter {
3663   protected:
3664     /// The builder used to form new instructions.
3665     IRBuilderTy &IRB;
3666
3667     /// The indices to be used with insert- or extractvalue to select the
3668     /// appropriate value within the aggregate.
3669     SmallVector<unsigned, 4> Indices;
3670
3671     /// The indices to a GEP instruction which will move Ptr to the correct slot
3672     /// within the aggregate.
3673     SmallVector<Value *, 4> GEPIndices;
3674
3675     /// The base pointer of the original op, used as a base for GEPing the
3676     /// split operations.
3677     Value *Ptr;
3678
3679     /// The base pointee type being GEPed into.
3680     Type *BaseTy;
3681
3682     /// Known alignment of the base pointer.
3683     Align BaseAlign;
3684
3685     /// To calculate offset of each component so we can correctly deduce
3686     /// alignments.
3687     const DataLayout &DL;
3688
3689     /// Initialize the splitter with an insertion point and Ptr, and start with
3690     /// a single zero GEP index.
3691     OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3692                Align BaseAlign, const DataLayout &DL, IRBuilderTy &IRB)
3693         : IRB(IRB), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), BaseTy(BaseTy),
3694           BaseAlign(BaseAlign), DL(DL) {
3695       IRB.SetInsertPoint(InsertionPoint);
3696     }
3697
3698   public:
3699     /// Generic recursive split emission routine.
3700     ///
3701     /// This method recursively splits an aggregate op (load or store) into
3702     /// scalar or vector ops. It splits recursively until it hits a single value
3703     /// and emits that single value operation via the template argument.
3704     ///
3705     /// The logic of this routine relies on GEPs and insertvalue and
3706     /// extractvalue all operating with the same fundamental index list, merely
3707     /// formatted differently (GEPs need actual values).
3708     ///
3709     /// \param Ty The type being split recursively into smaller ops.
3710     /// \param Agg The aggregate value being built up or stored, depending on
3711     /// whether this is splitting a load or a store respectively.
3712     void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
3713       if (Ty->isSingleValueType()) {
3714         unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices);
3715         return static_cast<Derived *>(this)->emitFunc(
3716             Ty, Agg, commonAlignment(BaseAlign, Offset), Name);
3717       }
3718
3719       if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
3720         unsigned OldSize = Indices.size();
3721         (void)OldSize;
3722         for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
3723              ++Idx) {
3724           assert(Indices.size() == OldSize && "Did not return to the old size");
3725           Indices.push_back(Idx);
3726           GEPIndices.push_back(IRB.getInt32(Idx));
3727           emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
3728           GEPIndices.pop_back();
3729           Indices.pop_back();
3730         }
3731         return;
3732       }
3733
3734       if (StructType *STy = dyn_cast<StructType>(Ty)) {
3735         unsigned OldSize = Indices.size();
3736         (void)OldSize;
3737         for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
3738              ++Idx) {
3739           assert(Indices.size() == OldSize && "Did not return to the old size");
3740           Indices.push_back(Idx);
3741           GEPIndices.push_back(IRB.getInt32(Idx));
3742           emitSplitOps(STy->getElementType(Idx), Agg, Name + "."
+ Twine(Idx)); 3743 GEPIndices.pop_back(); 3744 Indices.pop_back(); 3745 } 3746 return; 3747 } 3748 3749 llvm_unreachable("Only arrays and structs are aggregate loadable types"); 3750 } 3751 }; 3752 3753 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { 3754 AAMDNodes AATags; 3755 3756 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3757 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL, 3758 IRBuilderTy &IRB) 3759 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, DL, 3760 IRB), 3761 AATags(AATags) {} 3762 3763 /// Emit a leaf load of a single value. This is called at the leaves of the 3764 /// recursive emission to actually load values. 3765 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3766 assert(Ty->isSingleValueType()); 3767 // Load the single value and insert it using the indices. 3768 Value *GEP = 3769 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3770 LoadInst *Load = 3771 IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load"); 3772 3773 APInt Offset( 3774 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0); 3775 if (AATags && 3776 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset)) 3777 Load->setAAMetadata(AATags.shift(Offset.getZExtValue())); 3778 3779 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); 3780 LLVM_DEBUG(dbgs() << " to: " << *Load << "\n"); 3781 } 3782 }; 3783 3784 bool visitLoadInst(LoadInst &LI) { 3785 assert(LI.getPointerOperand() == *U); 3786 if (!LI.isSimple() || LI.getType()->isSingleValueType()) 3787 return false; 3788 3789 // We have an aggregate being loaded, split it apart. 3790 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 3791 LoadOpSplitter Splitter(&LI, *U, LI.getType(), LI.getAAMetadata(), 3792 getAdjustedAlignment(&LI, 0), DL, IRB); 3793 Value *V = PoisonValue::get(LI.getType()); 3794 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); 3795 Visited.erase(&LI); 3796 LI.replaceAllUsesWith(V); 3797 LI.eraseFromParent(); 3798 return true; 3799 } 3800 3801 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { 3802 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3803 AAMDNodes AATags, StoreInst *AggStore, Align BaseAlign, 3804 const DataLayout &DL, IRBuilderTy &IRB) 3805 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, 3806 DL, IRB), 3807 AATags(AATags), AggStore(AggStore) {} 3808 AAMDNodes AATags; 3809 StoreInst *AggStore; 3810 /// Emit a leaf store of a single value. This is called at the leaves of the 3811 /// recursive emission to actually produce stores. 3812 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3813 assert(Ty->isSingleValueType()); 3814 // Extract the single value and store it using the indices. 3815 // 3816 // The gep and extractvalue values are factored out of the CreateStore 3817 // call to make the output independent of the argument evaluation order. 
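      // (The evaluation order of CreateStore's arguments is unspecified in
      // C++, so materializing them as named locals first keeps the emitted
      // instruction order deterministic.)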
3818 Value *ExtractValue = 3819 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"); 3820 Value *InBoundsGEP = 3821 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3822 StoreInst *Store = 3823 IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment); 3824 3825 APInt Offset( 3826 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0); 3827 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset); 3828 if (AATags) 3829 Store->setAAMetadata(AATags.shift(Offset.getZExtValue())); 3830 3831 // migrateDebugInfo requires the base Alloca. Walk to it from this gep. 3832 // If we cannot (because there's an intervening non-const or unbounded 3833 // gep) then we wouldn't expect to see dbg.assign intrinsics linked to 3834 // this instruction. 3835 Value *Base = AggStore->getPointerOperand()->stripInBoundsOffsets(); 3836 if (auto *OldAI = dyn_cast<AllocaInst>(Base)) { 3837 uint64_t SizeInBits = 3838 DL.getTypeSizeInBits(Store->getValueOperand()->getType()); 3839 migrateDebugInfo(OldAI, /*IsSplit*/ true, Offset.getZExtValue() * 8, 3840 SizeInBits, AggStore, Store, 3841 Store->getPointerOperand(), Store->getValueOperand(), 3842 DL); 3843 } else { 3844 assert(at::getAssignmentMarkers(Store).empty() && 3845 "AT: unexpected debug.assign linked to store through " 3846 "unbounded GEP"); 3847 } 3848 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3849 } 3850 }; 3851 3852 bool visitStoreInst(StoreInst &SI) { 3853 if (!SI.isSimple() || SI.getPointerOperand() != *U) 3854 return false; 3855 Value *V = SI.getValueOperand(); 3856 if (V->getType()->isSingleValueType()) 3857 return false; 3858 3859 // We have an aggregate being stored, split it apart. 3860 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3861 StoreOpSplitter Splitter(&SI, *U, V->getType(), SI.getAAMetadata(), &SI, 3862 getAdjustedAlignment(&SI, 0), DL, IRB); 3863 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); 3864 Visited.erase(&SI); 3865 // The stores replacing SI each have markers describing fragments of the 3866 // assignment so delete the assignment markers linked to SI. 
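    // (Each split store emitted above received its own fragment via
    // migrateDebugInfo, so SI's original markers are now redundant.)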
3867 at::deleteAssignmentMarkers(&SI); 3868 SI.eraseFromParent(); 3869 return true; 3870 } 3871 3872 bool visitBitCastInst(BitCastInst &BC) { 3873 enqueueUsers(BC); 3874 return false; 3875 } 3876 3877 bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 3878 enqueueUsers(ASC); 3879 return false; 3880 } 3881 3882 // Fold gep (select cond, ptr1, ptr2) => select cond, gep(ptr1), gep(ptr2) 3883 bool foldGEPSelect(GetElementPtrInst &GEPI) { 3884 if (!GEPI.hasAllConstantIndices()) 3885 return false; 3886 3887 SelectInst *Sel = cast<SelectInst>(GEPI.getPointerOperand()); 3888 3889 LLVM_DEBUG(dbgs() << " Rewriting gep(select) -> select(gep):" 3890 << "\n original: " << *Sel 3891 << "\n " << GEPI); 3892 3893 IRB.SetInsertPoint(&GEPI); 3894 SmallVector<Value *, 4> Index(GEPI.indices()); 3895 bool IsInBounds = GEPI.isInBounds(); 3896 3897 Type *Ty = GEPI.getSourceElementType(); 3898 Value *True = Sel->getTrueValue(); 3899 Value *NTrue = IRB.CreateGEP(Ty, True, Index, True->getName() + ".sroa.gep", 3900 IsInBounds); 3901 3902 Value *False = Sel->getFalseValue(); 3903 3904 Value *NFalse = IRB.CreateGEP(Ty, False, Index, 3905 False->getName() + ".sroa.gep", IsInBounds); 3906 3907 Value *NSel = IRB.CreateSelect(Sel->getCondition(), NTrue, NFalse, 3908 Sel->getName() + ".sroa.sel"); 3909 Visited.erase(&GEPI); 3910 GEPI.replaceAllUsesWith(NSel); 3911 GEPI.eraseFromParent(); 3912 Instruction *NSelI = cast<Instruction>(NSel); 3913 Visited.insert(NSelI); 3914 enqueueUsers(*NSelI); 3915 3916 LLVM_DEBUG(dbgs() << "\n to: " << *NTrue 3917 << "\n " << *NFalse 3918 << "\n " << *NSel << '\n'); 3919 3920 return true; 3921 } 3922 3923 // Fold gep (phi ptr1, ptr2) => phi gep(ptr1), gep(ptr2) 3924 bool foldGEPPhi(GetElementPtrInst &GEPI) { 3925 if (!GEPI.hasAllConstantIndices()) 3926 return false; 3927 3928 PHINode *PHI = cast<PHINode>(GEPI.getPointerOperand()); 3929 if (GEPI.getParent() != PHI->getParent() || 3930 llvm::any_of(PHI->incoming_values(), [](Value *In) 3931 { Instruction *I = dyn_cast<Instruction>(In); 3932 return !I || isa<GetElementPtrInst>(I) || isa<PHINode>(I) || 3933 succ_empty(I->getParent()) || 3934 !I->getParent()->isLegalToHoistInto(); 3935 })) 3936 return false; 3937 3938 LLVM_DEBUG(dbgs() << " Rewriting gep(phi) -> phi(gep):" 3939 << "\n original: " << *PHI 3940 << "\n " << GEPI 3941 << "\n to: "); 3942 3943 SmallVector<Value *, 4> Index(GEPI.indices()); 3944 bool IsInBounds = GEPI.isInBounds(); 3945 IRB.SetInsertPoint(GEPI.getParent(), GEPI.getParent()->getFirstNonPHIIt()); 3946 PHINode *NewPN = IRB.CreatePHI(GEPI.getType(), PHI->getNumIncomingValues(), 3947 PHI->getName() + ".sroa.phi"); 3948 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) { 3949 BasicBlock *B = PHI->getIncomingBlock(I); 3950 Value *NewVal = nullptr; 3951 int Idx = NewPN->getBasicBlockIndex(B); 3952 if (Idx >= 0) { 3953 NewVal = NewPN->getIncomingValue(Idx); 3954 } else { 3955 Instruction *In = cast<Instruction>(PHI->getIncomingValue(I)); 3956 3957 IRB.SetInsertPoint(In->getParent(), std::next(In->getIterator())); 3958 Type *Ty = GEPI.getSourceElementType(); 3959 NewVal = IRB.CreateGEP(Ty, In, Index, In->getName() + ".sroa.gep", 3960 IsInBounds); 3961 } 3962 NewPN->addIncoming(NewVal, B); 3963 } 3964 3965 Visited.erase(&GEPI); 3966 GEPI.replaceAllUsesWith(NewPN); 3967 GEPI.eraseFromParent(); 3968 Visited.insert(NewPN); 3969 enqueueUsers(*NewPN); 3970 3971 LLVM_DEBUG(for (Value *In : NewPN->incoming_values()) 3972 dbgs() << "\n " << *In; 3973 dbgs() << "\n " << *NewPN << '\n'); 3974 3975 return true; 3976 } 
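  // Illustrative example of the gep(phi) fold above (hypothetical IR):
  //
  //   %phi = phi ptr [ %a, %bb1 ], [ %b, %bb2 ]
  //   %gep = getelementptr i8, ptr %phi, i64 4
  //
  // becomes
  //
  //   bb1: %a.sroa.gep = getelementptr i8, ptr %a, i64 4
  //   bb2: %b.sroa.gep = getelementptr i8, ptr %b, i64 4
  //   ...
  //   %phi.sroa.phi = phi ptr [ %a.sroa.gep, %bb1 ], [ %b.sroa.gep, %bb2 ]
  //
  // exposing the individual pointers to further rewriting.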
3977 3978 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { 3979 if (isa<SelectInst>(GEPI.getPointerOperand()) && 3980 foldGEPSelect(GEPI)) 3981 return true; 3982 3983 if (isa<PHINode>(GEPI.getPointerOperand()) && 3984 foldGEPPhi(GEPI)) 3985 return true; 3986 3987 enqueueUsers(GEPI); 3988 return false; 3989 } 3990 3991 bool visitPHINode(PHINode &PN) { 3992 enqueueUsers(PN); 3993 return false; 3994 } 3995 3996 bool visitSelectInst(SelectInst &SI) { 3997 enqueueUsers(SI); 3998 return false; 3999 } 4000 }; 4001 4002 } // end anonymous namespace 4003 4004 /// Strip aggregate type wrapping. 4005 /// 4006 /// This removes no-op aggregate types wrapping an underlying type. It will 4007 /// strip as many layers of types as it can without changing either the type 4008 /// size or the allocated size. 4009 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { 4010 if (Ty->isSingleValueType()) 4011 return Ty; 4012 4013 uint64_t AllocSize = DL.getTypeAllocSize(Ty).getFixedValue(); 4014 uint64_t TypeSize = DL.getTypeSizeInBits(Ty).getFixedValue(); 4015 4016 Type *InnerTy; 4017 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 4018 InnerTy = ArrTy->getElementType(); 4019 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 4020 const StructLayout *SL = DL.getStructLayout(STy); 4021 unsigned Index = SL->getElementContainingOffset(0); 4022 InnerTy = STy->getElementType(Index); 4023 } else { 4024 return Ty; 4025 } 4026 4027 if (AllocSize > DL.getTypeAllocSize(InnerTy).getFixedValue() || 4028 TypeSize > DL.getTypeSizeInBits(InnerTy).getFixedValue()) 4029 return Ty; 4030 4031 return stripAggregateTypeWrapping(DL, InnerTy); 4032 } 4033 4034 /// Try to find a partition of the aggregate type passed in for a given 4035 /// offset and size. 4036 /// 4037 /// This recurses through the aggregate type and tries to compute a subtype 4038 /// based on the offset and size. When the offset and size span a sub-section 4039 /// of an array, it will even compute a new array type for that sub-section, 4040 /// and the same for structs. 4041 /// 4042 /// Note that this routine is very strict and tries to find a partition of the 4043 /// type which produces the *exact* right offset and size. It is not forgiving 4044 /// when the size or offset cause either end of type-based partition to be off. 4045 /// Also, this is a best-effort routine. It is reasonable to give up and not 4046 /// return a type if necessary. 4047 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, 4048 uint64_t Size) { 4049 if (Offset == 0 && DL.getTypeAllocSize(Ty).getFixedValue() == Size) 4050 return stripAggregateTypeWrapping(DL, Ty); 4051 if (Offset > DL.getTypeAllocSize(Ty).getFixedValue() || 4052 (DL.getTypeAllocSize(Ty).getFixedValue() - Offset) < Size) 4053 return nullptr; 4054 4055 if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) { 4056 Type *ElementTy; 4057 uint64_t TyNumElements; 4058 if (auto *AT = dyn_cast<ArrayType>(Ty)) { 4059 ElementTy = AT->getElementType(); 4060 TyNumElements = AT->getNumElements(); 4061 } else { 4062 // FIXME: This isn't right for vectors with non-byte-sized or 4063 // non-power-of-two sized elements. 
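      // (For instance, <8 x i1> has 1-bit elements, so the alloc-size based
      // arithmetic below would misattribute element offsets for it.)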
4064 auto *VT = cast<FixedVectorType>(Ty); 4065 ElementTy = VT->getElementType(); 4066 TyNumElements = VT->getNumElements(); 4067 } 4068 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedValue(); 4069 uint64_t NumSkippedElements = Offset / ElementSize; 4070 if (NumSkippedElements >= TyNumElements) 4071 return nullptr; 4072 Offset -= NumSkippedElements * ElementSize; 4073 4074 // First check if we need to recurse. 4075 if (Offset > 0 || Size < ElementSize) { 4076 // Bail if the partition ends in a different array element. 4077 if ((Offset + Size) > ElementSize) 4078 return nullptr; 4079 // Recurse through the element type trying to peel off offset bytes. 4080 return getTypePartition(DL, ElementTy, Offset, Size); 4081 } 4082 assert(Offset == 0); 4083 4084 if (Size == ElementSize) 4085 return stripAggregateTypeWrapping(DL, ElementTy); 4086 assert(Size > ElementSize); 4087 uint64_t NumElements = Size / ElementSize; 4088 if (NumElements * ElementSize != Size) 4089 return nullptr; 4090 return ArrayType::get(ElementTy, NumElements); 4091 } 4092 4093 StructType *STy = dyn_cast<StructType>(Ty); 4094 if (!STy) 4095 return nullptr; 4096 4097 const StructLayout *SL = DL.getStructLayout(STy); 4098 4099 if (SL->getSizeInBits().isScalable()) 4100 return nullptr; 4101 4102 if (Offset >= SL->getSizeInBytes()) 4103 return nullptr; 4104 uint64_t EndOffset = Offset + Size; 4105 if (EndOffset > SL->getSizeInBytes()) 4106 return nullptr; 4107 4108 unsigned Index = SL->getElementContainingOffset(Offset); 4109 Offset -= SL->getElementOffset(Index); 4110 4111 Type *ElementTy = STy->getElementType(Index); 4112 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedValue(); 4113 if (Offset >= ElementSize) 4114 return nullptr; // The offset points into alignment padding. 4115 4116 // See if any partition must be contained by the element. 4117 if (Offset > 0 || Size < ElementSize) { 4118 if ((Offset + Size) > ElementSize) 4119 return nullptr; 4120 return getTypePartition(DL, ElementTy, Offset, Size); 4121 } 4122 assert(Offset == 0); 4123 4124 if (Size == ElementSize) 4125 return stripAggregateTypeWrapping(DL, ElementTy); 4126 4127 StructType::element_iterator EI = STy->element_begin() + Index, 4128 EE = STy->element_end(); 4129 if (EndOffset < SL->getSizeInBytes()) { 4130 unsigned EndIndex = SL->getElementContainingOffset(EndOffset); 4131 if (Index == EndIndex) 4132 return nullptr; // Within a single element and its padding. 4133 4134 // Don't try to form "natural" types if the elements don't line up with the 4135 // expected size. 4136 // FIXME: We could potentially recurse down through the last element in the 4137 // sub-struct to find a natural end point. 4138 if (SL->getElementOffset(EndIndex) != EndOffset) 4139 return nullptr; 4140 4141 assert(Index < EndIndex); 4142 EE = STy->element_begin() + EndIndex; 4143 } 4144 4145 // Try to build up a sub-structure. 4146 StructType *SubTy = 4147 StructType::get(STy->getContext(), ArrayRef(EI, EE), STy->isPacked()); 4148 const StructLayout *SubSL = DL.getStructLayout(SubTy); 4149 if (Size != SubSL->getSizeInBytes()) 4150 return nullptr; // The sub-struct doesn't have quite the size needed. 4151 4152 return SubTy; 4153 } 4154 4155 /// Pre-split loads and stores to simplify rewriting. 4156 /// 4157 /// We want to break up the splittable load+store pairs as much as 4158 /// possible. 
This is important to do as a preprocessing step, as once we
4159 /// start rewriting the accesses to partitions of the alloca we lose the
4160 /// necessary information to correctly split apart paired loads and stores
4161 /// which both point into this alloca. The case to consider is something like
4162 /// the following:
4163 ///
4164 ///   %a = alloca [12 x i8]
4165 ///   %gep1 = getelementptr i8, ptr %a, i32 0
4166 ///   %gep2 = getelementptr i8, ptr %a, i32 4
4167 ///   %gep3 = getelementptr i8, ptr %a, i32 8
4168 ///   store float 0.0, ptr %gep1
4169 ///   store float 1.0, ptr %gep2
4170 ///   %v = load i64, ptr %gep1
4171 ///   store i64 %v, ptr %gep2
4172 ///   %f1 = load float, ptr %gep2
4173 ///   %f2 = load float, ptr %gep3
4174 ///
4175 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
4176 /// promote everything so we recover the 2 SSA values that should have been
4177 /// there all along.
4178 ///
4179 /// \returns true if any changes are made.
4180 bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
4181   LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");
4182
4183   // Track the loads and stores which are candidates for pre-splitting here, in
4184   // the order they first appear during the partition scan. These give stable
4185   // iteration order and a basis for tracking which loads and stores we
4186   // actually split.
4187   SmallVector<LoadInst *, 4> Loads;
4188   SmallVector<StoreInst *, 4> Stores;
4189
4190   // We need to accumulate the splits required of each load or store where we
4191   // can find them via a direct lookup. This is important to cross-check loads
4192   // and stores against each other. We also track the slice so that we can kill
4193   // all the slices that end up split.
4194   struct SplitOffsets {
4195     Slice *S;
4196     std::vector<uint64_t> Splits;
4197   };
4198   SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
4199
4200   // Track loads out of this alloca which cannot, for any reason, be pre-split.
4201   // This is important as we also cannot pre-split stores of those loads!
4202   // FIXME: This is all pretty gross. It means that we can be more aggressive
4203   // in pre-splitting when the load feeding the store happens to come from
4204   // a separate alloca. Put another way, the effectiveness of SROA would be
4205   // decreased by a frontend which just concatenated all of its local allocas
4206   // into one big flat alloca. But defeating such patterns is exactly the job
4207   // SROA is tasked with! Sadly, to not have this discrepancy we would have to
4208   // change store pre-splitting to actually force pre-splitting of the load
4209   // that feeds it *and all stores*. That makes pre-splitting much harder, but
4210   // maybe it would make it more principled?
4211   SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
4212
4213   LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n");
4214   for (auto &P : AS.partitions()) {
4215     for (Slice &S : P) {
4216       Instruction *I = cast<Instruction>(S.getUse()->getUser());
4217       if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
4218         // If this is a load we have to track that it can't participate in any
4219         // pre-splitting. If this is a store of a load, we have to track that
4220         // the stored load also can't participate in any pre-splitting.
4221 if (auto *LI = dyn_cast<LoadInst>(I)) 4222 UnsplittableLoads.insert(LI); 4223 else if (auto *SI = dyn_cast<StoreInst>(I)) 4224 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand())) 4225 UnsplittableLoads.insert(LI); 4226 continue; 4227 } 4228 assert(P.endOffset() > S.beginOffset() && 4229 "Empty or backwards partition!"); 4230 4231 // Determine if this is a pre-splittable slice. 4232 if (auto *LI = dyn_cast<LoadInst>(I)) { 4233 assert(!LI->isVolatile() && "Cannot split volatile loads!"); 4234 4235 // The load must be used exclusively to store into other pointers for 4236 // us to be able to arbitrarily pre-split it. The stores must also be 4237 // simple to avoid changing semantics. 4238 auto IsLoadSimplyStored = [](LoadInst *LI) { 4239 for (User *LU : LI->users()) { 4240 auto *SI = dyn_cast<StoreInst>(LU); 4241 if (!SI || !SI->isSimple()) 4242 return false; 4243 } 4244 return true; 4245 }; 4246 if (!IsLoadSimplyStored(LI)) { 4247 UnsplittableLoads.insert(LI); 4248 continue; 4249 } 4250 4251 Loads.push_back(LI); 4252 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 4253 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) 4254 // Skip stores *of* pointers. FIXME: This shouldn't even be possible! 4255 continue; 4256 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); 4257 if (!StoredLoad || !StoredLoad->isSimple()) 4258 continue; 4259 assert(!SI->isVolatile() && "Cannot split volatile stores!"); 4260 4261 Stores.push_back(SI); 4262 } else { 4263 // Other uses cannot be pre-split. 4264 continue; 4265 } 4266 4267 // Record the initial split. 4268 LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n"); 4269 auto &Offsets = SplitOffsetsMap[I]; 4270 assert(Offsets.Splits.empty() && 4271 "Should not have splits the first time we see an instruction!"); 4272 Offsets.S = &S; 4273 Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); 4274 } 4275 4276 // Now scan the already split slices, and add a split for any of them which 4277 // we're going to pre-split. 4278 for (Slice *S : P.splitSliceTails()) { 4279 auto SplitOffsetsMapI = 4280 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); 4281 if (SplitOffsetsMapI == SplitOffsetsMap.end()) 4282 continue; 4283 auto &Offsets = SplitOffsetsMapI->second; 4284 4285 assert(Offsets.S == S && "Found a mismatched slice!"); 4286 assert(!Offsets.Splits.empty() && 4287 "Cannot have an empty set of splits on the second partition!"); 4288 assert(Offsets.Splits.back() == 4289 P.beginOffset() - Offsets.S->beginOffset() && 4290 "Previous split does not end where this one begins!"); 4291 4292 // Record each split. The last partition's end isn't needed as the size 4293 // of the slice dictates that. 4294 if (S->endOffset() > P.endOffset()) 4295 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); 4296 } 4297 } 4298 4299 // We may have split loads where some of their stores are split stores. For 4300 // such loads and stores, we can only pre-split them if their splits exactly 4301 // match relative to their starting offset. We have to verify this prior to 4302 // any rewriting. 4303 llvm::erase_if(Stores, [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { 4304 // Lookup the load we are storing in our map of split 4305 // offsets. 4306 auto *LI = cast<LoadInst>(SI->getValueOperand()); 4307 // If it was completely unsplittable, then we're done, 4308 // and this store can't be pre-split. 
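    // (Illustrative mismatch, with hypothetical offsets: a load split at {4}
    // feeding a store split at {8} admits no common partitioning; the checks
    // in this lambda detect exactly that situation.)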
4309     if (UnsplittableLoads.count(LI))
4310       return true;
4311
4312     auto LoadOffsetsI = SplitOffsetsMap.find(LI);
4313     if (LoadOffsetsI == SplitOffsetsMap.end())
4314       return false; // Unrelated loads are definitely safe.
4315     auto &LoadOffsets = LoadOffsetsI->second;
4316
4317     // Now look up the store's offsets.
4318     auto &StoreOffsets = SplitOffsetsMap[SI];
4319
4320     // If the relative offsets of each split in the load and
4321     // store match exactly, then we can split them and we
4322     // don't need to remove them here.
4323     if (LoadOffsets.Splits == StoreOffsets.Splits)
4324       return false;
4325
4326     LLVM_DEBUG(dbgs() << " Mismatched splits for load and store:\n"
4327                       << " " << *LI << "\n"
4328                       << " " << *SI << "\n");
4329
4330     // We've found a store and load that we need to split
4331     // with mismatched relative splits. Just give up on them
4332     // and remove both instructions from our list of
4333     // candidates.
4334     UnsplittableLoads.insert(LI);
4335     return true;
4336   });
4337   // Now we have to go *back* through all the stores, because a later store may
4338   // have caused an earlier store's load to become unsplittable and if it is
4339   // unsplittable for the later store, then we can't rely on it being split in
4340   // the earlier store either.
4341   llvm::erase_if(Stores, [&UnsplittableLoads](StoreInst *SI) {
4342     auto *LI = cast<LoadInst>(SI->getValueOperand());
4343     return UnsplittableLoads.count(LI);
4344   });
4345   // Once we've established all the loads that can't be split for some reason,
4346   // filter out any that made it into our candidate list.
4347   llvm::erase_if(Loads, [&UnsplittableLoads](LoadInst *LI) {
4348     return UnsplittableLoads.count(LI);
4349   });
4350
4351   // If no loads or stores are left, there is no pre-splitting to be done for
4352   // this alloca.
4353   if (Loads.empty() && Stores.empty())
4354     return false;
4355
4356   // From here on, we can't fail and will be building new accesses, so rig up
4357   // an IR builder.
4358   IRBuilderTy IRB(&AI);
4359
4360   // Collect the new slices which we will merge into the alloca slices.
4361   SmallVector<Slice, 4> NewSlices;
4362
4363   // Track any allocas we end up splitting loads and stores for so we iterate
4364   // on them.
4365   SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
4366
4367   // At this point, we have collected all of the loads and stores we can
4368   // pre-split, and the specific splits needed for them. We actually do the
4369   // splitting in a specific order in order to handle the case when one of the
4370   // loads is the value operand of one of the stores.
4371   //
4372   // First, we rewrite all of the split loads, and just accumulate each split
4373   // load in a parallel structure. We also build the slices for them and append
4374   // them to the alloca slices.
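  // Illustrative shape of the bookkeeping (hypothetical sizes): an i64 load
  // with Splits == {4} becomes two i32 part loads covering bytes [0,4) and
  // [4,8); both are appended to NewSlices as unsplittable slices, and the
  // part loads are saved in SplitLoadsMap when a store of this load will be
  // split later.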
4375 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; 4376 std::vector<LoadInst *> SplitLoads; 4377 const DataLayout &DL = AI.getModule()->getDataLayout(); 4378 for (LoadInst *LI : Loads) { 4379 SplitLoads.clear(); 4380 4381 auto &Offsets = SplitOffsetsMap[LI]; 4382 unsigned SliceSize = Offsets.S->endOffset() - Offsets.S->beginOffset(); 4383 assert(LI->getType()->getIntegerBitWidth() % 8 == 0 && 4384 "Load must have type size equal to store size"); 4385 assert(LI->getType()->getIntegerBitWidth() / 8 >= SliceSize && 4386 "Load must be >= slice size"); 4387 4388 uint64_t BaseOffset = Offsets.S->beginOffset(); 4389 assert(BaseOffset + SliceSize > BaseOffset && 4390 "Cannot represent alloca access size using 64-bit integers!"); 4391 4392 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); 4393 IRB.SetInsertPoint(LI); 4394 4395 LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); 4396 4397 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 4398 int Idx = 0, Size = Offsets.Splits.size(); 4399 for (;;) { 4400 auto *PartTy = Type::getIntNTy(LI->getContext(), PartSize * 8); 4401 auto AS = LI->getPointerAddressSpace(); 4402 auto *PartPtrTy = LI->getPointerOperandType(); 4403 LoadInst *PLoad = IRB.CreateAlignedLoad( 4404 PartTy, 4405 getAdjustedPtr(IRB, DL, BasePtr, 4406 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4407 PartPtrTy, BasePtr->getName() + "."), 4408 getAdjustedAlignment(LI, PartOffset), 4409 /*IsVolatile*/ false, LI->getName()); 4410 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 4411 LLVMContext::MD_access_group}); 4412 4413 // Append this load onto the list of split loads so we can find it later 4414 // to rewrite the stores. 4415 SplitLoads.push_back(PLoad); 4416 4417 // Now build a new slice for the alloca. 4418 NewSlices.push_back( 4419 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 4420 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), 4421 /*IsSplittable*/ false)); 4422 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 4423 << ", " << NewSlices.back().endOffset() 4424 << "): " << *PLoad << "\n"); 4425 4426 // See if we've handled all the splits. 4427 if (Idx >= Size) 4428 break; 4429 4430 // Setup the next partition. 4431 PartOffset = Offsets.Splits[Idx]; 4432 ++Idx; 4433 PartSize = (Idx < Size ? Offsets.Splits[Idx] : SliceSize) - PartOffset; 4434 } 4435 4436 // Now that we have the split loads, do the slow walk over all uses of the 4437 // load and rewrite them as split stores, or save the split loads to use 4438 // below if the store is going to be split there anyways. 4439 bool DeferredStores = false; 4440 for (User *LU : LI->users()) { 4441 StoreInst *SI = cast<StoreInst>(LU); 4442 if (!Stores.empty() && SplitOffsetsMap.count(SI)) { 4443 DeferredStores = true; 4444 LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI 4445 << "\n"); 4446 continue; 4447 } 4448 4449 Value *StoreBasePtr = SI->getPointerOperand(); 4450 IRB.SetInsertPoint(SI); 4451 4452 LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); 4453 4454 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { 4455 LoadInst *PLoad = SplitLoads[Idx]; 4456 uint64_t PartOffset = Idx == 0 ? 
        uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
        auto *PartPtrTy = SI->getPointerOperandType();

        auto AS = SI->getPointerAddressSpace();
        StoreInst *PStore = IRB.CreateAlignedStore(
            PLoad,
            getAdjustedPtr(IRB, DL, StoreBasePtr,
                           APInt(DL.getIndexSizeInBits(AS), PartOffset),
                           PartPtrTy, StoreBasePtr->getName() + "."),
            getAdjustedAlignment(SI, PartOffset),
            /*IsVolatile*/ false);
        PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access,
                                   LLVMContext::MD_access_group,
                                   LLVMContext::MD_DIAssignID});
        LLVM_DEBUG(dbgs() << "    +" << PartOffset << ":" << *PStore << "\n");
      }

      // We want to immediately iterate on any allocas impacted by splitting
      // this store, and we have to track any promotable alloca (indicated by
      // a direct store) as needing to be resplit because it is no longer
      // promotable.
      if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) {
        ResplitPromotableAllocas.insert(OtherAI);
        Worklist.insert(OtherAI);
      } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
                     StoreBasePtr->stripInBoundsOffsets())) {
        Worklist.insert(OtherAI);
      }

      // Mark the original store as dead.
      DeadInsts.push_back(SI);
    }

    // Save the split loads if there are deferred stores among the users.
    if (DeferredStores)
      SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads)));

    // Mark the original load as dead and kill the original slice.
    DeadInsts.push_back(LI);
    Offsets.S->kill();
  }

  // Second, we rewrite all of the split stores. At this point, we know that
  // all loads from this alloca have been split already. For stores of such
  // loads, we can simply look up the pre-existing split loads. For stores of
  // other loads, we split those loads first and then write split stores of
  // them.
  for (StoreInst *SI : Stores) {
    auto *LI = cast<LoadInst>(SI->getValueOperand());
    IntegerType *Ty = cast<IntegerType>(LI->getType());
    assert(Ty->getBitWidth() % 8 == 0);
    uint64_t StoreSize = Ty->getBitWidth() / 8;
    assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");

    auto &Offsets = SplitOffsetsMap[SI];
    assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
           "Slice size should always match load size exactly!");
    uint64_t BaseOffset = Offsets.S->beginOffset();
    assert(BaseOffset + StoreSize > BaseOffset &&
           "Cannot represent alloca access size using 64-bit integers!");

    Value *LoadBasePtr = LI->getPointerOperand();
    Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());

    LLVM_DEBUG(dbgs() << "  Splitting store: " << *SI << "\n");
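    // Illustrative sketch (not from the original source): a store into this
    // alloca of a load that was not itself pre-split, e.g.
    //
    //   %v = load i64, ptr %other
    //   store i64 %v, ptr %alloca.ptr
    //
    // is handled below by first emitting the partial loads of %other and then
    // the matching partial stores, one pair per part.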
    // Check whether we have an already split load.
    auto SplitLoadsMapI = SplitLoadsMap.find(LI);
    std::vector<LoadInst *> *SplitLoads = nullptr;
    if (SplitLoadsMapI != SplitLoadsMap.end()) {
      SplitLoads = &SplitLoadsMapI->second;
      assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
             "Too few split loads for the number of splits in the store!");
    } else {
      LLVM_DEBUG(dbgs() << "          of load: " << *LI << "\n");
    }

    uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
    int Idx = 0, Size = Offsets.Splits.size();
    for (;;) {
      auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
      auto *LoadPartPtrTy = LI->getPointerOperandType();
      auto *StorePartPtrTy = SI->getPointerOperandType();

      // Either lookup a split load or create one.
      LoadInst *PLoad;
      if (SplitLoads) {
        PLoad = (*SplitLoads)[Idx];
      } else {
        IRB.SetInsertPoint(LI);
        auto AS = LI->getPointerAddressSpace();
        PLoad = IRB.CreateAlignedLoad(
            PartTy,
            getAdjustedPtr(IRB, DL, LoadBasePtr,
                           APInt(DL.getIndexSizeInBits(AS), PartOffset),
                           LoadPartPtrTy, LoadBasePtr->getName() + "."),
            getAdjustedAlignment(LI, PartOffset),
            /*IsVolatile*/ false, LI->getName());
        PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
                                  LLVMContext::MD_access_group});
      }

      // And store this partition.
      IRB.SetInsertPoint(SI);
      auto AS = SI->getPointerAddressSpace();
      StoreInst *PStore = IRB.CreateAlignedStore(
          PLoad,
          getAdjustedPtr(IRB, DL, StoreBasePtr,
                         APInt(DL.getIndexSizeInBits(AS), PartOffset),
                         StorePartPtrTy, StoreBasePtr->getName() + "."),
          getAdjustedAlignment(SI, PartOffset),
          /*IsVolatile*/ false);
      PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access,
                                 LLVMContext::MD_access_group});

      // Now build a new slice for the alloca.
      NewSlices.push_back(
          Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
                &PStore->getOperandUse(PStore->getPointerOperandIndex()),
                /*IsSplittable*/ false));
      LLVM_DEBUG(dbgs() << "    new slice [" << NewSlices.back().beginOffset()
                        << ", " << NewSlices.back().endOffset()
                        << "): " << *PStore << "\n");
      if (!SplitLoads) {
        LLVM_DEBUG(dbgs() << "      of split load: " << *PLoad << "\n");
      }

      // See if we've finished all the splits.
      if (Idx >= Size)
        break;

      // Setup the next partition.
      PartOffset = Offsets.Splits[Idx];
      ++Idx;
      PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
    }

    // We want to immediately iterate on any allocas impacted by splitting
    // this load, which is only relevant if it isn't a load of this alloca and
    // thus we didn't already split the loads above. We also have to keep track
    // of any promotable allocas we split loads on as they can no longer be
    // promoted.
    if (!SplitLoads) {
      if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
        assert(OtherAI != &AI && "We can't re-split our own alloca!");
        ResplitPromotableAllocas.insert(OtherAI);
        Worklist.insert(OtherAI);
      } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
                     LoadBasePtr->stripInBoundsOffsets())) {
        assert(OtherAI != &AI && "We can't re-split our own alloca!");
        Worklist.insert(OtherAI);
      }
    }
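    // For illustration (hypothetical): if %other above is itself a promotable
    // alloca, the partial loads just emitted touch only parts of it, so it is
    // dropped from the promotable set at the end of this function and
    // re-queued to be split again.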
    // Mark the original store as dead now that we've split it up and kill its
    // slice. Note that we leave the original load in place unless this store
    // was its only use. It may in turn be split up if it is an alloca load
    // for some other alloca, but it may be a normal load. This may introduce
    // redundant loads, but where those can be merged the rest of the optimizer
    // should handle the merging, and this uncovers SSA splits which is more
    // important. In practice, the original loads will almost always be fully
    // split and removed eventually, and the splits will be merged by any
    // trivial CSE, including instcombine.
    if (LI->hasOneUse()) {
      assert(*LI->user_begin() == SI && "Single use isn't this store!");
      DeadInsts.push_back(LI);
    }
    DeadInsts.push_back(SI);
    Offsets.S->kill();
  }

  // Remove the killed slices that have been pre-split.
  llvm::erase_if(AS, [](const Slice &S) { return S.isDead(); });

  // Insert our new slices. This will sort and merge them into the sorted
  // sequence.
  AS.insert(NewSlices);

  LLVM_DEBUG(dbgs() << "  Pre-split slices:\n");
#ifndef NDEBUG
  for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
    LLVM_DEBUG(AS.print(dbgs(), I, "    "));
#endif

  // Finally, don't try to promote any allocas that now require re-splitting.
  // They have already been added to the worklist above.
  llvm::erase_if(PromotableAllocas, [&](AllocaInst *AI) {
    return ResplitPromotableAllocas.count(AI);
  });

  return true;
}

/// Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive for SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one and have the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
                                   Partition &P) {
  // Try to compute a friendly type for this partition of the alloca. This
  // won't always succeed, in which case we fall back to a legal integer type
  // or an i8 array of an appropriate size.
  Type *SliceTy = nullptr;
  VectorType *SliceVecTy = nullptr;
  const DataLayout &DL = AI.getModule()->getDataLayout();
  std::pair<Type *, IntegerType *> CommonUseTy =
      findCommonType(P.begin(), P.end(), P.endOffset());
  // Do all uses operate on the same type?
  if (CommonUseTy.first)
    if (DL.getTypeAllocSize(CommonUseTy.first).getFixedValue() >= P.size()) {
      SliceTy = CommonUseTy.first;
      SliceVecTy = dyn_cast<VectorType>(SliceTy);
    }
  // If not, can we find an appropriate subtype in the original allocated type?
  if (!SliceTy)
    if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
                                                 P.beginOffset(), P.size()))
      SliceTy = TypePartitionTy;
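  // For example (hypothetical): for an alloca of { i32, i32 } whose partition
  // covers bytes [4, 8) only, getTypePartition can return the second i32,
  // giving the partition a natural type without any integer fallback.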
  // If still not, can we use the largest bitwidth integer type used?
  if (!SliceTy && CommonUseTy.second)
    if (DL.getTypeAllocSize(CommonUseTy.second).getFixedValue() >= P.size()) {
      SliceTy = CommonUseTy.second;
      SliceVecTy = dyn_cast<VectorType>(SliceTy);
    }
  if ((!SliceTy || (SliceTy->isArrayTy() &&
                    SliceTy->getArrayElementType()->isIntegerTy())) &&
      DL.isLegalInteger(P.size() * 8)) {
    SliceTy = Type::getIntNTy(*C, P.size() * 8);
  }

  // If the common use types are not viable for promotion then attempt to find
  // another type that is viable.
  if (SliceVecTy && !checkVectorTypeForPromotion(P, SliceVecTy, DL))
    if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
                                                 P.beginOffset(), P.size())) {
      VectorType *TypePartitionVecTy = dyn_cast<VectorType>(TypePartitionTy);
      if (TypePartitionVecTy &&
          checkVectorTypeForPromotion(P, TypePartitionVecTy, DL))
        SliceTy = TypePartitionTy;
    }

  if (!SliceTy)
    SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
  assert(DL.getTypeAllocSize(SliceTy).getFixedValue() >= P.size());

  bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);

  VectorType *VecTy =
      IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
  if (VecTy)
    SliceTy = VecTy;

  // Check for the case where we're going to rewrite to a new alloca of the
  // exact same type as the original, and with the same access offsets. In that
  // case, re-use the existing alloca, but still run through the rewriter to
  // perform phi and select speculation.
  // P.beginOffset() can be non-zero even with the same type in a case with
  // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
  AllocaInst *NewAI;
  if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
    NewAI = &AI;
    // FIXME: We should be able to bail at this point with "nothing changed".
    // FIXME: We might want to defer PHI speculation until after here.
    // FIXME: return nullptr;
  } else {
    // Make sure the alignment is compatible with P.beginOffset().
    const Align Alignment = commonAlignment(AI.getAlign(), P.beginOffset());
    // If we will get at least this much alignment from the type alone, leave
    // the alloca's alignment unconstrained.
    const bool IsUnconstrained = Alignment <= DL.getABITypeAlign(SliceTy);
    NewAI = new AllocaInst(
        SliceTy, AI.getAddressSpace(), nullptr,
        IsUnconstrained ? DL.getPrefTypeAlign(SliceTy) : Alignment,
        AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
    // Copy the old AI debug location over to the new one.
    NewAI->setDebugLoc(AI.getDebugLoc());
    ++NumNewAllocas;
  }

  LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
                    << "[" << P.beginOffset() << "," << P.endOffset()
                    << ") to: " << *NewAI << "\n");
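  // Illustrative sketch (not from the original source): a partition covering
  // the first field of
  //
  //   %x = alloca { i32, float }
  //
  // is rewritten onto a fresh, precisely typed alloca such as
  //
  //   %x.sroa.0 = alloca i32
  //
  // where the ".sroa.N" suffix comes from the partition's index, per the
  // AI.getName() + ".sroa." + Twine(...) naming above.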
  // Track the high watermark on the worklist as it is only relevant for
  // promoted allocas. We will reset it to this point if the alloca is not in
  // fact scheduled for promotion.
  unsigned PPWOldSize = PostPromotionWorklist.size();
  unsigned NumUses = 0;
  SmallSetVector<PHINode *, 8> PHIUsers;
  SmallSetVector<SelectInst *, 8> SelectUsers;

  AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
                               P.endOffset(), IsIntegerPromotable, VecTy,
                               PHIUsers, SelectUsers);
  bool Promotable = true;
  for (Slice *S : P.splitSliceTails()) {
    Promotable &= Rewriter.visit(S);
    ++NumUses;
  }
  for (Slice &S : P) {
    Promotable &= Rewriter.visit(&S);
    ++NumUses;
  }

  NumAllocaPartitionUses += NumUses;
  MaxUsesPerAllocaPartition.updateMax(NumUses);

  // Now that we've processed all the slices in the new partition, check if any
  // PHIs or Selects would block promotion.
  for (PHINode *PHI : PHIUsers)
    if (!isSafePHIToSpeculate(*PHI)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  SmallVector<std::pair<SelectInst *, RewriteableMemOps>, 2>
      NewSelectsToRewrite;
  NewSelectsToRewrite.reserve(SelectUsers.size());
  for (SelectInst *Sel : SelectUsers) {
    std::optional<RewriteableMemOps> Ops =
        isSafeSelectToSpeculate(*Sel, PreserveCFG);
    if (!Ops) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      NewSelectsToRewrite.clear();
      break;
    }
    NewSelectsToRewrite.emplace_back(std::make_pair(Sel, *Ops));
  }

  if (Promotable) {
    for (Use *U : AS.getDeadUsesIfPromotable()) {
      auto *OldInst = dyn_cast<Instruction>(U->get());
      Value::dropDroppableUse(*U);
      if (OldInst)
        if (isInstructionTriviallyDead(OldInst))
          DeadInsts.push_back(OldInst);
    }
    if (PHIUsers.empty() && SelectUsers.empty()) {
      // Promote the alloca.
      PromotableAllocas.push_back(NewAI);
    } else {
      // If we have either PHIs or Selects to speculate, add them to those
      // worklists and re-queue the new alloca so that we promote it on the
      // next iteration.
      for (PHINode *PHIUser : PHIUsers)
        SpeculatablePHIs.insert(PHIUser);
      SelectsToRewrite.reserve(SelectsToRewrite.size() +
                               NewSelectsToRewrite.size());
      for (auto &&KV : llvm::make_range(
               std::make_move_iterator(NewSelectsToRewrite.begin()),
               std::make_move_iterator(NewSelectsToRewrite.end())))
        SelectsToRewrite.insert(std::move(KV));
      Worklist.insert(NewAI);
    }
  } else {
    // Drop any post-promotion work items if promotion didn't happen.
    while (PostPromotionWorklist.size() > PPWOldSize)
      PostPromotionWorklist.pop_back();

    // We couldn't promote and we didn't create a new partition, nothing
    // happened.
    if (NewAI == &AI)
      return nullptr;

    // If we can't promote the alloca, iterate on it to check for new
    // refinements exposed by splitting the current alloca. Don't iterate on an
    // alloca which didn't actually change and didn't get promoted.
    Worklist.insert(NewAI);
  }

  return NewAI;
}

static void insertNewDbgInst(DIBuilder &DIB, DbgDeclareInst *Orig,
                             AllocaInst *NewAddr, DIExpression *NewFragmentExpr,
                             Instruction *BeforeInst) {
  DIB.insertDeclare(NewAddr, Orig->getVariable(), NewFragmentExpr,
                    Orig->getDebugLoc(), BeforeInst);
}
static void insertNewDbgInst(DIBuilder &DIB, DbgAssignIntrinsic *Orig,
                             AllocaInst *NewAddr, DIExpression *NewFragmentExpr,
                             Instruction *BeforeInst) {
  (void)BeforeInst;
  if (!NewAddr->hasMetadata(LLVMContext::MD_DIAssignID)) {
    NewAddr->setMetadata(LLVMContext::MD_DIAssignID,
                         DIAssignID::getDistinct(NewAddr->getContext()));
  }
  auto *NewAssign = DIB.insertDbgAssign(
      NewAddr, Orig->getValue(), Orig->getVariable(), NewFragmentExpr, NewAddr,
      Orig->getAddressExpression(), Orig->getDebugLoc());
  LLVM_DEBUG(dbgs() << "Created new assign intrinsic: " << *NewAssign << "\n");
  (void)NewAssign;
}
static void insertNewDbgInst(DIBuilder &DIB, DPValue *Orig, AllocaInst *NewAddr,
                             DIExpression *NewFragmentExpr,
                             Instruction *BeforeInst) {
  (void)DIB;
  DPValue *New = new DPValue(ValueAsMetadata::get(NewAddr), Orig->getVariable(),
                             NewFragmentExpr, Orig->getDebugLoc(),
                             DPValue::LocationType::Declare);
  BeforeInst->getParent()->insertDPValueBefore(New, BeforeInst->getIterator());
}

/// Walks the slices of an alloca and forms partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
  if (AS.begin() == AS.end())
    return false;

  unsigned NumPartitions = 0;
  bool Changed = false;
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // First try to pre-split loads and stores.
  Changed |= presplitLoadsAndStores(AI, AS);

  // Now that we have identified any pre-splitting opportunities,
  // mark loads and stores unsplittable except for the following case.
  // We leave a slice splittable if all other slices are disjoint or fully
  // included in the slice, such as whole-alloca loads and stores.
  // If we fail to split these during pre-splitting, we want to force them
  // to be rewritten into a partition.
  bool IsSorted = true;

  uint64_t AllocaSize =
      DL.getTypeAllocSize(AI.getAllocatedType()).getFixedValue();
  const uint64_t MaxBitVectorSize = 1024;
  if (AllocaSize <= MaxBitVectorSize) {
    // If a byte boundary is included in any load or store, a slice starting or
    // ending at the boundary is not splittable.
    SmallBitVector SplittableOffset(AllocaSize + 1, true);
    for (Slice &S : AS)
      for (unsigned O = S.beginOffset() + 1;
           O < S.endOffset() && O < AllocaSize; O++)
        SplittableOffset.reset(O);

    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
          (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  } else {
    // We only allow whole-alloca splittable loads and stores for a large
    // alloca to avoid creating a too-large BitVector.
    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  }

  if (!IsSorted)
    llvm::sort(AS);

  /// Describes the allocas introduced by rewritePartition in order to migrate
  /// the debug info.
  struct Fragment {
    AllocaInst *Alloca;
    uint64_t Offset;
    uint64_t Size;
    Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
        : Alloca(AI), Offset(O), Size(S) {}
  };
  SmallVector<Fragment, 4> Fragments;

  // Rewrite each partition.
  for (auto &P : AS.partitions()) {
    if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
      Changed = true;
      if (NewAI != &AI) {
        uint64_t SizeOfByte = 8;
        uint64_t AllocaSize =
            DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedValue();
        // Don't include any padding.
        uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
        Fragments.push_back(
            Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
      }
    }
    ++NumPartitions;
  }

  NumAllocaPartitions += NumPartitions;
  MaxPartitionsPerAlloca.updateMax(NumPartitions);

  // Migrate debug information from the old alloca to the new alloca(s)
  // and the individual partitions.
  auto MigrateOne = [&](auto *DbgVariable) {
    auto *Expr = DbgVariable->getExpression();
    DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
    uint64_t AllocaSize =
        DL.getTypeSizeInBits(AI.getAllocatedType()).getFixedValue();
    for (auto Fragment : Fragments) {
      // Create a fragment expression describing the new partition or reuse
      // AI's expression if there is only one partition.
      auto *FragmentExpr = Expr;
      if (Fragment.Size < AllocaSize || Expr->isFragment()) {
        // If this alloca is already a scalar replacement of a larger
        // aggregate, Fragment.Offset describes the offset inside the scalar.
        auto ExprFragment = Expr->getFragmentInfo();
        uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
        uint64_t Start = Offset + Fragment.Offset;
        uint64_t Size = Fragment.Size;
        if (ExprFragment) {
          uint64_t AbsEnd =
              ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
          if (Start >= AbsEnd) {
            // No need to describe a SROAed padding.
            continue;
          }
          Size = std::min(Size, AbsEnd - Start);
        }
        // The new, smaller fragment is stenciled out from the old fragment.
        if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
          assert(Start >= OrigFragment->OffsetInBits &&
                 "new fragment is outside of original fragment");
          Start -= OrigFragment->OffsetInBits;
        }

        // The alloca may be larger than the variable.
        auto VarSize = DbgVariable->getVariable()->getSizeInBits();
        if (VarSize) {
          if (Size > *VarSize)
            Size = *VarSize;
          if (Size == 0 || Start + Size > *VarSize)
            continue;
        }

        // Avoid creating a fragment expression that covers the entire
        // variable.
        if (!VarSize || *VarSize != Size) {
          if (auto E =
                  DIExpression::createFragmentExpression(Expr, Start, Size))
            FragmentExpr = *E;
          else
            continue;
        }
      }
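      // Worked example (hypothetical values): for a 64-bit variable whose
      // expression has no pre-existing fragment, a partition covering bytes
      // [4, 8) of the alloca yields Start = 32 and Size = 32, i.e. a
      // DW_OP_LLVM_fragment describing bits [32, 64) of the variable.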
      // Remove any existing intrinsics on the new alloca describing
      // the variable fragment.
      SmallVector<DbgDeclareInst *, 1> FragDbgDeclares;
      SmallVector<DPValue *, 1> FragDPVs;
      findDbgDeclares(FragDbgDeclares, Fragment.Alloca, &FragDPVs);
      auto RemoveOne = [DbgVariable](auto *OldDII) {
        auto SameVariableFragment = [](const auto *LHS, const auto *RHS) {
          return LHS->getVariable() == RHS->getVariable() &&
                 LHS->getDebugLoc()->getInlinedAt() ==
                     RHS->getDebugLoc()->getInlinedAt();
        };
        if (SameVariableFragment(OldDII, DbgVariable))
          OldDII->eraseFromParent();
      };
      for_each(FragDbgDeclares, RemoveOne);
      for_each(FragDPVs, RemoveOne);

      insertNewDbgInst(DIB, DbgVariable, Fragment.Alloca, FragmentExpr, &AI);
    }
  };

  // Apply the migration to every dbg.declare (or DPValue equivalent) and
  // every assignment marker attached to the old alloca.
  SmallVector<DbgDeclareInst *, 1> DbgDeclares;
  SmallVector<DPValue *, 1> DPValues;
  findDbgDeclares(DbgDeclares, &AI, &DPValues);
  for_each(DbgDeclares, MigrateOne);
  for_each(DPValues, MigrateOne);
  for_each(at::getAssignmentMarkers(&AI), MigrateOne);

  return Changed;
}

/// Clobber a use with poison, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {
  Value *OldV = U;
  // Replace the use with a poison value.
  U = PoisonValue::get(OldV->getType());

  // Check for this making an instruction dead. We have to garbage collect
  // all the dead instructions to ensure the uses of any alloca end up being
  // minimal.
  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
    if (isInstructionTriviallyDead(OldI)) {
      DeadInsts.push_back(OldI);
    }
}
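// For illustration (hypothetical IR): clobbering the operand of a dead user
// such as
//
//   %g = getelementptr i8, ptr %a, i64 4
//   %d = load i8, ptr %g   ; dead user
//
// rewrites %d's pointer operand to poison; if that leaves %g trivially dead,
// %g is queued for deletion too, keeping the alloca's use list minimal.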
/// Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
std::pair<bool /*Changed*/, bool /*CFGChanged*/>
SROA::runOnAlloca(AllocaInst &AI) {
  bool Changed = false;
  bool CFGChanged = false;

  LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
  ++NumAllocasAnalyzed;

  // Special case dead allocas, as they're trivial.
  if (AI.use_empty()) {
    AI.eraseFromParent();
    Changed = true;
    return {Changed, CFGChanged};
  }
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // Skip alloca forms that this analysis can't handle.
  auto *AT = AI.getAllocatedType();
  TypeSize Size = DL.getTypeAllocSize(AT);
  if (AI.isArrayAllocation() || !AT->isSized() || Size.isScalable() ||
      Size.getFixedValue() == 0)
    return {Changed, CFGChanged};

  // First, split any FCA loads and stores touching this alloca to promote
  // better splitting and promotion opportunities.
  IRBuilderTy IRB(&AI);
  AggLoadStoreRewriter AggRewriter(DL, IRB);
  Changed |= AggRewriter.rewrite(AI);

  // Build the slices using a recursive instruction-visiting builder.
  AllocaSlices AS(DL, AI);
  LLVM_DEBUG(AS.print(dbgs()));
  if (AS.isEscaped())
    return {Changed, CFGChanged};

  // Delete all the dead users of this alloca before splitting and rewriting
  // it.
  for (Instruction *DeadUser : AS.getDeadUsers()) {
    // Free up everything used by this instruction.
    for (Use &DeadOp : DeadUser->operands())
      clobberUse(DeadOp);

    // Now replace the uses of this instruction.
    DeadUser->replaceAllUsesWith(PoisonValue::get(DeadUser->getType()));

    // And mark it for deletion.
    DeadInsts.push_back(DeadUser);
    Changed = true;
  }
  for (Use *DeadOp : AS.getDeadOperands()) {
    clobberUse(*DeadOp);
    Changed = true;
  }

  // No slices to split. Leave the dead alloca for a later pass to clean up.
  if (AS.begin() == AS.end())
    return {Changed, CFGChanged};

  Changed |= splitAlloca(AI, AS);

  LLVM_DEBUG(dbgs() << "  Speculating PHIs\n");
  while (!SpeculatablePHIs.empty())
    speculatePHINodeLoads(IRB, *SpeculatablePHIs.pop_back_val());

  LLVM_DEBUG(dbgs() << "  Rewriting Selects\n");
  auto RemainingSelectsToRewrite = SelectsToRewrite.takeVector();
  while (!RemainingSelectsToRewrite.empty()) {
    const auto [K, V] = RemainingSelectsToRewrite.pop_back_val();
    CFGChanged |=
        rewriteSelectInstMemOps(*K, V, IRB, PreserveCFG ? nullptr : DTU);
  }

  return {Changed, CFGChanged};
}

/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROA::deleteDeadInstructions(
    SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
  bool Changed = false;
  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
    if (!I)
      continue;
    LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

    // If the instruction is an alloca, find the possible dbg.declare connected
    // to it, and remove it too. We must do this before calling RAUW or we will
    // not be able to find it.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      DeletedAllocas.insert(AI);
      SmallVector<DbgDeclareInst *, 1> DbgDeclares;
      SmallVector<DPValue *, 1> DPValues;
      findDbgDeclares(DbgDeclares, AI, &DPValues);
      for (DbgDeclareInst *OldDII : DbgDeclares)
        OldDII->eraseFromParent();
      for (DPValue *OldDII : DPValues)
        OldDII->eraseFromParent();
    }

    at::deleteAssignmentMarkers(I);
    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (Use &Operand : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.push_back(U);
      }

    ++NumDeleted;
    I->eraseFromParent();
    Changed = true;
  }
  return Changed;
}
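// For illustration (hypothetical IR): if the queue holds a dead store
//
//   store i32 %v, ptr %gep
//
// deleting it nulls out its operands; if %gep then has no remaining users it
// is queued in turn and deleted on a later loop iteration, cascading until no
// trivially dead instructions remain.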
/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
bool SROA::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  if (SROASkipMem2Reg) {
    LLVM_DEBUG(dbgs() << "Not promoting allocas with mem2reg!\n");
  } else {
    LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
    PromoteMemToReg(PromotableAllocas, DTU->getDomTree(), AC);
  }

  PromotableAllocas.clear();
  return true;
}

std::pair<bool /*Changed*/, bool /*CFGChanged*/> SROA::runSROA(Function &F) {
  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");

  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      if (DL.getTypeAllocSize(AI->getAllocatedType()).isScalable() &&
          isAllocaPromotable(AI))
        PromotableAllocas.push_back(AI);
      else
        Worklist.insert(AI);
    }
  }

  bool Changed = false;
  bool CFGChanged = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      auto [IterationChanged, IterationCFGChanged] =
          runOnAlloca(*Worklist.pop_back_val());
      Changed |= IterationChanged;
      CFGChanged |= IterationCFGChanged;

      Changed |= deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        llvm::erase_if(PromotableAllocas, IsInSet);
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  assert((!CFGChanged || Changed) && "Cannot modify only the CFG.");
  assert((!CFGChanged || !PreserveCFG) &&
         "Should not have modified the CFG when told to preserve it.");

  if (Changed && isAssignmentTrackingEnabled(*F.getParent())) {
    for (auto &BB : F)
      RemoveRedundantDbgInstrs(&BB);
  }

  return {Changed, CFGChanged};
}

PreservedAnalyses SROAPass::run(Function &F, FunctionAnalysisManager &AM) {
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  auto [Changed, CFGChanged] =
      SROA(&F.getContext(), &DTU, &AC, PreserveCFG).runSROA(F);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  if (!CFGChanged)
    PA.preserveSet<CFGAnalyses>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}
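// Illustrative usage sketch (not part of this file): clients of the new pass
// manager would typically schedule this pass along the lines of
//
//   FunctionPassManager FPM;
//   FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
//
// passing SROAOptions::PreserveCFG instead when the surrounding pipeline
// relies on the CFG staying intact.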
"<preserve-cfg>" 5301 : "<modify-cfg>"); 5302 } 5303 5304 SROAPass::SROAPass(SROAOptions PreserveCFG) : PreserveCFG(PreserveCFG) {} 5305 5306 namespace { 5307 5308 /// A legacy pass for the legacy pass manager that wraps the \c SROA pass. 5309 class SROALegacyPass : public FunctionPass { 5310 SROAOptions PreserveCFG; 5311 5312 public: 5313 static char ID; 5314 5315 SROALegacyPass(SROAOptions PreserveCFG = SROAOptions::PreserveCFG) 5316 : FunctionPass(ID), PreserveCFG(PreserveCFG) { 5317 initializeSROALegacyPassPass(*PassRegistry::getPassRegistry()); 5318 } 5319 5320 bool runOnFunction(Function &F) override { 5321 if (skipFunction(F)) 5322 return false; 5323 5324 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 5325 AssumptionCache &AC = 5326 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 5327 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); 5328 auto [Changed, _] = 5329 SROA(&F.getContext(), &DTU, &AC, PreserveCFG).runSROA(F); 5330 return Changed; 5331 } 5332 5333 void getAnalysisUsage(AnalysisUsage &AU) const override { 5334 AU.addRequired<AssumptionCacheTracker>(); 5335 AU.addRequired<DominatorTreeWrapperPass>(); 5336 AU.addPreserved<GlobalsAAWrapperPass>(); 5337 AU.addPreserved<DominatorTreeWrapperPass>(); 5338 } 5339 5340 StringRef getPassName() const override { return "SROA"; } 5341 }; 5342 5343 } // end anonymous namespace 5344 5345 char SROALegacyPass::ID = 0; 5346 5347 FunctionPass *llvm::createSROAPass(bool PreserveCFG) { 5348 return new SROALegacyPass(PreserveCFG ? SROAOptions::PreserveCFG 5349 : SROAOptions::ModifyCFG); 5350 } 5351 5352 INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa", 5353 "Scalar Replacement Of Aggregates", false, false) 5354 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 5355 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 5356 INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates", 5357 false, false) 5358