1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The inline spiller modifies the machine function directly instead of 10 // inserting spills and restores in VirtRegMap. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SplitKit.h" 15 #include "llvm/ADT/ArrayRef.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/MapVector.h" 18 #include "llvm/ADT/None.h" 19 #include "llvm/ADT/STLExtras.h" 20 #include "llvm/ADT/SetVector.h" 21 #include "llvm/ADT/SmallPtrSet.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/ADT/Statistic.h" 24 #include "llvm/Analysis/AliasAnalysis.h" 25 #include "llvm/CodeGen/LiveInterval.h" 26 #include "llvm/CodeGen/LiveIntervalCalc.h" 27 #include "llvm/CodeGen/LiveIntervals.h" 28 #include "llvm/CodeGen/LiveRangeEdit.h" 29 #include "llvm/CodeGen/LiveStacks.h" 30 #include "llvm/CodeGen/MachineBasicBlock.h" 31 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" 32 #include "llvm/CodeGen/MachineDominators.h" 33 #include "llvm/CodeGen/MachineFunction.h" 34 #include "llvm/CodeGen/MachineFunctionPass.h" 35 #include "llvm/CodeGen/MachineInstr.h" 36 #include "llvm/CodeGen/MachineInstrBuilder.h" 37 #include "llvm/CodeGen/MachineInstrBundle.h" 38 #include "llvm/CodeGen/MachineLoopInfo.h" 39 #include "llvm/CodeGen/MachineOperand.h" 40 #include "llvm/CodeGen/MachineRegisterInfo.h" 41 #include "llvm/CodeGen/SlotIndexes.h" 42 #include "llvm/CodeGen/Spiller.h" 43 #include "llvm/CodeGen/StackMaps.h" 44 #include "llvm/CodeGen/TargetInstrInfo.h" 45 #include "llvm/CodeGen/TargetOpcodes.h" 46 #include "llvm/CodeGen/TargetRegisterInfo.h" 47 #include "llvm/CodeGen/TargetSubtargetInfo.h" 48 #include "llvm/CodeGen/VirtRegMap.h" 49 #include "llvm/Config/llvm-config.h" 50 #include "llvm/Support/BlockFrequency.h" 51 #include "llvm/Support/BranchProbability.h" 52 #include "llvm/Support/CommandLine.h" 53 #include "llvm/Support/Compiler.h" 54 #include "llvm/Support/Debug.h" 55 #include "llvm/Support/ErrorHandling.h" 56 #include "llvm/Support/raw_ostream.h" 57 #include <cassert> 58 #include <iterator> 59 #include <tuple> 60 #include <utility> 61 #include <vector> 62 63 using namespace llvm; 64 65 #define DEBUG_TYPE "regalloc" 66 67 STATISTIC(NumSpilledRanges, "Number of spilled live ranges"); 68 STATISTIC(NumSnippets, "Number of spilled snippets"); 69 STATISTIC(NumSpills, "Number of spills inserted"); 70 STATISTIC(NumSpillsRemoved, "Number of spills removed"); 71 STATISTIC(NumReloads, "Number of reloads inserted"); 72 STATISTIC(NumReloadsRemoved, "Number of reloads removed"); 73 STATISTIC(NumFolded, "Number of folded stack accesses"); 74 STATISTIC(NumFoldedLoads, "Number of folded loads"); 75 STATISTIC(NumRemats, "Number of rematerialized defs for spilling"); 76 77 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden, 78 cl::desc("Disable inline spill hoisting")); 79 static cl::opt<bool> 80 RestrictStatepointRemat("restrict-statepoint-remat", 81 cl::init(false), cl::Hidden, 82 cl::desc("Restrict remat for statepoint operands")); 83 84 namespace { 85 86 class HoistSpillHelper : private LiveRangeEdit::Delegate { 87 MachineFunction &MF; 88 LiveIntervals &LIS; 89 LiveStacks &LSS; 90 AliasAnalysis *AA; 91 MachineDominatorTree &MDT; 92 MachineLoopInfo &Loops; 93 VirtRegMap &VRM; 94 MachineRegisterInfo &MRI; 95 const TargetInstrInfo &TII; 96 const TargetRegisterInfo &TRI; 97 const MachineBlockFrequencyInfo &MBFI; 98 99 InsertPointAnalysis IPA; 100 101 // Map from StackSlot to the LiveInterval of the original register. 102 // Note the LiveInterval of the original register may have been deleted 103 // after it is spilled. We keep a copy here to track the range where 104 // spills can be moved. 105 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI; 106 107 // Map from pair of (StackSlot and Original VNI) to a set of spills which 108 // have the same stackslot and have equal values defined by Original VNI. 109 // These spills are mergeable and are hoist candiates. 110 using MergeableSpillsMap = 111 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>; 112 MergeableSpillsMap MergeableSpills; 113 114 /// This is the map from original register to a set containing all its 115 /// siblings. To hoist a spill to another BB, we need to find out a live 116 /// sibling there and use it as the source of the new spill. 117 DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap; 118 119 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI, 120 MachineBasicBlock &BB, Register &LiveReg); 121 122 void rmRedundantSpills( 123 SmallPtrSet<MachineInstr *, 16> &Spills, 124 SmallVectorImpl<MachineInstr *> &SpillsToRm, 125 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill); 126 127 void getVisitOrders( 128 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills, 129 SmallVectorImpl<MachineDomTreeNode *> &Orders, 130 SmallVectorImpl<MachineInstr *> &SpillsToRm, 131 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep, 132 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill); 133 134 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI, 135 SmallPtrSet<MachineInstr *, 16> &Spills, 136 SmallVectorImpl<MachineInstr *> &SpillsToRm, 137 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns); 138 139 public: 140 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf, 141 VirtRegMap &vrm) 142 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()), 143 LSS(pass.getAnalysis<LiveStacks>()), 144 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()), 145 MDT(pass.getAnalysis<MachineDominatorTree>()), 146 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm), 147 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()), 148 TRI(*mf.getSubtarget().getRegisterInfo()), 149 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()), 150 IPA(LIS, mf.getNumBlockIDs()) {} 151 152 void addToMergeableSpills(MachineInstr &Spill, int StackSlot, 153 unsigned Original); 154 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot); 155 void hoistAllSpills(); 156 void LRE_DidCloneVirtReg(Register, Register) override; 157 }; 158 159 class InlineSpiller : public Spiller { 160 MachineFunction &MF; 161 LiveIntervals &LIS; 162 LiveStacks &LSS; 163 AliasAnalysis *AA; 164 MachineDominatorTree &MDT; 165 MachineLoopInfo &Loops; 166 VirtRegMap &VRM; 167 MachineRegisterInfo &MRI; 168 const TargetInstrInfo &TII; 169 const TargetRegisterInfo &TRI; 170 const MachineBlockFrequencyInfo &MBFI; 171 172 // Variables that are valid during spill(), but used by multiple methods. 173 LiveRangeEdit *Edit; 174 LiveInterval *StackInt; 175 int StackSlot; 176 Register Original; 177 178 // All registers to spill to StackSlot, including the main register. 179 SmallVector<Register, 8> RegsToSpill; 180 181 // All COPY instructions to/from snippets. 182 // They are ignored since both operands refer to the same stack slot. 183 SmallPtrSet<MachineInstr*, 8> SnippetCopies; 184 185 // Values that failed to remat at some point. 186 SmallPtrSet<VNInfo*, 8> UsedValues; 187 188 // Dead defs generated during spilling. 189 SmallVector<MachineInstr*, 8> DeadDefs; 190 191 // Object records spills information and does the hoisting. 192 HoistSpillHelper HSpiller; 193 194 // Live range weight calculator. 195 VirtRegAuxInfo &VRAI; 196 197 ~InlineSpiller() override = default; 198 199 public: 200 InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM, 201 VirtRegAuxInfo &VRAI) 202 : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()), 203 LSS(Pass.getAnalysis<LiveStacks>()), 204 AA(&Pass.getAnalysis<AAResultsWrapperPass>().getAAResults()), 205 MDT(Pass.getAnalysis<MachineDominatorTree>()), 206 Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM), 207 MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()), 208 TRI(*MF.getSubtarget().getRegisterInfo()), 209 MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()), 210 HSpiller(Pass, MF, VRM), VRAI(VRAI) {} 211 212 void spill(LiveRangeEdit &) override; 213 void postOptimization() override; 214 215 private: 216 bool isSnippet(const LiveInterval &SnipLI); 217 void collectRegsToSpill(); 218 219 bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); } 220 221 bool isSibling(Register Reg); 222 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI); 223 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI); 224 225 void markValueUsed(LiveInterval*, VNInfo*); 226 bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI); 227 bool reMaterializeFor(LiveInterval &, MachineInstr &MI); 228 void reMaterializeAll(); 229 230 bool coalesceStackAccess(MachineInstr *MI, Register Reg); 231 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>, 232 MachineInstr *LoadMI = nullptr); 233 void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI); 234 void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI); 235 236 void spillAroundUses(Register Reg); 237 void spillAll(); 238 }; 239 240 } // end anonymous namespace 241 242 Spiller::~Spiller() = default; 243 244 void Spiller::anchor() {} 245 246 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass, 247 MachineFunction &MF, VirtRegMap &VRM, 248 VirtRegAuxInfo &VRAI) { 249 return new InlineSpiller(Pass, MF, VRM, VRAI); 250 } 251 252 //===----------------------------------------------------------------------===// 253 // Snippets 254 //===----------------------------------------------------------------------===// 255 256 // When spilling a virtual register, we also spill any snippets it is connected 257 // to. The snippets are small live ranges that only have a single real use, 258 // leftovers from live range splitting. Spilling them enables memory operand 259 // folding or tightens the live range around the single use. 260 // 261 // This minimizes register pressure and maximizes the store-to-load distance for 262 // spill slots which can be important in tight loops. 263 264 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register, 265 /// otherwise return 0. 266 static Register isFullCopyOf(const MachineInstr &MI, Register Reg) { 267 if (!MI.isFullCopy()) 268 return Register(); 269 if (MI.getOperand(0).getReg() == Reg) 270 return MI.getOperand(1).getReg(); 271 if (MI.getOperand(1).getReg() == Reg) 272 return MI.getOperand(0).getReg(); 273 return Register(); 274 } 275 276 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) { 277 for (const MachineOperand &MO : MI.operands()) 278 if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg())) 279 LIS.getInterval(MO.getReg()); 280 } 281 282 /// isSnippet - Identify if a live interval is a snippet that should be spilled. 283 /// It is assumed that SnipLI is a virtual register with the same original as 284 /// Edit->getReg(). 285 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) { 286 Register Reg = Edit->getReg(); 287 288 // A snippet is a tiny live range with only a single instruction using it 289 // besides copies to/from Reg or spills/fills. We accept: 290 // 291 // %snip = COPY %Reg / FILL fi# 292 // %snip = USE %snip 293 // %Reg = COPY %snip / SPILL %snip, fi# 294 // 295 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI)) 296 return false; 297 298 MachineInstr *UseMI = nullptr; 299 300 // Check that all uses satisfy our criteria. 301 for (MachineRegisterInfo::reg_instr_nodbg_iterator 302 RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()), 303 E = MRI.reg_instr_nodbg_end(); 304 RI != E;) { 305 MachineInstr &MI = *RI++; 306 307 // Allow copies to/from Reg. 308 if (isFullCopyOf(MI, Reg)) 309 continue; 310 311 // Allow stack slot loads. 312 int FI; 313 if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) 314 continue; 315 316 // Allow stack slot stores. 317 if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) 318 continue; 319 320 // Allow a single additional instruction. 321 if (UseMI && &MI != UseMI) 322 return false; 323 UseMI = &MI; 324 } 325 return true; 326 } 327 328 /// collectRegsToSpill - Collect live range snippets that only have a single 329 /// real use. 330 void InlineSpiller::collectRegsToSpill() { 331 Register Reg = Edit->getReg(); 332 333 // Main register always spills. 334 RegsToSpill.assign(1, Reg); 335 SnippetCopies.clear(); 336 337 // Snippets all have the same original, so there can't be any for an original 338 // register. 339 if (Original == Reg) 340 return; 341 342 for (MachineInstr &MI : 343 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) { 344 Register SnipReg = isFullCopyOf(MI, Reg); 345 if (!isSibling(SnipReg)) 346 continue; 347 LiveInterval &SnipLI = LIS.getInterval(SnipReg); 348 if (!isSnippet(SnipLI)) 349 continue; 350 SnippetCopies.insert(&MI); 351 if (isRegToSpill(SnipReg)) 352 continue; 353 RegsToSpill.push_back(SnipReg); 354 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n'); 355 ++NumSnippets; 356 } 357 } 358 359 bool InlineSpiller::isSibling(Register Reg) { 360 return Reg.isVirtual() && VRM.getOriginal(Reg) == Original; 361 } 362 363 /// It is beneficial to spill to earlier place in the same BB in case 364 /// as follows: 365 /// There is an alternative def earlier in the same MBB. 366 /// Hoist the spill as far as possible in SpillMBB. This can ease 367 /// register pressure: 368 /// 369 /// x = def 370 /// y = use x 371 /// s = copy x 372 /// 373 /// Hoisting the spill of s to immediately after the def removes the 374 /// interference between x and y: 375 /// 376 /// x = def 377 /// spill x 378 /// y = use killed x 379 /// 380 /// This hoist only helps when the copy kills its source. 381 /// 382 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI, 383 MachineInstr &CopyMI) { 384 SlotIndex Idx = LIS.getInstructionIndex(CopyMI); 385 #ifndef NDEBUG 386 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot()); 387 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy"); 388 #endif 389 390 Register SrcReg = CopyMI.getOperand(1).getReg(); 391 LiveInterval &SrcLI = LIS.getInterval(SrcReg); 392 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx); 393 LiveQueryResult SrcQ = SrcLI.Query(Idx); 394 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def); 395 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill()) 396 return false; 397 398 // Conservatively extend the stack slot range to the range of the original 399 // value. We may be able to do better with stack slot coloring by being more 400 // careful here. 401 assert(StackInt && "No stack slot assigned yet."); 402 LiveInterval &OrigLI = LIS.getInterval(Original); 403 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx); 404 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0)); 405 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": " 406 << *StackInt << '\n'); 407 408 // We are going to spill SrcVNI immediately after its def, so clear out 409 // any later spills of the same value. 410 eliminateRedundantSpills(SrcLI, SrcVNI); 411 412 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def); 413 MachineBasicBlock::iterator MII; 414 if (SrcVNI->isPHIDef()) 415 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin()); 416 else { 417 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def); 418 assert(DefMI && "Defining instruction disappeared"); 419 MII = DefMI; 420 ++MII; 421 } 422 MachineInstrSpan MIS(MII, MBB); 423 // Insert spill without kill flag immediately after def. 424 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot, 425 MRI.getRegClass(SrcReg), &TRI); 426 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII); 427 for (const MachineInstr &MI : make_range(MIS.begin(), MII)) 428 getVDefInterval(MI, LIS); 429 --MII; // Point to store instruction. 430 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII); 431 432 // If there is only 1 store instruction is required for spill, add it 433 // to mergeable list. In X86 AMX, 2 intructions are required to store. 434 // We disable the merge for this case. 435 if (MIS.begin() == MII) 436 HSpiller.addToMergeableSpills(*MII, StackSlot, Original); 437 ++NumSpills; 438 return true; 439 } 440 441 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any 442 /// redundant spills of this value in SLI.reg and sibling copies. 443 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) { 444 assert(VNI && "Missing value"); 445 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 446 WorkList.push_back(std::make_pair(&SLI, VNI)); 447 assert(StackInt && "No stack slot assigned yet."); 448 449 do { 450 LiveInterval *LI; 451 std::tie(LI, VNI) = WorkList.pop_back_val(); 452 Register Reg = LI->reg(); 453 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@' 454 << VNI->def << " in " << *LI << '\n'); 455 456 // Regs to spill are taken care of. 457 if (isRegToSpill(Reg)) 458 continue; 459 460 // Add all of VNI's live range to StackInt. 461 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0)); 462 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n'); 463 464 // Find all spills and copies of VNI. 465 for (MachineInstr &MI : 466 llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) { 467 if (!MI.isCopy() && !MI.mayStore()) 468 continue; 469 SlotIndex Idx = LIS.getInstructionIndex(MI); 470 if (LI->getVNInfoAt(Idx) != VNI) 471 continue; 472 473 // Follow sibling copies down the dominator tree. 474 if (Register DstReg = isFullCopyOf(MI, Reg)) { 475 if (isSibling(DstReg)) { 476 LiveInterval &DstLI = LIS.getInterval(DstReg); 477 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot()); 478 assert(DstVNI && "Missing defined value"); 479 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot"); 480 WorkList.push_back(std::make_pair(&DstLI, DstVNI)); 481 } 482 continue; 483 } 484 485 // Erase spills. 486 int FI; 487 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) { 488 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI); 489 // eliminateDeadDefs won't normally remove stores, so switch opcode. 490 MI.setDesc(TII.get(TargetOpcode::KILL)); 491 DeadDefs.push_back(&MI); 492 ++NumSpillsRemoved; 493 if (HSpiller.rmFromMergeableSpills(MI, StackSlot)) 494 --NumSpills; 495 } 496 } 497 } while (!WorkList.empty()); 498 } 499 500 //===----------------------------------------------------------------------===// 501 // Rematerialization 502 //===----------------------------------------------------------------------===// 503 504 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining 505 /// instruction cannot be eliminated. See through snippet copies 506 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) { 507 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 508 WorkList.push_back(std::make_pair(LI, VNI)); 509 do { 510 std::tie(LI, VNI) = WorkList.pop_back_val(); 511 if (!UsedValues.insert(VNI).second) 512 continue; 513 514 if (VNI->isPHIDef()) { 515 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def); 516 for (MachineBasicBlock *P : MBB->predecessors()) { 517 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P)); 518 if (PVNI) 519 WorkList.push_back(std::make_pair(LI, PVNI)); 520 } 521 continue; 522 } 523 524 // Follow snippet copies. 525 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 526 if (!SnippetCopies.count(MI)) 527 continue; 528 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg()); 529 assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy"); 530 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true)); 531 assert(SnipVNI && "Snippet undefined before copy"); 532 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI)); 533 } while (!WorkList.empty()); 534 } 535 536 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg, 537 MachineInstr &MI) { 538 if (!RestrictStatepointRemat) 539 return true; 540 // Here's a quick explanation of the problem we're trying to handle here: 541 // * There are some pseudo instructions with more vreg uses than there are 542 // physical registers on the machine. 543 // * This is normally handled by spilling the vreg, and folding the reload 544 // into the user instruction. (Thus decreasing the number of used vregs 545 // until the remainder can be assigned to physregs.) 546 // * However, since we may try to spill vregs in any order, we can end up 547 // trying to spill each operand to the instruction, and then rematting it 548 // instead. When that happens, the new live intervals (for the remats) are 549 // expected to be trivially assignable (i.e. RS_Done). However, since we 550 // may have more remats than physregs, we're guaranteed to fail to assign 551 // one. 552 // At the moment, we only handle this for STATEPOINTs since they're the only 553 // pseudo op where we've seen this. If we start seeing other instructions 554 // with the same problem, we need to revisit this. 555 if (MI.getOpcode() != TargetOpcode::STATEPOINT) 556 return true; 557 // For STATEPOINTs we allow re-materialization for fixed arguments only hoping 558 // that number of physical registers is enough to cover all fixed arguments. 559 // If it is not true we need to revisit it. 560 for (unsigned Idx = StatepointOpers(&MI).getVarIdx(), 561 EndIdx = MI.getNumOperands(); 562 Idx < EndIdx; ++Idx) { 563 MachineOperand &MO = MI.getOperand(Idx); 564 if (MO.isReg() && MO.getReg() == VReg) 565 return false; 566 } 567 return true; 568 } 569 570 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading. 571 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { 572 // Analyze instruction 573 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops; 574 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops); 575 576 if (!RI.Reads) 577 return false; 578 579 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true); 580 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex()); 581 582 if (!ParentVNI) { 583 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: "); 584 for (MachineOperand &MO : MI.operands()) 585 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) 586 MO.setIsUndef(); 587 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI); 588 return true; 589 } 590 591 if (SnippetCopies.count(&MI)) 592 return false; 593 594 LiveInterval &OrigLI = LIS.getInterval(Original); 595 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx); 596 LiveRangeEdit::Remat RM(ParentVNI); 597 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def); 598 599 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) { 600 markValueUsed(&VirtReg, ParentVNI); 601 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); 602 return false; 603 } 604 605 // If the instruction also writes VirtReg.reg, it had better not require the 606 // same register for uses and defs. 607 if (RI.Tied) { 608 markValueUsed(&VirtReg, ParentVNI); 609 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI); 610 return false; 611 } 612 613 // Before rematerializing into a register for a single instruction, try to 614 // fold a load into the instruction. That avoids allocating a new register. 615 if (RM.OrigMI->canFoldAsLoad() && 616 foldMemoryOperand(Ops, RM.OrigMI)) { 617 Edit->markRematerialized(RM.ParentVNI); 618 ++NumFoldedLoads; 619 return true; 620 } 621 622 // If we can't guarantee that we'll be able to actually assign the new vreg, 623 // we can't remat. 624 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) { 625 markValueUsed(&VirtReg, ParentVNI); 626 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); 627 return false; 628 } 629 630 // Allocate a new register for the remat. 631 Register NewVReg = Edit->createFrom(Original); 632 633 // Finally we can rematerialize OrigMI before MI. 634 SlotIndex DefIdx = 635 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI); 636 637 // We take the DebugLoc from MI, since OrigMI may be attributed to a 638 // different source location. 639 auto *NewMI = LIS.getInstructionFromIndex(DefIdx); 640 NewMI->setDebugLoc(MI.getDebugLoc()); 641 642 (void)DefIdx; 643 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' 644 << *LIS.getInstructionFromIndex(DefIdx)); 645 646 // Replace operands 647 for (const auto &OpPair : Ops) { 648 MachineOperand &MO = OpPair.first->getOperand(OpPair.second); 649 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) { 650 MO.setReg(NewVReg); 651 MO.setIsKill(); 652 } 653 } 654 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n'); 655 656 ++NumRemats; 657 return true; 658 } 659 660 /// reMaterializeAll - Try to rematerialize as many uses as possible, 661 /// and trim the live ranges after. 662 void InlineSpiller::reMaterializeAll() { 663 if (!Edit->anyRematerializable(AA)) 664 return; 665 666 UsedValues.clear(); 667 668 // Try to remat before all uses of snippets. 669 bool anyRemat = false; 670 for (Register Reg : RegsToSpill) { 671 LiveInterval &LI = LIS.getInterval(Reg); 672 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 673 // Debug values are not allowed to affect codegen. 674 if (MI.isDebugValue()) 675 continue; 676 677 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug " 678 "instruction that isn't a DBG_VALUE"); 679 680 anyRemat |= reMaterializeFor(LI, MI); 681 } 682 } 683 if (!anyRemat) 684 return; 685 686 // Remove any values that were completely rematted. 687 for (Register Reg : RegsToSpill) { 688 LiveInterval &LI = LIS.getInterval(Reg); 689 for (VNInfo *VNI : llvm::make_range(LI.vni_begin(), LI.vni_end())) { 690 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI)) 691 continue; 692 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 693 MI->addRegisterDead(Reg, &TRI); 694 if (!MI->allDefsAreDead()) 695 continue; 696 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI); 697 DeadDefs.push_back(MI); 698 } 699 } 700 701 // Eliminate dead code after remat. Note that some snippet copies may be 702 // deleted here. 703 if (DeadDefs.empty()) 704 return; 705 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n"); 706 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA); 707 708 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions 709 // after rematerialization. To remove a VNI for a vreg from its LiveInterval, 710 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all 711 // removed, PHI VNI are still left in the LiveInterval. 712 // So to get rid of unused reg, we need to check whether it has non-dbg 713 // reference instead of whether it has non-empty interval. 714 unsigned ResultPos = 0; 715 for (Register Reg : RegsToSpill) { 716 if (MRI.reg_nodbg_empty(Reg)) { 717 Edit->eraseVirtReg(Reg); 718 continue; 719 } 720 721 assert(LIS.hasInterval(Reg) && 722 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) && 723 "Empty and not used live-range?!"); 724 725 RegsToSpill[ResultPos++] = Reg; 726 } 727 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end()); 728 LLVM_DEBUG(dbgs() << RegsToSpill.size() 729 << " registers to spill after remat.\n"); 730 } 731 732 //===----------------------------------------------------------------------===// 733 // Spilling 734 //===----------------------------------------------------------------------===// 735 736 /// If MI is a load or store of StackSlot, it can be removed. 737 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) { 738 int FI = 0; 739 Register InstrReg = TII.isLoadFromStackSlot(*MI, FI); 740 bool IsLoad = InstrReg; 741 if (!IsLoad) 742 InstrReg = TII.isStoreToStackSlot(*MI, FI); 743 744 // We have a stack access. Is it the right register and slot? 745 if (InstrReg != Reg || FI != StackSlot) 746 return false; 747 748 if (!IsLoad) 749 HSpiller.rmFromMergeableSpills(*MI, StackSlot); 750 751 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI); 752 LIS.RemoveMachineInstrFromMaps(*MI); 753 MI->eraseFromParent(); 754 755 if (IsLoad) { 756 ++NumReloadsRemoved; 757 --NumReloads; 758 } else { 759 ++NumSpillsRemoved; 760 --NumSpills; 761 } 762 763 return true; 764 } 765 766 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 767 LLVM_DUMP_METHOD 768 // Dump the range of instructions from B to E with their slot indexes. 769 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, 770 MachineBasicBlock::iterator E, 771 LiveIntervals const &LIS, 772 const char *const header, 773 Register VReg = Register()) { 774 char NextLine = '\n'; 775 char SlotIndent = '\t'; 776 777 if (std::next(B) == E) { 778 NextLine = ' '; 779 SlotIndent = ' '; 780 } 781 782 dbgs() << '\t' << header << ": " << NextLine; 783 784 for (MachineBasicBlock::iterator I = B; I != E; ++I) { 785 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot(); 786 787 // If a register was passed in and this instruction has it as a 788 // destination that is marked as an early clobber, print the 789 // early-clobber slot index. 790 if (VReg) { 791 MachineOperand *MO = I->findRegisterDefOperand(VReg); 792 if (MO && MO->isEarlyClobber()) 793 Idx = Idx.getRegSlot(true); 794 } 795 796 dbgs() << SlotIndent << Idx << '\t' << *I; 797 } 798 } 799 #endif 800 801 /// foldMemoryOperand - Try folding stack slot references in Ops into their 802 /// instructions. 803 /// 804 /// @param Ops Operand indices from AnalyzeVirtRegInBundle(). 805 /// @param LoadMI Load instruction to use instead of stack slot when non-null. 806 /// @return True on success. 807 bool InlineSpiller:: 808 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops, 809 MachineInstr *LoadMI) { 810 if (Ops.empty()) 811 return false; 812 // Don't attempt folding in bundles. 813 MachineInstr *MI = Ops.front().first; 814 if (Ops.back().first != MI || MI->isBundled()) 815 return false; 816 817 bool WasCopy = MI->isCopy(); 818 Register ImpReg; 819 820 // TII::foldMemoryOperand will do what we need here for statepoint 821 // (fold load into use and remove corresponding def). We will replace 822 // uses of removed def with loads (spillAroundUses). 823 // For that to work we need to untie def and use to pass it through 824 // foldMemoryOperand and signal foldPatchpoint that it is allowed to 825 // fold them. 826 bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT; 827 828 // Spill subregs if the target allows it. 829 // We always want to spill subregs for stackmap/patchpoint pseudos. 830 bool SpillSubRegs = TII.isSubregFoldable() || 831 MI->getOpcode() == TargetOpcode::STATEPOINT || 832 MI->getOpcode() == TargetOpcode::PATCHPOINT || 833 MI->getOpcode() == TargetOpcode::STACKMAP; 834 835 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied 836 // operands. 837 SmallVector<unsigned, 8> FoldOps; 838 for (const auto &OpPair : Ops) { 839 unsigned Idx = OpPair.second; 840 assert(MI == OpPair.first && "Instruction conflict during operand folding"); 841 MachineOperand &MO = MI->getOperand(Idx); 842 if (MO.isImplicit()) { 843 ImpReg = MO.getReg(); 844 continue; 845 } 846 847 if (!SpillSubRegs && MO.getSubReg()) 848 return false; 849 // We cannot fold a load instruction into a def. 850 if (LoadMI && MO.isDef()) 851 return false; 852 // Tied use operands should not be passed to foldMemoryOperand. 853 if (UntieRegs || !MI->isRegTiedToDefOperand(Idx)) 854 FoldOps.push_back(Idx); 855 } 856 857 // If we only have implicit uses, we won't be able to fold that. 858 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try! 859 if (FoldOps.empty()) 860 return false; 861 862 MachineInstrSpan MIS(MI, MI->getParent()); 863 864 SmallVector<std::pair<unsigned, unsigned> > TiedOps; 865 if (UntieRegs) 866 for (unsigned Idx : FoldOps) { 867 MachineOperand &MO = MI->getOperand(Idx); 868 if (!MO.isTied()) 869 continue; 870 unsigned Tied = MI->findTiedOperandIdx(Idx); 871 if (MO.isUse()) 872 TiedOps.emplace_back(Tied, Idx); 873 else { 874 assert(MO.isDef() && "Tied to not use and def?"); 875 TiedOps.emplace_back(Idx, Tied); 876 } 877 MI->untieRegOperand(Idx); 878 } 879 880 MachineInstr *FoldMI = 881 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS) 882 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM); 883 if (!FoldMI) { 884 // Re-tie operands. 885 for (auto Tied : TiedOps) 886 MI->tieOperands(Tied.first, Tied.second); 887 return false; 888 } 889 890 // Remove LIS for any dead defs in the original MI not in FoldMI. 891 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) { 892 if (!MO->isReg()) 893 continue; 894 Register Reg = MO->getReg(); 895 if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) { 896 continue; 897 } 898 // Skip non-Defs, including undef uses and internal reads. 899 if (MO->isUse()) 900 continue; 901 PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI); 902 if (RI.FullyDefined) 903 continue; 904 // FoldMI does not define this physreg. Remove the LI segment. 905 assert(MO->isDead() && "Cannot fold physreg def"); 906 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot(); 907 LIS.removePhysRegDefAt(Reg.asMCReg(), Idx); 908 } 909 910 int FI; 911 if (TII.isStoreToStackSlot(*MI, FI) && 912 HSpiller.rmFromMergeableSpills(*MI, FI)) 913 --NumSpills; 914 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI); 915 // Update the call site info. 916 if (MI->isCandidateForCallSiteEntry()) 917 MI->getMF()->moveCallSiteInfo(MI, FoldMI); 918 919 // If we've folded a store into an instruction labelled with debug-info, 920 // record a substitution from the old operand to the memory operand. Handle 921 // the simple common case where operand 0 is the one being folded, plus when 922 // the destination operand is also a tied def. More values could be 923 // substituted / preserved with more analysis. 924 if (MI->peekDebugInstrNum() && Ops[0].second == 0) { 925 // Helper lambda. 926 auto MakeSubstitution = [this,FoldMI,MI,&Ops]() { 927 // Substitute old operand zero to the new instructions memory operand. 928 unsigned OldOperandNum = Ops[0].second; 929 unsigned NewNum = FoldMI->getDebugInstrNum(); 930 unsigned OldNum = MI->getDebugInstrNum(); 931 MF.makeDebugValueSubstitution({OldNum, OldOperandNum}, 932 {NewNum, MachineFunction::DebugOperandMemNumber}); 933 }; 934 935 const MachineOperand &Op0 = MI->getOperand(Ops[0].second); 936 if (Ops.size() == 1 && Op0.isDef()) { 937 MakeSubstitution(); 938 } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() && 939 Op0.getReg() == MI->getOperand(1).getReg()) { 940 MakeSubstitution(); 941 } 942 } else if (MI->peekDebugInstrNum()) { 943 // This is a debug-labelled instruction, but the operand being folded isn't 944 // at operand zero. Most likely this means it's a load being folded in. 945 // Substitute any register defs from operand zero up to the one being 946 // folded -- past that point, we don't know what the new operand indexes 947 // will be. 948 MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second); 949 } 950 951 MI->eraseFromParent(); 952 953 // Insert any new instructions other than FoldMI into the LIS maps. 954 assert(!MIS.empty() && "Unexpected empty span of instructions!"); 955 for (MachineInstr &MI : MIS) 956 if (&MI != FoldMI) 957 LIS.InsertMachineInstrInMaps(MI); 958 959 // TII.foldMemoryOperand may have left some implicit operands on the 960 // instruction. Strip them. 961 if (ImpReg) 962 for (unsigned i = FoldMI->getNumOperands(); i; --i) { 963 MachineOperand &MO = FoldMI->getOperand(i - 1); 964 if (!MO.isReg() || !MO.isImplicit()) 965 break; 966 if (MO.getReg() == ImpReg) 967 FoldMI->RemoveOperand(i - 1); 968 } 969 970 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS, 971 "folded")); 972 973 if (!WasCopy) 974 ++NumFolded; 975 else if (Ops.front().second == 0) { 976 ++NumSpills; 977 // If there is only 1 store instruction is required for spill, add it 978 // to mergeable list. In X86 AMX, 2 intructions are required to store. 979 // We disable the merge for this case. 980 if (std::distance(MIS.begin(), MIS.end()) <= 1) 981 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original); 982 } else 983 ++NumReloads; 984 return true; 985 } 986 987 void InlineSpiller::insertReload(Register NewVReg, 988 SlotIndex Idx, 989 MachineBasicBlock::iterator MI) { 990 MachineBasicBlock &MBB = *MI->getParent(); 991 992 MachineInstrSpan MIS(MI, &MBB); 993 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot, 994 MRI.getRegClass(NewVReg), &TRI); 995 996 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI); 997 998 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload", 999 NewVReg)); 1000 ++NumReloads; 1001 } 1002 1003 /// Check if \p Def fully defines a VReg with an undefined value. 1004 /// If that's the case, that means the value of VReg is actually 1005 /// not relevant. 1006 static bool isRealSpill(const MachineInstr &Def) { 1007 if (!Def.isImplicitDef()) 1008 return true; 1009 assert(Def.getNumOperands() == 1 && 1010 "Implicit def with more than one definition"); 1011 // We can say that the VReg defined by Def is undef, only if it is 1012 // fully defined by Def. Otherwise, some of the lanes may not be 1013 // undef and the value of the VReg matters. 1014 return Def.getOperand(0).getSubReg(); 1015 } 1016 1017 /// insertSpill - Insert a spill of NewVReg after MI. 1018 void InlineSpiller::insertSpill(Register NewVReg, bool isKill, 1019 MachineBasicBlock::iterator MI) { 1020 // Spill are not terminators, so inserting spills after terminators will 1021 // violate invariants in MachineVerifier. 1022 assert(!MI->isTerminator() && "Inserting a spill after a terminator"); 1023 MachineBasicBlock &MBB = *MI->getParent(); 1024 1025 MachineInstrSpan MIS(MI, &MBB); 1026 MachineBasicBlock::iterator SpillBefore = std::next(MI); 1027 bool IsRealSpill = isRealSpill(*MI); 1028 1029 if (IsRealSpill) 1030 TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot, 1031 MRI.getRegClass(NewVReg), &TRI); 1032 else 1033 // Don't spill undef value. 1034 // Anything works for undef, in particular keeping the memory 1035 // uninitialized is a viable option and it saves code size and 1036 // run time. 1037 BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL)) 1038 .addReg(NewVReg, getKillRegState(isKill)); 1039 1040 MachineBasicBlock::iterator Spill = std::next(MI); 1041 LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end()); 1042 for (const MachineInstr &MI : make_range(Spill, MIS.end())) 1043 getVDefInterval(MI, LIS); 1044 1045 LLVM_DEBUG( 1046 dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill")); 1047 ++NumSpills; 1048 // If there is only 1 store instruction is required for spill, add it 1049 // to mergeable list. In X86 AMX, 2 intructions are required to store. 1050 // We disable the merge for this case. 1051 if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1) 1052 HSpiller.addToMergeableSpills(*Spill, StackSlot, Original); 1053 } 1054 1055 /// spillAroundUses - insert spill code around each use of Reg. 1056 void InlineSpiller::spillAroundUses(Register Reg) { 1057 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n'); 1058 LiveInterval &OldLI = LIS.getInterval(Reg); 1059 1060 // Iterate over instructions using Reg. 1061 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 1062 // Debug values are not allowed to affect codegen. 1063 if (MI.isDebugValue()) { 1064 // Modify DBG_VALUE now that the value is in a spill slot. 1065 MachineBasicBlock *MBB = MI.getParent(); 1066 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI); 1067 buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg); 1068 MBB->erase(MI); 1069 continue; 1070 } 1071 1072 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug " 1073 "instruction that isn't a DBG_VALUE"); 1074 1075 // Ignore copies to/from snippets. We'll delete them. 1076 if (SnippetCopies.count(&MI)) 1077 continue; 1078 1079 // Stack slot accesses may coalesce away. 1080 if (coalesceStackAccess(&MI, Reg)) 1081 continue; 1082 1083 // Analyze instruction. 1084 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops; 1085 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops); 1086 1087 // Find the slot index where this instruction reads and writes OldLI. 1088 // This is usually the def slot, except for tied early clobbers. 1089 SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot(); 1090 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true))) 1091 if (SlotIndex::isSameInstr(Idx, VNI->def)) 1092 Idx = VNI->def; 1093 1094 // Check for a sibling copy. 1095 Register SibReg = isFullCopyOf(MI, Reg); 1096 if (SibReg && isSibling(SibReg)) { 1097 // This may actually be a copy between snippets. 1098 if (isRegToSpill(SibReg)) { 1099 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI); 1100 SnippetCopies.insert(&MI); 1101 continue; 1102 } 1103 if (RI.Writes) { 1104 if (hoistSpillInsideBB(OldLI, MI)) { 1105 // This COPY is now dead, the value is already in the stack slot. 1106 MI.getOperand(0).setIsDead(); 1107 DeadDefs.push_back(&MI); 1108 continue; 1109 } 1110 } else { 1111 // This is a reload for a sib-reg copy. Drop spills downstream. 1112 LiveInterval &SibLI = LIS.getInterval(SibReg); 1113 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx)); 1114 // The COPY will fold to a reload below. 1115 } 1116 } 1117 1118 // Attempt to fold memory ops. 1119 if (foldMemoryOperand(Ops)) 1120 continue; 1121 1122 // Create a new virtual register for spill/fill. 1123 // FIXME: Infer regclass from instruction alone. 1124 Register NewVReg = Edit->createFrom(Reg); 1125 1126 if (RI.Reads) 1127 insertReload(NewVReg, Idx, &MI); 1128 1129 // Rewrite instruction operands. 1130 bool hasLiveDef = false; 1131 for (const auto &OpPair : Ops) { 1132 MachineOperand &MO = OpPair.first->getOperand(OpPair.second); 1133 MO.setReg(NewVReg); 1134 if (MO.isUse()) { 1135 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second)) 1136 MO.setIsKill(); 1137 } else { 1138 if (!MO.isDead()) 1139 hasLiveDef = true; 1140 } 1141 } 1142 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n'); 1143 1144 // FIXME: Use a second vreg if instruction has no tied ops. 1145 if (RI.Writes) 1146 if (hasLiveDef) 1147 insertSpill(NewVReg, true, &MI); 1148 } 1149 } 1150 1151 /// spillAll - Spill all registers remaining after rematerialization. 1152 void InlineSpiller::spillAll() { 1153 // Update LiveStacks now that we are committed to spilling. 1154 if (StackSlot == VirtRegMap::NO_STACK_SLOT) { 1155 StackSlot = VRM.assignVirt2StackSlot(Original); 1156 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original)); 1157 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator()); 1158 } else 1159 StackInt = &LSS.getInterval(StackSlot); 1160 1161 if (Original != Edit->getReg()) 1162 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot); 1163 1164 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values"); 1165 for (Register Reg : RegsToSpill) 1166 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg), 1167 StackInt->getValNumInfo(0)); 1168 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n'); 1169 1170 // Spill around uses of all RegsToSpill. 1171 for (Register Reg : RegsToSpill) 1172 spillAroundUses(Reg); 1173 1174 // Hoisted spills may cause dead code. 1175 if (!DeadDefs.empty()) { 1176 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n"); 1177 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA); 1178 } 1179 1180 // Finally delete the SnippetCopies. 1181 for (Register Reg : RegsToSpill) { 1182 for (MachineInstr &MI : 1183 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) { 1184 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy"); 1185 // FIXME: Do this with a LiveRangeEdit callback. 1186 LIS.RemoveMachineInstrFromMaps(MI); 1187 MI.eraseFromParent(); 1188 } 1189 } 1190 1191 // Delete all spilled registers. 1192 for (Register Reg : RegsToSpill) 1193 Edit->eraseVirtReg(Reg); 1194 } 1195 1196 void InlineSpiller::spill(LiveRangeEdit &edit) { 1197 ++NumSpilledRanges; 1198 Edit = &edit; 1199 assert(!Register::isStackSlot(edit.getReg()) && 1200 "Trying to spill a stack slot."); 1201 // Share a stack slot among all descendants of Original. 1202 Original = VRM.getOriginal(edit.getReg()); 1203 StackSlot = VRM.getStackSlot(Original); 1204 StackInt = nullptr; 1205 1206 LLVM_DEBUG(dbgs() << "Inline spilling " 1207 << TRI.getRegClassName(MRI.getRegClass(edit.getReg())) 1208 << ':' << edit.getParent() << "\nFrom original " 1209 << printReg(Original) << '\n'); 1210 assert(edit.getParent().isSpillable() && 1211 "Attempting to spill already spilled value."); 1212 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs"); 1213 1214 collectRegsToSpill(); 1215 reMaterializeAll(); 1216 1217 // Remat may handle everything. 1218 if (!RegsToSpill.empty()) 1219 spillAll(); 1220 1221 Edit->calculateRegClassAndHint(MF, VRAI); 1222 } 1223 1224 /// Optimizations after all the reg selections and spills are done. 1225 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); } 1226 1227 /// When a spill is inserted, add the spill to MergeableSpills map. 1228 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot, 1229 unsigned Original) { 1230 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator(); 1231 LiveInterval &OrigLI = LIS.getInterval(Original); 1232 // save a copy of LiveInterval in StackSlotToOrigLI because the original 1233 // LiveInterval may be cleared after all its references are spilled. 1234 if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) { 1235 auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight()); 1236 LI->assign(OrigLI, Allocator); 1237 StackSlotToOrigLI[StackSlot] = std::move(LI); 1238 } 1239 SlotIndex Idx = LIS.getInstructionIndex(Spill); 1240 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot()); 1241 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI); 1242 MergeableSpills[MIdx].insert(&Spill); 1243 } 1244 1245 /// When a spill is removed, remove the spill from MergeableSpills map. 1246 /// Return true if the spill is removed successfully. 1247 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill, 1248 int StackSlot) { 1249 auto It = StackSlotToOrigLI.find(StackSlot); 1250 if (It == StackSlotToOrigLI.end()) 1251 return false; 1252 SlotIndex Idx = LIS.getInstructionIndex(Spill); 1253 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot()); 1254 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI); 1255 return MergeableSpills[MIdx].erase(&Spill); 1256 } 1257 1258 /// Check BB to see if it is a possible target BB to place a hoisted spill, 1259 /// i.e., there should be a living sibling of OrigReg at the insert point. 1260 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI, 1261 MachineBasicBlock &BB, Register &LiveReg) { 1262 SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB); 1263 // The original def could be after the last insert point in the root block, 1264 // we can't hoist to here. 1265 if (Idx < OrigVNI.def) { 1266 // TODO: We could be better here. If LI is not alive in landing pad 1267 // we could hoist spill after LIP. 1268 LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n"); 1269 return false; 1270 } 1271 Register OrigReg = OrigLI.reg(); 1272 SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg]; 1273 assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI"); 1274 1275 for (const Register &SibReg : Siblings) { 1276 LiveInterval &LI = LIS.getInterval(SibReg); 1277 VNInfo *VNI = LI.getVNInfoAt(Idx); 1278 if (VNI) { 1279 LiveReg = SibReg; 1280 return true; 1281 } 1282 } 1283 return false; 1284 } 1285 1286 /// Remove redundant spills in the same BB. Save those redundant spills in 1287 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map. 1288 void HoistSpillHelper::rmRedundantSpills( 1289 SmallPtrSet<MachineInstr *, 16> &Spills, 1290 SmallVectorImpl<MachineInstr *> &SpillsToRm, 1291 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) { 1292 // For each spill saw, check SpillBBToSpill[] and see if its BB already has 1293 // another spill inside. If a BB contains more than one spill, only keep the 1294 // earlier spill with smaller SlotIndex. 1295 for (const auto CurrentSpill : Spills) { 1296 MachineBasicBlock *Block = CurrentSpill->getParent(); 1297 MachineDomTreeNode *Node = MDT.getBase().getNode(Block); 1298 MachineInstr *PrevSpill = SpillBBToSpill[Node]; 1299 if (PrevSpill) { 1300 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill); 1301 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill); 1302 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill; 1303 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill; 1304 SpillsToRm.push_back(SpillToRm); 1305 SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep; 1306 } else { 1307 SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill; 1308 } 1309 } 1310 for (const auto SpillToRm : SpillsToRm) 1311 Spills.erase(SpillToRm); 1312 } 1313 1314 /// Starting from \p Root find a top-down traversal order of the dominator 1315 /// tree to visit all basic blocks containing the elements of \p Spills. 1316 /// Redundant spills will be found and put into \p SpillsToRm at the same 1317 /// time. \p SpillBBToSpill will be populated as part of the process and 1318 /// maps a basic block to the first store occurring in the basic block. 1319 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre 1320 void HoistSpillHelper::getVisitOrders( 1321 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills, 1322 SmallVectorImpl<MachineDomTreeNode *> &Orders, 1323 SmallVectorImpl<MachineInstr *> &SpillsToRm, 1324 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep, 1325 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) { 1326 // The set contains all the possible BB nodes to which we may hoist 1327 // original spills. 1328 SmallPtrSet<MachineDomTreeNode *, 8> WorkSet; 1329 // Save the BB nodes on the path from the first BB node containing 1330 // non-redundant spill to the Root node. 1331 SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath; 1332 // All the spills to be hoisted must originate from a single def instruction 1333 // to the OrigReg. It means the def instruction should dominate all the spills 1334 // to be hoisted. We choose the BB where the def instruction is located as 1335 // the Root. 1336 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom(); 1337 // For every node on the dominator tree with spill, walk up on the dominator 1338 // tree towards the Root node until it is reached. If there is other node 1339 // containing spill in the middle of the path, the previous spill saw will 1340 // be redundant and the node containing it will be removed. All the nodes on 1341 // the path starting from the first node with non-redundant spill to the Root 1342 // node will be added to the WorkSet, which will contain all the possible 1343 // locations where spills may be hoisted to after the loop below is done. 1344 for (const auto Spill : Spills) { 1345 MachineBasicBlock *Block = Spill->getParent(); 1346 MachineDomTreeNode *Node = MDT[Block]; 1347 MachineInstr *SpillToRm = nullptr; 1348 while (Node != RootIDomNode) { 1349 // If Node dominates Block, and it already contains a spill, the spill in 1350 // Block will be redundant. 1351 if (Node != MDT[Block] && SpillBBToSpill[Node]) { 1352 SpillToRm = SpillBBToSpill[MDT[Block]]; 1353 break; 1354 /// If we see the Node already in WorkSet, the path from the Node to 1355 /// the Root node must already be traversed by another spill. 1356 /// Then no need to repeat. 1357 } else if (WorkSet.count(Node)) { 1358 break; 1359 } else { 1360 NodesOnPath.insert(Node); 1361 } 1362 Node = Node->getIDom(); 1363 } 1364 if (SpillToRm) { 1365 SpillsToRm.push_back(SpillToRm); 1366 } else { 1367 // Add a BB containing the original spills to SpillsToKeep -- i.e., 1368 // set the initial status before hoisting start. The value of BBs 1369 // containing original spills is set to 0, in order to descriminate 1370 // with BBs containing hoisted spills which will be inserted to 1371 // SpillsToKeep later during hoisting. 1372 SpillsToKeep[MDT[Block]] = 0; 1373 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end()); 1374 } 1375 NodesOnPath.clear(); 1376 } 1377 1378 // Sort the nodes in WorkSet in top-down order and save the nodes 1379 // in Orders. Orders will be used for hoisting in runHoistSpills. 1380 unsigned idx = 0; 1381 Orders.push_back(MDT.getBase().getNode(Root)); 1382 do { 1383 MachineDomTreeNode *Node = Orders[idx++]; 1384 for (MachineDomTreeNode *Child : Node->children()) { 1385 if (WorkSet.count(Child)) 1386 Orders.push_back(Child); 1387 } 1388 } while (idx != Orders.size()); 1389 assert(Orders.size() == WorkSet.size() && 1390 "Orders have different size with WorkSet"); 1391 1392 #ifndef NDEBUG 1393 LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n"); 1394 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin(); 1395 for (; RIt != Orders.rend(); RIt++) 1396 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ","); 1397 LLVM_DEBUG(dbgs() << "\n"); 1398 #endif 1399 } 1400 1401 /// Try to hoist spills according to BB hotness. The spills to removed will 1402 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in 1403 /// \p SpillsToIns. 1404 void HoistSpillHelper::runHoistSpills( 1405 LiveInterval &OrigLI, VNInfo &OrigVNI, 1406 SmallPtrSet<MachineInstr *, 16> &Spills, 1407 SmallVectorImpl<MachineInstr *> &SpillsToRm, 1408 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) { 1409 // Visit order of dominator tree nodes. 1410 SmallVector<MachineDomTreeNode *, 32> Orders; 1411 // SpillsToKeep contains all the nodes where spills are to be inserted 1412 // during hoisting. If the spill to be inserted is an original spill 1413 // (not a hoisted one), the value of the map entry is 0. If the spill 1414 // is a hoisted spill, the value of the map entry is the VReg to be used 1415 // as the source of the spill. 1416 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep; 1417 // Map from BB to the first spill inside of it. 1418 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill; 1419 1420 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill); 1421 1422 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def); 1423 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep, 1424 SpillBBToSpill); 1425 1426 // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of 1427 // nodes set and the cost of all the spills inside those nodes. 1428 // The nodes set are the locations where spills are to be inserted 1429 // in the subtree of current node. 1430 using NodesCostPair = 1431 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>; 1432 DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap; 1433 1434 // Iterate Orders set in reverse order, which will be a bottom-up order 1435 // in the dominator tree. Once we visit a dom tree node, we know its 1436 // children have already been visited and the spill locations in the 1437 // subtrees of all the children have been determined. 1438 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin(); 1439 for (; RIt != Orders.rend(); RIt++) { 1440 MachineBasicBlock *Block = (*RIt)->getBlock(); 1441 1442 // If Block contains an original spill, simply continue. 1443 if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) { 1444 SpillsInSubTreeMap[*RIt].first.insert(*RIt); 1445 // SpillsInSubTreeMap[*RIt].second contains the cost of spill. 1446 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block); 1447 continue; 1448 } 1449 1450 // Collect spills in subtree of current node (*RIt) to 1451 // SpillsInSubTreeMap[*RIt].first. 1452 for (MachineDomTreeNode *Child : (*RIt)->children()) { 1453 if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end()) 1454 continue; 1455 // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below 1456 // should be placed before getting the begin and end iterators of 1457 // SpillsInSubTreeMap[Child].first, or else the iterators may be 1458 // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time 1459 // and the map grows and then the original buckets in the map are moved. 1460 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree = 1461 SpillsInSubTreeMap[*RIt].first; 1462 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second; 1463 SubTreeCost += SpillsInSubTreeMap[Child].second; 1464 auto BI = SpillsInSubTreeMap[Child].first.begin(); 1465 auto EI = SpillsInSubTreeMap[Child].first.end(); 1466 SpillsInSubTree.insert(BI, EI); 1467 SpillsInSubTreeMap.erase(Child); 1468 } 1469 1470 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree = 1471 SpillsInSubTreeMap[*RIt].first; 1472 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second; 1473 // No spills in subtree, simply continue. 1474 if (SpillsInSubTree.empty()) 1475 continue; 1476 1477 // Check whether Block is a possible candidate to insert spill. 1478 Register LiveReg; 1479 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg)) 1480 continue; 1481 1482 // If there are multiple spills that could be merged, bias a little 1483 // to hoist the spill. 1484 BranchProbability MarginProb = (SpillsInSubTree.size() > 1) 1485 ? BranchProbability(9, 10) 1486 : BranchProbability(1, 1); 1487 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) { 1488 // Hoist: Move spills to current Block. 1489 for (const auto SpillBB : SpillsInSubTree) { 1490 // When SpillBB is a BB contains original spill, insert the spill 1491 // to SpillsToRm. 1492 if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() && 1493 !SpillsToKeep[SpillBB]) { 1494 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB]; 1495 SpillsToRm.push_back(SpillToRm); 1496 } 1497 // SpillBB will not contain spill anymore, remove it from SpillsToKeep. 1498 SpillsToKeep.erase(SpillBB); 1499 } 1500 // Current Block is the BB containing the new hoisted spill. Add it to 1501 // SpillsToKeep. LiveReg is the source of the new spill. 1502 SpillsToKeep[*RIt] = LiveReg; 1503 LLVM_DEBUG({ 1504 dbgs() << "spills in BB: "; 1505 for (const auto Rspill : SpillsInSubTree) 1506 dbgs() << Rspill->getBlock()->getNumber() << " "; 1507 dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber() 1508 << "\n"; 1509 }); 1510 SpillsInSubTree.clear(); 1511 SpillsInSubTree.insert(*RIt); 1512 SubTreeCost = MBFI.getBlockFreq(Block); 1513 } 1514 } 1515 // For spills in SpillsToKeep with LiveReg set (i.e., not original spill), 1516 // save them to SpillsToIns. 1517 for (const auto &Ent : SpillsToKeep) { 1518 if (Ent.second) 1519 SpillsToIns[Ent.first->getBlock()] = Ent.second; 1520 } 1521 } 1522 1523 /// For spills with equal values, remove redundant spills and hoist those left 1524 /// to less hot spots. 1525 /// 1526 /// Spills with equal values will be collected into the same set in 1527 /// MergeableSpills when spill is inserted. These equal spills are originated 1528 /// from the same defining instruction and are dominated by the instruction. 1529 /// Before hoisting all the equal spills, redundant spills inside in the same 1530 /// BB are first marked to be deleted. Then starting from the spills left, walk 1531 /// up on the dominator tree towards the Root node where the define instruction 1532 /// is located, mark the dominated spills to be deleted along the way and 1533 /// collect the BB nodes on the path from non-dominated spills to the define 1534 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places 1535 /// where we are considering to hoist the spills. We iterate the WorkSet in 1536 /// bottom-up order, and for each node, we will decide whether to hoist spills 1537 /// inside its subtree to that node. In this way, we can get benefit locally 1538 /// even if hoisting all the equal spills to one cold place is impossible. 1539 void HoistSpillHelper::hoistAllSpills() { 1540 SmallVector<Register, 4> NewVRegs; 1541 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this); 1542 1543 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) { 1544 Register Reg = Register::index2VirtReg(i); 1545 Register Original = VRM.getPreSplitReg(Reg); 1546 if (!MRI.def_empty(Reg)) 1547 Virt2SiblingsMap[Original].insert(Reg); 1548 } 1549 1550 // Each entry in MergeableSpills contains a spill set with equal values. 1551 for (auto &Ent : MergeableSpills) { 1552 int Slot = Ent.first.first; 1553 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot]; 1554 VNInfo *OrigVNI = Ent.first.second; 1555 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second; 1556 if (Ent.second.empty()) 1557 continue; 1558 1559 LLVM_DEBUG({ 1560 dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n" 1561 << "Equal spills in BB: "; 1562 for (const auto spill : EqValSpills) 1563 dbgs() << spill->getParent()->getNumber() << " "; 1564 dbgs() << "\n"; 1565 }); 1566 1567 // SpillsToRm is the spill set to be removed from EqValSpills. 1568 SmallVector<MachineInstr *, 16> SpillsToRm; 1569 // SpillsToIns is the spill set to be newly inserted after hoisting. 1570 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns; 1571 1572 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns); 1573 1574 LLVM_DEBUG({ 1575 dbgs() << "Finally inserted spills in BB: "; 1576 for (const auto &Ispill : SpillsToIns) 1577 dbgs() << Ispill.first->getNumber() << " "; 1578 dbgs() << "\nFinally removed spills in BB: "; 1579 for (const auto Rspill : SpillsToRm) 1580 dbgs() << Rspill->getParent()->getNumber() << " "; 1581 dbgs() << "\n"; 1582 }); 1583 1584 // Stack live range update. 1585 LiveInterval &StackIntvl = LSS.getInterval(Slot); 1586 if (!SpillsToIns.empty() || !SpillsToRm.empty()) 1587 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI, 1588 StackIntvl.getValNumInfo(0)); 1589 1590 // Insert hoisted spills. 1591 for (auto const &Insert : SpillsToIns) { 1592 MachineBasicBlock *BB = Insert.first; 1593 Register LiveReg = Insert.second; 1594 MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB); 1595 MachineInstrSpan MIS(MII, BB); 1596 TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot, 1597 MRI.getRegClass(LiveReg), &TRI); 1598 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII); 1599 for (const MachineInstr &MI : make_range(MIS.begin(), MII)) 1600 getVDefInterval(MI, LIS); 1601 ++NumSpills; 1602 } 1603 1604 // Remove redundant spills or change them to dead instructions. 1605 NumSpills -= SpillsToRm.size(); 1606 for (auto const RMEnt : SpillsToRm) { 1607 RMEnt->setDesc(TII.get(TargetOpcode::KILL)); 1608 for (unsigned i = RMEnt->getNumOperands(); i; --i) { 1609 MachineOperand &MO = RMEnt->getOperand(i - 1); 1610 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead()) 1611 RMEnt->RemoveOperand(i - 1); 1612 } 1613 } 1614 Edit.eliminateDeadDefs(SpillsToRm, None, AA); 1615 } 1616 } 1617 1618 /// For VirtReg clone, the \p New register should have the same physreg or 1619 /// stackslot as the \p old register. 1620 void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) { 1621 if (VRM.hasPhys(Old)) 1622 VRM.assignVirt2Phys(New, VRM.getPhys(Old)); 1623 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT) 1624 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old)); 1625 else 1626 llvm_unreachable("VReg should be assigned either physreg or stackslot"); 1627 if (VRM.hasShape(Old)) 1628 VRM.assignVirt2Shape(New, VRM.getShape(Old)); 1629 } 1630