1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The inline spiller modifies the machine function directly instead of 10 // inserting spills and restores in VirtRegMap. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SplitKit.h" 15 #include "llvm/ADT/ArrayRef.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/MapVector.h" 18 #include "llvm/ADT/None.h" 19 #include "llvm/ADT/STLExtras.h" 20 #include "llvm/ADT/SetVector.h" 21 #include "llvm/ADT/SmallPtrSet.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/ADT/Statistic.h" 24 #include "llvm/Analysis/AliasAnalysis.h" 25 #include "llvm/CodeGen/LiveInterval.h" 26 #include "llvm/CodeGen/LiveIntervals.h" 27 #include "llvm/CodeGen/LiveRangeEdit.h" 28 #include "llvm/CodeGen/LiveStacks.h" 29 #include "llvm/CodeGen/MachineBasicBlock.h" 30 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" 31 #include "llvm/CodeGen/MachineDominators.h" 32 #include "llvm/CodeGen/MachineFunction.h" 33 #include "llvm/CodeGen/MachineFunctionPass.h" 34 #include "llvm/CodeGen/MachineInstr.h" 35 #include "llvm/CodeGen/MachineInstrBuilder.h" 36 #include "llvm/CodeGen/MachineInstrBundle.h" 37 #include "llvm/CodeGen/MachineLoopInfo.h" 38 #include "llvm/CodeGen/MachineOperand.h" 39 #include "llvm/CodeGen/MachineRegisterInfo.h" 40 #include "llvm/CodeGen/SlotIndexes.h" 41 #include "llvm/CodeGen/Spiller.h" 42 #include "llvm/CodeGen/StackMaps.h" 43 #include "llvm/CodeGen/TargetInstrInfo.h" 44 #include "llvm/CodeGen/TargetOpcodes.h" 45 #include "llvm/CodeGen/TargetRegisterInfo.h" 46 #include "llvm/CodeGen/TargetSubtargetInfo.h" 47 #include "llvm/CodeGen/VirtRegMap.h" 48 #include "llvm/Config/llvm-config.h" 49 #include "llvm/Support/BlockFrequency.h" 50 #include "llvm/Support/BranchProbability.h" 51 #include "llvm/Support/CommandLine.h" 52 #include "llvm/Support/Compiler.h" 53 #include "llvm/Support/Debug.h" 54 #include "llvm/Support/ErrorHandling.h" 55 #include "llvm/Support/raw_ostream.h" 56 #include <cassert> 57 #include <iterator> 58 #include <tuple> 59 #include <utility> 60 #include <vector> 61 62 using namespace llvm; 63 64 #define DEBUG_TYPE "regalloc" 65 66 STATISTIC(NumSpilledRanges, "Number of spilled live ranges"); 67 STATISTIC(NumSnippets, "Number of spilled snippets"); 68 STATISTIC(NumSpills, "Number of spills inserted"); 69 STATISTIC(NumSpillsRemoved, "Number of spills removed"); 70 STATISTIC(NumReloads, "Number of reloads inserted"); 71 STATISTIC(NumReloadsRemoved, "Number of reloads removed"); 72 STATISTIC(NumFolded, "Number of folded stack accesses"); 73 STATISTIC(NumFoldedLoads, "Number of folded loads"); 74 STATISTIC(NumRemats, "Number of rematerialized defs for spilling"); 75 76 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden, 77 cl::desc("Disable inline spill hoisting")); 78 static cl::opt<bool> 79 RestrictStatepointRemat("restrict-statepoint-remat", 80 cl::init(false), cl::Hidden, 81 cl::desc("Restrict remat for statepoint operands")); 82 83 namespace { 84 85 class HoistSpillHelper : private LiveRangeEdit::Delegate { 86 MachineFunction &MF; 87 LiveIntervals &LIS; 88 LiveStacks &LSS; 89 MachineDominatorTree &MDT; 90 MachineLoopInfo &Loops; 91 VirtRegMap &VRM; 92 MachineRegisterInfo &MRI; 93 const TargetInstrInfo &TII; 94 const TargetRegisterInfo &TRI; 95 const MachineBlockFrequencyInfo &MBFI; 96 97 InsertPointAnalysis IPA; 98 99 // Map from StackSlot to the LiveInterval of the original register. 100 // Note the LiveInterval of the original register may have been deleted 101 // after it is spilled. We keep a copy here to track the range where 102 // spills can be moved. 103 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI; 104 105 // Map from pair of (StackSlot and Original VNI) to a set of spills which 106 // have the same stackslot and have equal values defined by Original VNI. 107 // These spills are mergeable and are hoist candiates. 108 using MergeableSpillsMap = 109 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>; 110 MergeableSpillsMap MergeableSpills; 111 112 /// This is the map from original register to a set containing all its 113 /// siblings. To hoist a spill to another BB, we need to find out a live 114 /// sibling there and use it as the source of the new spill. 115 DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap; 116 117 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI, 118 MachineBasicBlock &BB, Register &LiveReg); 119 120 void rmRedundantSpills( 121 SmallPtrSet<MachineInstr *, 16> &Spills, 122 SmallVectorImpl<MachineInstr *> &SpillsToRm, 123 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill); 124 125 void getVisitOrders( 126 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills, 127 SmallVectorImpl<MachineDomTreeNode *> &Orders, 128 SmallVectorImpl<MachineInstr *> &SpillsToRm, 129 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep, 130 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill); 131 132 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI, 133 SmallPtrSet<MachineInstr *, 16> &Spills, 134 SmallVectorImpl<MachineInstr *> &SpillsToRm, 135 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns); 136 137 public: 138 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf, 139 VirtRegMap &vrm) 140 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()), 141 LSS(pass.getAnalysis<LiveStacks>()), 142 MDT(pass.getAnalysis<MachineDominatorTree>()), 143 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm), 144 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()), 145 TRI(*mf.getSubtarget().getRegisterInfo()), 146 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()), 147 IPA(LIS, mf.getNumBlockIDs()) {} 148 149 void addToMergeableSpills(MachineInstr &Spill, int StackSlot, 150 unsigned Original); 151 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot); 152 void hoistAllSpills(); 153 void LRE_DidCloneVirtReg(Register, Register) override; 154 }; 155 156 class InlineSpiller : public Spiller { 157 MachineFunction &MF; 158 LiveIntervals &LIS; 159 LiveStacks &LSS; 160 MachineDominatorTree &MDT; 161 MachineLoopInfo &Loops; 162 VirtRegMap &VRM; 163 MachineRegisterInfo &MRI; 164 const TargetInstrInfo &TII; 165 const TargetRegisterInfo &TRI; 166 const MachineBlockFrequencyInfo &MBFI; 167 168 // Variables that are valid during spill(), but used by multiple methods. 169 LiveRangeEdit *Edit; 170 LiveInterval *StackInt; 171 int StackSlot; 172 Register Original; 173 174 // All registers to spill to StackSlot, including the main register. 175 SmallVector<Register, 8> RegsToSpill; 176 177 // All COPY instructions to/from snippets. 178 // They are ignored since both operands refer to the same stack slot. 179 SmallPtrSet<MachineInstr*, 8> SnippetCopies; 180 181 // Values that failed to remat at some point. 182 SmallPtrSet<VNInfo*, 8> UsedValues; 183 184 // Dead defs generated during spilling. 185 SmallVector<MachineInstr*, 8> DeadDefs; 186 187 // Object records spills information and does the hoisting. 188 HoistSpillHelper HSpiller; 189 190 // Live range weight calculator. 191 VirtRegAuxInfo &VRAI; 192 193 ~InlineSpiller() override = default; 194 195 public: 196 InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM, 197 VirtRegAuxInfo &VRAI) 198 : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()), 199 LSS(Pass.getAnalysis<LiveStacks>()), 200 MDT(Pass.getAnalysis<MachineDominatorTree>()), 201 Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM), 202 MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()), 203 TRI(*MF.getSubtarget().getRegisterInfo()), 204 MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()), 205 HSpiller(Pass, MF, VRM), VRAI(VRAI) {} 206 207 void spill(LiveRangeEdit &) override; 208 void postOptimization() override; 209 210 private: 211 bool isSnippet(const LiveInterval &SnipLI); 212 void collectRegsToSpill(); 213 214 bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); } 215 216 bool isSibling(Register Reg); 217 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI); 218 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI); 219 220 void markValueUsed(LiveInterval*, VNInfo*); 221 bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI); 222 bool reMaterializeFor(LiveInterval &, MachineInstr &MI); 223 void reMaterializeAll(); 224 225 bool coalesceStackAccess(MachineInstr *MI, Register Reg); 226 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>, 227 MachineInstr *LoadMI = nullptr); 228 void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI); 229 void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI); 230 231 void spillAroundUses(Register Reg); 232 void spillAll(); 233 }; 234 235 } // end anonymous namespace 236 237 Spiller::~Spiller() = default; 238 239 void Spiller::anchor() {} 240 241 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass, 242 MachineFunction &MF, VirtRegMap &VRM, 243 VirtRegAuxInfo &VRAI) { 244 return new InlineSpiller(Pass, MF, VRM, VRAI); 245 } 246 247 //===----------------------------------------------------------------------===// 248 // Snippets 249 //===----------------------------------------------------------------------===// 250 251 // When spilling a virtual register, we also spill any snippets it is connected 252 // to. The snippets are small live ranges that only have a single real use, 253 // leftovers from live range splitting. Spilling them enables memory operand 254 // folding or tightens the live range around the single use. 255 // 256 // This minimizes register pressure and maximizes the store-to-load distance for 257 // spill slots which can be important in tight loops. 258 259 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register, 260 /// otherwise return 0. 261 static Register isFullCopyOf(const MachineInstr &MI, Register Reg) { 262 if (!MI.isFullCopy()) 263 return Register(); 264 if (MI.getOperand(0).getReg() == Reg) 265 return MI.getOperand(1).getReg(); 266 if (MI.getOperand(1).getReg() == Reg) 267 return MI.getOperand(0).getReg(); 268 return Register(); 269 } 270 271 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) { 272 for (const MachineOperand &MO : MI.operands()) 273 if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg())) 274 LIS.getInterval(MO.getReg()); 275 } 276 277 /// isSnippet - Identify if a live interval is a snippet that should be spilled. 278 /// It is assumed that SnipLI is a virtual register with the same original as 279 /// Edit->getReg(). 280 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) { 281 Register Reg = Edit->getReg(); 282 283 // A snippet is a tiny live range with only a single instruction using it 284 // besides copies to/from Reg or spills/fills. We accept: 285 // 286 // %snip = COPY %Reg / FILL fi# 287 // %snip = USE %snip 288 // %Reg = COPY %snip / SPILL %snip, fi# 289 // 290 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI)) 291 return false; 292 293 MachineInstr *UseMI = nullptr; 294 295 // Check that all uses satisfy our criteria. 296 for (MachineRegisterInfo::reg_instr_nodbg_iterator 297 RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()), 298 E = MRI.reg_instr_nodbg_end(); 299 RI != E;) { 300 MachineInstr &MI = *RI++; 301 302 // Allow copies to/from Reg. 303 if (isFullCopyOf(MI, Reg)) 304 continue; 305 306 // Allow stack slot loads. 307 int FI; 308 if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) 309 continue; 310 311 // Allow stack slot stores. 312 if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) 313 continue; 314 315 // Allow a single additional instruction. 316 if (UseMI && &MI != UseMI) 317 return false; 318 UseMI = &MI; 319 } 320 return true; 321 } 322 323 /// collectRegsToSpill - Collect live range snippets that only have a single 324 /// real use. 325 void InlineSpiller::collectRegsToSpill() { 326 Register Reg = Edit->getReg(); 327 328 // Main register always spills. 329 RegsToSpill.assign(1, Reg); 330 SnippetCopies.clear(); 331 332 // Snippets all have the same original, so there can't be any for an original 333 // register. 334 if (Original == Reg) 335 return; 336 337 for (MachineInstr &MI : 338 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) { 339 Register SnipReg = isFullCopyOf(MI, Reg); 340 if (!isSibling(SnipReg)) 341 continue; 342 LiveInterval &SnipLI = LIS.getInterval(SnipReg); 343 if (!isSnippet(SnipLI)) 344 continue; 345 SnippetCopies.insert(&MI); 346 if (isRegToSpill(SnipReg)) 347 continue; 348 RegsToSpill.push_back(SnipReg); 349 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n'); 350 ++NumSnippets; 351 } 352 } 353 354 bool InlineSpiller::isSibling(Register Reg) { 355 return Reg.isVirtual() && VRM.getOriginal(Reg) == Original; 356 } 357 358 /// It is beneficial to spill to earlier place in the same BB in case 359 /// as follows: 360 /// There is an alternative def earlier in the same MBB. 361 /// Hoist the spill as far as possible in SpillMBB. This can ease 362 /// register pressure: 363 /// 364 /// x = def 365 /// y = use x 366 /// s = copy x 367 /// 368 /// Hoisting the spill of s to immediately after the def removes the 369 /// interference between x and y: 370 /// 371 /// x = def 372 /// spill x 373 /// y = use killed x 374 /// 375 /// This hoist only helps when the copy kills its source. 376 /// 377 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI, 378 MachineInstr &CopyMI) { 379 SlotIndex Idx = LIS.getInstructionIndex(CopyMI); 380 #ifndef NDEBUG 381 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot()); 382 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy"); 383 #endif 384 385 Register SrcReg = CopyMI.getOperand(1).getReg(); 386 LiveInterval &SrcLI = LIS.getInterval(SrcReg); 387 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx); 388 LiveQueryResult SrcQ = SrcLI.Query(Idx); 389 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def); 390 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill()) 391 return false; 392 393 // Conservatively extend the stack slot range to the range of the original 394 // value. We may be able to do better with stack slot coloring by being more 395 // careful here. 396 assert(StackInt && "No stack slot assigned yet."); 397 LiveInterval &OrigLI = LIS.getInterval(Original); 398 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx); 399 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0)); 400 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": " 401 << *StackInt << '\n'); 402 403 // We are going to spill SrcVNI immediately after its def, so clear out 404 // any later spills of the same value. 405 eliminateRedundantSpills(SrcLI, SrcVNI); 406 407 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def); 408 MachineBasicBlock::iterator MII; 409 if (SrcVNI->isPHIDef()) 410 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin()); 411 else { 412 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def); 413 assert(DefMI && "Defining instruction disappeared"); 414 MII = DefMI; 415 ++MII; 416 } 417 MachineInstrSpan MIS(MII, MBB); 418 // Insert spill without kill flag immediately after def. 419 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot, 420 MRI.getRegClass(SrcReg), &TRI); 421 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII); 422 for (const MachineInstr &MI : make_range(MIS.begin(), MII)) 423 getVDefInterval(MI, LIS); 424 --MII; // Point to store instruction. 425 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII); 426 427 // If there is only 1 store instruction is required for spill, add it 428 // to mergeable list. In X86 AMX, 2 intructions are required to store. 429 // We disable the merge for this case. 430 if (MIS.begin() == MII) 431 HSpiller.addToMergeableSpills(*MII, StackSlot, Original); 432 ++NumSpills; 433 return true; 434 } 435 436 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any 437 /// redundant spills of this value in SLI.reg and sibling copies. 438 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) { 439 assert(VNI && "Missing value"); 440 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 441 WorkList.push_back(std::make_pair(&SLI, VNI)); 442 assert(StackInt && "No stack slot assigned yet."); 443 444 do { 445 LiveInterval *LI; 446 std::tie(LI, VNI) = WorkList.pop_back_val(); 447 Register Reg = LI->reg(); 448 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@' 449 << VNI->def << " in " << *LI << '\n'); 450 451 // Regs to spill are taken care of. 452 if (isRegToSpill(Reg)) 453 continue; 454 455 // Add all of VNI's live range to StackInt. 456 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0)); 457 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n'); 458 459 // Find all spills and copies of VNI. 460 for (MachineInstr &MI : 461 llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) { 462 if (!MI.isCopy() && !MI.mayStore()) 463 continue; 464 SlotIndex Idx = LIS.getInstructionIndex(MI); 465 if (LI->getVNInfoAt(Idx) != VNI) 466 continue; 467 468 // Follow sibling copies down the dominator tree. 469 if (Register DstReg = isFullCopyOf(MI, Reg)) { 470 if (isSibling(DstReg)) { 471 LiveInterval &DstLI = LIS.getInterval(DstReg); 472 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot()); 473 assert(DstVNI && "Missing defined value"); 474 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot"); 475 WorkList.push_back(std::make_pair(&DstLI, DstVNI)); 476 } 477 continue; 478 } 479 480 // Erase spills. 481 int FI; 482 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) { 483 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI); 484 // eliminateDeadDefs won't normally remove stores, so switch opcode. 485 MI.setDesc(TII.get(TargetOpcode::KILL)); 486 DeadDefs.push_back(&MI); 487 ++NumSpillsRemoved; 488 if (HSpiller.rmFromMergeableSpills(MI, StackSlot)) 489 --NumSpills; 490 } 491 } 492 } while (!WorkList.empty()); 493 } 494 495 //===----------------------------------------------------------------------===// 496 // Rematerialization 497 //===----------------------------------------------------------------------===// 498 499 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining 500 /// instruction cannot be eliminated. See through snippet copies 501 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) { 502 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 503 WorkList.push_back(std::make_pair(LI, VNI)); 504 do { 505 std::tie(LI, VNI) = WorkList.pop_back_val(); 506 if (!UsedValues.insert(VNI).second) 507 continue; 508 509 if (VNI->isPHIDef()) { 510 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def); 511 for (MachineBasicBlock *P : MBB->predecessors()) { 512 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P)); 513 if (PVNI) 514 WorkList.push_back(std::make_pair(LI, PVNI)); 515 } 516 continue; 517 } 518 519 // Follow snippet copies. 520 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 521 if (!SnippetCopies.count(MI)) 522 continue; 523 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg()); 524 assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy"); 525 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true)); 526 assert(SnipVNI && "Snippet undefined before copy"); 527 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI)); 528 } while (!WorkList.empty()); 529 } 530 531 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg, 532 MachineInstr &MI) { 533 if (!RestrictStatepointRemat) 534 return true; 535 // Here's a quick explanation of the problem we're trying to handle here: 536 // * There are some pseudo instructions with more vreg uses than there are 537 // physical registers on the machine. 538 // * This is normally handled by spilling the vreg, and folding the reload 539 // into the user instruction. (Thus decreasing the number of used vregs 540 // until the remainder can be assigned to physregs.) 541 // * However, since we may try to spill vregs in any order, we can end up 542 // trying to spill each operand to the instruction, and then rematting it 543 // instead. When that happens, the new live intervals (for the remats) are 544 // expected to be trivially assignable (i.e. RS_Done). However, since we 545 // may have more remats than physregs, we're guaranteed to fail to assign 546 // one. 547 // At the moment, we only handle this for STATEPOINTs since they're the only 548 // pseudo op where we've seen this. If we start seeing other instructions 549 // with the same problem, we need to revisit this. 550 if (MI.getOpcode() != TargetOpcode::STATEPOINT) 551 return true; 552 // For STATEPOINTs we allow re-materialization for fixed arguments only hoping 553 // that number of physical registers is enough to cover all fixed arguments. 554 // If it is not true we need to revisit it. 555 for (unsigned Idx = StatepointOpers(&MI).getVarIdx(), 556 EndIdx = MI.getNumOperands(); 557 Idx < EndIdx; ++Idx) { 558 MachineOperand &MO = MI.getOperand(Idx); 559 if (MO.isReg() && MO.getReg() == VReg) 560 return false; 561 } 562 return true; 563 } 564 565 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading. 566 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { 567 // Analyze instruction 568 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops; 569 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops); 570 571 if (!RI.Reads) 572 return false; 573 574 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true); 575 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex()); 576 577 if (!ParentVNI) { 578 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: "); 579 for (MachineOperand &MO : MI.operands()) 580 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) 581 MO.setIsUndef(); 582 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI); 583 return true; 584 } 585 586 if (SnippetCopies.count(&MI)) 587 return false; 588 589 LiveInterval &OrigLI = LIS.getInterval(Original); 590 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx); 591 LiveRangeEdit::Remat RM(ParentVNI); 592 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def); 593 594 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) { 595 markValueUsed(&VirtReg, ParentVNI); 596 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); 597 return false; 598 } 599 600 // If the instruction also writes VirtReg.reg, it had better not require the 601 // same register for uses and defs. 602 if (RI.Tied) { 603 markValueUsed(&VirtReg, ParentVNI); 604 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI); 605 return false; 606 } 607 608 // Before rematerializing into a register for a single instruction, try to 609 // fold a load into the instruction. That avoids allocating a new register. 610 if (RM.OrigMI->canFoldAsLoad() && 611 foldMemoryOperand(Ops, RM.OrigMI)) { 612 Edit->markRematerialized(RM.ParentVNI); 613 ++NumFoldedLoads; 614 return true; 615 } 616 617 // If we can't guarantee that we'll be able to actually assign the new vreg, 618 // we can't remat. 619 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) { 620 markValueUsed(&VirtReg, ParentVNI); 621 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); 622 return false; 623 } 624 625 // Allocate a new register for the remat. 626 Register NewVReg = Edit->createFrom(Original); 627 628 // Finally we can rematerialize OrigMI before MI. 629 SlotIndex DefIdx = 630 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI); 631 632 // We take the DebugLoc from MI, since OrigMI may be attributed to a 633 // different source location. 634 auto *NewMI = LIS.getInstructionFromIndex(DefIdx); 635 NewMI->setDebugLoc(MI.getDebugLoc()); 636 637 (void)DefIdx; 638 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' 639 << *LIS.getInstructionFromIndex(DefIdx)); 640 641 // Replace operands 642 for (const auto &OpPair : Ops) { 643 MachineOperand &MO = OpPair.first->getOperand(OpPair.second); 644 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) { 645 MO.setReg(NewVReg); 646 MO.setIsKill(); 647 } 648 } 649 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n'); 650 651 ++NumRemats; 652 return true; 653 } 654 655 /// reMaterializeAll - Try to rematerialize as many uses as possible, 656 /// and trim the live ranges after. 657 void InlineSpiller::reMaterializeAll() { 658 if (!Edit->anyRematerializable()) 659 return; 660 661 UsedValues.clear(); 662 663 // Try to remat before all uses of snippets. 664 bool anyRemat = false; 665 for (Register Reg : RegsToSpill) { 666 LiveInterval &LI = LIS.getInterval(Reg); 667 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 668 // Debug values are not allowed to affect codegen. 669 if (MI.isDebugValue()) 670 continue; 671 672 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug " 673 "instruction that isn't a DBG_VALUE"); 674 675 anyRemat |= reMaterializeFor(LI, MI); 676 } 677 } 678 if (!anyRemat) 679 return; 680 681 // Remove any values that were completely rematted. 682 for (Register Reg : RegsToSpill) { 683 LiveInterval &LI = LIS.getInterval(Reg); 684 for (VNInfo *VNI : LI.vnis()) { 685 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI)) 686 continue; 687 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 688 MI->addRegisterDead(Reg, &TRI); 689 if (!MI->allDefsAreDead()) 690 continue; 691 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI); 692 DeadDefs.push_back(MI); 693 } 694 } 695 696 // Eliminate dead code after remat. Note that some snippet copies may be 697 // deleted here. 698 if (DeadDefs.empty()) 699 return; 700 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n"); 701 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill); 702 703 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions 704 // after rematerialization. To remove a VNI for a vreg from its LiveInterval, 705 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all 706 // removed, PHI VNI are still left in the LiveInterval. 707 // So to get rid of unused reg, we need to check whether it has non-dbg 708 // reference instead of whether it has non-empty interval. 709 unsigned ResultPos = 0; 710 for (Register Reg : RegsToSpill) { 711 if (MRI.reg_nodbg_empty(Reg)) { 712 Edit->eraseVirtReg(Reg); 713 continue; 714 } 715 716 assert(LIS.hasInterval(Reg) && 717 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) && 718 "Empty and not used live-range?!"); 719 720 RegsToSpill[ResultPos++] = Reg; 721 } 722 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end()); 723 LLVM_DEBUG(dbgs() << RegsToSpill.size() 724 << " registers to spill after remat.\n"); 725 } 726 727 //===----------------------------------------------------------------------===// 728 // Spilling 729 //===----------------------------------------------------------------------===// 730 731 /// If MI is a load or store of StackSlot, it can be removed. 732 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) { 733 int FI = 0; 734 Register InstrReg = TII.isLoadFromStackSlot(*MI, FI); 735 bool IsLoad = InstrReg; 736 if (!IsLoad) 737 InstrReg = TII.isStoreToStackSlot(*MI, FI); 738 739 // We have a stack access. Is it the right register and slot? 740 if (InstrReg != Reg || FI != StackSlot) 741 return false; 742 743 if (!IsLoad) 744 HSpiller.rmFromMergeableSpills(*MI, StackSlot); 745 746 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI); 747 LIS.RemoveMachineInstrFromMaps(*MI); 748 MI->eraseFromParent(); 749 750 if (IsLoad) { 751 ++NumReloadsRemoved; 752 --NumReloads; 753 } else { 754 ++NumSpillsRemoved; 755 --NumSpills; 756 } 757 758 return true; 759 } 760 761 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 762 LLVM_DUMP_METHOD 763 // Dump the range of instructions from B to E with their slot indexes. 764 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, 765 MachineBasicBlock::iterator E, 766 LiveIntervals const &LIS, 767 const char *const header, 768 Register VReg = Register()) { 769 char NextLine = '\n'; 770 char SlotIndent = '\t'; 771 772 if (std::next(B) == E) { 773 NextLine = ' '; 774 SlotIndent = ' '; 775 } 776 777 dbgs() << '\t' << header << ": " << NextLine; 778 779 for (MachineBasicBlock::iterator I = B; I != E; ++I) { 780 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot(); 781 782 // If a register was passed in and this instruction has it as a 783 // destination that is marked as an early clobber, print the 784 // early-clobber slot index. 785 if (VReg) { 786 MachineOperand *MO = I->findRegisterDefOperand(VReg); 787 if (MO && MO->isEarlyClobber()) 788 Idx = Idx.getRegSlot(true); 789 } 790 791 dbgs() << SlotIndent << Idx << '\t' << *I; 792 } 793 } 794 #endif 795 796 /// foldMemoryOperand - Try folding stack slot references in Ops into their 797 /// instructions. 798 /// 799 /// @param Ops Operand indices from AnalyzeVirtRegInBundle(). 800 /// @param LoadMI Load instruction to use instead of stack slot when non-null. 801 /// @return True on success. 802 bool InlineSpiller:: 803 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops, 804 MachineInstr *LoadMI) { 805 if (Ops.empty()) 806 return false; 807 // Don't attempt folding in bundles. 808 MachineInstr *MI = Ops.front().first; 809 if (Ops.back().first != MI || MI->isBundled()) 810 return false; 811 812 bool WasCopy = MI->isCopy(); 813 Register ImpReg; 814 815 // TII::foldMemoryOperand will do what we need here for statepoint 816 // (fold load into use and remove corresponding def). We will replace 817 // uses of removed def with loads (spillAroundUses). 818 // For that to work we need to untie def and use to pass it through 819 // foldMemoryOperand and signal foldPatchpoint that it is allowed to 820 // fold them. 821 bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT; 822 823 // Spill subregs if the target allows it. 824 // We always want to spill subregs for stackmap/patchpoint pseudos. 825 bool SpillSubRegs = TII.isSubregFoldable() || 826 MI->getOpcode() == TargetOpcode::STATEPOINT || 827 MI->getOpcode() == TargetOpcode::PATCHPOINT || 828 MI->getOpcode() == TargetOpcode::STACKMAP; 829 830 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied 831 // operands. 832 SmallVector<unsigned, 8> FoldOps; 833 for (const auto &OpPair : Ops) { 834 unsigned Idx = OpPair.second; 835 assert(MI == OpPair.first && "Instruction conflict during operand folding"); 836 MachineOperand &MO = MI->getOperand(Idx); 837 838 // No point restoring an undef read, and we'll produce an invalid live 839 // interval. 840 // TODO: Is this really the correct way to handle undef tied uses? 841 if (MO.isUse() && !MO.readsReg() && !MO.isTied()) 842 continue; 843 844 if (MO.isImplicit()) { 845 ImpReg = MO.getReg(); 846 continue; 847 } 848 849 if (!SpillSubRegs && MO.getSubReg()) 850 return false; 851 // We cannot fold a load instruction into a def. 852 if (LoadMI && MO.isDef()) 853 return false; 854 // Tied use operands should not be passed to foldMemoryOperand. 855 if (UntieRegs || !MI->isRegTiedToDefOperand(Idx)) 856 FoldOps.push_back(Idx); 857 } 858 859 // If we only have implicit uses, we won't be able to fold that. 860 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try! 861 if (FoldOps.empty()) 862 return false; 863 864 MachineInstrSpan MIS(MI, MI->getParent()); 865 866 SmallVector<std::pair<unsigned, unsigned> > TiedOps; 867 if (UntieRegs) 868 for (unsigned Idx : FoldOps) { 869 MachineOperand &MO = MI->getOperand(Idx); 870 if (!MO.isTied()) 871 continue; 872 unsigned Tied = MI->findTiedOperandIdx(Idx); 873 if (MO.isUse()) 874 TiedOps.emplace_back(Tied, Idx); 875 else { 876 assert(MO.isDef() && "Tied to not use and def?"); 877 TiedOps.emplace_back(Idx, Tied); 878 } 879 MI->untieRegOperand(Idx); 880 } 881 882 MachineInstr *FoldMI = 883 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS) 884 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM); 885 if (!FoldMI) { 886 // Re-tie operands. 887 for (auto Tied : TiedOps) 888 MI->tieOperands(Tied.first, Tied.second); 889 return false; 890 } 891 892 // Remove LIS for any dead defs in the original MI not in FoldMI. 893 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) { 894 if (!MO->isReg()) 895 continue; 896 Register Reg = MO->getReg(); 897 if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) { 898 continue; 899 } 900 // Skip non-Defs, including undef uses and internal reads. 901 if (MO->isUse()) 902 continue; 903 PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI); 904 if (RI.FullyDefined) 905 continue; 906 // FoldMI does not define this physreg. Remove the LI segment. 907 assert(MO->isDead() && "Cannot fold physreg def"); 908 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot(); 909 LIS.removePhysRegDefAt(Reg.asMCReg(), Idx); 910 } 911 912 int FI; 913 if (TII.isStoreToStackSlot(*MI, FI) && 914 HSpiller.rmFromMergeableSpills(*MI, FI)) 915 --NumSpills; 916 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI); 917 // Update the call site info. 918 if (MI->isCandidateForCallSiteEntry()) 919 MI->getMF()->moveCallSiteInfo(MI, FoldMI); 920 921 // If we've folded a store into an instruction labelled with debug-info, 922 // record a substitution from the old operand to the memory operand. Handle 923 // the simple common case where operand 0 is the one being folded, plus when 924 // the destination operand is also a tied def. More values could be 925 // substituted / preserved with more analysis. 926 if (MI->peekDebugInstrNum() && Ops[0].second == 0) { 927 // Helper lambda. 928 auto MakeSubstitution = [this,FoldMI,MI,&Ops]() { 929 // Substitute old operand zero to the new instructions memory operand. 930 unsigned OldOperandNum = Ops[0].second; 931 unsigned NewNum = FoldMI->getDebugInstrNum(); 932 unsigned OldNum = MI->getDebugInstrNum(); 933 MF.makeDebugValueSubstitution({OldNum, OldOperandNum}, 934 {NewNum, MachineFunction::DebugOperandMemNumber}); 935 }; 936 937 const MachineOperand &Op0 = MI->getOperand(Ops[0].second); 938 if (Ops.size() == 1 && Op0.isDef()) { 939 MakeSubstitution(); 940 } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() && 941 Op0.getReg() == MI->getOperand(1).getReg()) { 942 MakeSubstitution(); 943 } 944 } else if (MI->peekDebugInstrNum()) { 945 // This is a debug-labelled instruction, but the operand being folded isn't 946 // at operand zero. Most likely this means it's a load being folded in. 947 // Substitute any register defs from operand zero up to the one being 948 // folded -- past that point, we don't know what the new operand indexes 949 // will be. 950 MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second); 951 } 952 953 MI->eraseFromParent(); 954 955 // Insert any new instructions other than FoldMI into the LIS maps. 956 assert(!MIS.empty() && "Unexpected empty span of instructions!"); 957 for (MachineInstr &MI : MIS) 958 if (&MI != FoldMI) 959 LIS.InsertMachineInstrInMaps(MI); 960 961 // TII.foldMemoryOperand may have left some implicit operands on the 962 // instruction. Strip them. 963 if (ImpReg) 964 for (unsigned i = FoldMI->getNumOperands(); i; --i) { 965 MachineOperand &MO = FoldMI->getOperand(i - 1); 966 if (!MO.isReg() || !MO.isImplicit()) 967 break; 968 if (MO.getReg() == ImpReg) 969 FoldMI->removeOperand(i - 1); 970 } 971 972 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS, 973 "folded")); 974 975 if (!WasCopy) 976 ++NumFolded; 977 else if (Ops.front().second == 0) { 978 ++NumSpills; 979 // If there is only 1 store instruction is required for spill, add it 980 // to mergeable list. In X86 AMX, 2 intructions are required to store. 981 // We disable the merge for this case. 982 if (std::distance(MIS.begin(), MIS.end()) <= 1) 983 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original); 984 } else 985 ++NumReloads; 986 return true; 987 } 988 989 void InlineSpiller::insertReload(Register NewVReg, 990 SlotIndex Idx, 991 MachineBasicBlock::iterator MI) { 992 MachineBasicBlock &MBB = *MI->getParent(); 993 994 MachineInstrSpan MIS(MI, &MBB); 995 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot, 996 MRI.getRegClass(NewVReg), &TRI); 997 998 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI); 999 1000 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload", 1001 NewVReg)); 1002 ++NumReloads; 1003 } 1004 1005 /// Check if \p Def fully defines a VReg with an undefined value. 1006 /// If that's the case, that means the value of VReg is actually 1007 /// not relevant. 1008 static bool isRealSpill(const MachineInstr &Def) { 1009 if (!Def.isImplicitDef()) 1010 return true; 1011 assert(Def.getNumOperands() == 1 && 1012 "Implicit def with more than one definition"); 1013 // We can say that the VReg defined by Def is undef, only if it is 1014 // fully defined by Def. Otherwise, some of the lanes may not be 1015 // undef and the value of the VReg matters. 1016 return Def.getOperand(0).getSubReg(); 1017 } 1018 1019 /// insertSpill - Insert a spill of NewVReg after MI. 1020 void InlineSpiller::insertSpill(Register NewVReg, bool isKill, 1021 MachineBasicBlock::iterator MI) { 1022 // Spill are not terminators, so inserting spills after terminators will 1023 // violate invariants in MachineVerifier. 1024 assert(!MI->isTerminator() && "Inserting a spill after a terminator"); 1025 MachineBasicBlock &MBB = *MI->getParent(); 1026 1027 MachineInstrSpan MIS(MI, &MBB); 1028 MachineBasicBlock::iterator SpillBefore = std::next(MI); 1029 bool IsRealSpill = isRealSpill(*MI); 1030 1031 if (IsRealSpill) 1032 TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot, 1033 MRI.getRegClass(NewVReg), &TRI); 1034 else 1035 // Don't spill undef value. 1036 // Anything works for undef, in particular keeping the memory 1037 // uninitialized is a viable option and it saves code size and 1038 // run time. 1039 BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL)) 1040 .addReg(NewVReg, getKillRegState(isKill)); 1041 1042 MachineBasicBlock::iterator Spill = std::next(MI); 1043 LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end()); 1044 for (const MachineInstr &MI : make_range(Spill, MIS.end())) 1045 getVDefInterval(MI, LIS); 1046 1047 LLVM_DEBUG( 1048 dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill")); 1049 ++NumSpills; 1050 // If there is only 1 store instruction is required for spill, add it 1051 // to mergeable list. In X86 AMX, 2 intructions are required to store. 1052 // We disable the merge for this case. 1053 if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1) 1054 HSpiller.addToMergeableSpills(*Spill, StackSlot, Original); 1055 } 1056 1057 /// spillAroundUses - insert spill code around each use of Reg. 1058 void InlineSpiller::spillAroundUses(Register Reg) { 1059 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n'); 1060 LiveInterval &OldLI = LIS.getInterval(Reg); 1061 1062 // Iterate over instructions using Reg. 1063 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 1064 // Debug values are not allowed to affect codegen. 1065 if (MI.isDebugValue()) { 1066 // Modify DBG_VALUE now that the value is in a spill slot. 1067 MachineBasicBlock *MBB = MI.getParent(); 1068 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI); 1069 buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg); 1070 MBB->erase(MI); 1071 continue; 1072 } 1073 1074 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug " 1075 "instruction that isn't a DBG_VALUE"); 1076 1077 // Ignore copies to/from snippets. We'll delete them. 1078 if (SnippetCopies.count(&MI)) 1079 continue; 1080 1081 // Stack slot accesses may coalesce away. 1082 if (coalesceStackAccess(&MI, Reg)) 1083 continue; 1084 1085 // Analyze instruction. 1086 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops; 1087 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops); 1088 1089 // Find the slot index where this instruction reads and writes OldLI. 1090 // This is usually the def slot, except for tied early clobbers. 1091 SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot(); 1092 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true))) 1093 if (SlotIndex::isSameInstr(Idx, VNI->def)) 1094 Idx = VNI->def; 1095 1096 // Check for a sibling copy. 1097 Register SibReg = isFullCopyOf(MI, Reg); 1098 if (SibReg && isSibling(SibReg)) { 1099 // This may actually be a copy between snippets. 1100 if (isRegToSpill(SibReg)) { 1101 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI); 1102 SnippetCopies.insert(&MI); 1103 continue; 1104 } 1105 if (RI.Writes) { 1106 if (hoistSpillInsideBB(OldLI, MI)) { 1107 // This COPY is now dead, the value is already in the stack slot. 1108 MI.getOperand(0).setIsDead(); 1109 DeadDefs.push_back(&MI); 1110 continue; 1111 } 1112 } else { 1113 // This is a reload for a sib-reg copy. Drop spills downstream. 1114 LiveInterval &SibLI = LIS.getInterval(SibReg); 1115 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx)); 1116 // The COPY will fold to a reload below. 1117 } 1118 } 1119 1120 // Attempt to fold memory ops. 1121 if (foldMemoryOperand(Ops)) 1122 continue; 1123 1124 // Create a new virtual register for spill/fill. 1125 // FIXME: Infer regclass from instruction alone. 1126 Register NewVReg = Edit->createFrom(Reg); 1127 1128 if (RI.Reads) 1129 insertReload(NewVReg, Idx, &MI); 1130 1131 // Rewrite instruction operands. 1132 bool hasLiveDef = false; 1133 for (const auto &OpPair : Ops) { 1134 MachineOperand &MO = OpPair.first->getOperand(OpPair.second); 1135 MO.setReg(NewVReg); 1136 if (MO.isUse()) { 1137 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second)) 1138 MO.setIsKill(); 1139 } else { 1140 if (!MO.isDead()) 1141 hasLiveDef = true; 1142 } 1143 } 1144 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n'); 1145 1146 // FIXME: Use a second vreg if instruction has no tied ops. 1147 if (RI.Writes) 1148 if (hasLiveDef) 1149 insertSpill(NewVReg, true, &MI); 1150 } 1151 } 1152 1153 /// spillAll - Spill all registers remaining after rematerialization. 1154 void InlineSpiller::spillAll() { 1155 // Update LiveStacks now that we are committed to spilling. 1156 if (StackSlot == VirtRegMap::NO_STACK_SLOT) { 1157 StackSlot = VRM.assignVirt2StackSlot(Original); 1158 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original)); 1159 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator()); 1160 } else 1161 StackInt = &LSS.getInterval(StackSlot); 1162 1163 if (Original != Edit->getReg()) 1164 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot); 1165 1166 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values"); 1167 for (Register Reg : RegsToSpill) 1168 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg), 1169 StackInt->getValNumInfo(0)); 1170 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n'); 1171 1172 // Spill around uses of all RegsToSpill. 1173 for (Register Reg : RegsToSpill) 1174 spillAroundUses(Reg); 1175 1176 // Hoisted spills may cause dead code. 1177 if (!DeadDefs.empty()) { 1178 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n"); 1179 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill); 1180 } 1181 1182 // Finally delete the SnippetCopies. 1183 for (Register Reg : RegsToSpill) { 1184 for (MachineInstr &MI : 1185 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) { 1186 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy"); 1187 // FIXME: Do this with a LiveRangeEdit callback. 1188 LIS.RemoveMachineInstrFromMaps(MI); 1189 MI.eraseFromParent(); 1190 } 1191 } 1192 1193 // Delete all spilled registers. 1194 for (Register Reg : RegsToSpill) 1195 Edit->eraseVirtReg(Reg); 1196 } 1197 1198 void InlineSpiller::spill(LiveRangeEdit &edit) { 1199 ++NumSpilledRanges; 1200 Edit = &edit; 1201 assert(!Register::isStackSlot(edit.getReg()) && 1202 "Trying to spill a stack slot."); 1203 // Share a stack slot among all descendants of Original. 1204 Original = VRM.getOriginal(edit.getReg()); 1205 StackSlot = VRM.getStackSlot(Original); 1206 StackInt = nullptr; 1207 1208 LLVM_DEBUG(dbgs() << "Inline spilling " 1209 << TRI.getRegClassName(MRI.getRegClass(edit.getReg())) 1210 << ':' << edit.getParent() << "\nFrom original " 1211 << printReg(Original) << '\n'); 1212 assert(edit.getParent().isSpillable() && 1213 "Attempting to spill already spilled value."); 1214 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs"); 1215 1216 collectRegsToSpill(); 1217 reMaterializeAll(); 1218 1219 // Remat may handle everything. 1220 if (!RegsToSpill.empty()) 1221 spillAll(); 1222 1223 Edit->calculateRegClassAndHint(MF, VRAI); 1224 } 1225 1226 /// Optimizations after all the reg selections and spills are done. 1227 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); } 1228 1229 /// When a spill is inserted, add the spill to MergeableSpills map. 1230 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot, 1231 unsigned Original) { 1232 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator(); 1233 LiveInterval &OrigLI = LIS.getInterval(Original); 1234 // save a copy of LiveInterval in StackSlotToOrigLI because the original 1235 // LiveInterval may be cleared after all its references are spilled. 1236 if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) { 1237 auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight()); 1238 LI->assign(OrigLI, Allocator); 1239 StackSlotToOrigLI[StackSlot] = std::move(LI); 1240 } 1241 SlotIndex Idx = LIS.getInstructionIndex(Spill); 1242 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot()); 1243 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI); 1244 MergeableSpills[MIdx].insert(&Spill); 1245 } 1246 1247 /// When a spill is removed, remove the spill from MergeableSpills map. 1248 /// Return true if the spill is removed successfully. 1249 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill, 1250 int StackSlot) { 1251 auto It = StackSlotToOrigLI.find(StackSlot); 1252 if (It == StackSlotToOrigLI.end()) 1253 return false; 1254 SlotIndex Idx = LIS.getInstructionIndex(Spill); 1255 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot()); 1256 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI); 1257 return MergeableSpills[MIdx].erase(&Spill); 1258 } 1259 1260 /// Check BB to see if it is a possible target BB to place a hoisted spill, 1261 /// i.e., there should be a living sibling of OrigReg at the insert point. 1262 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI, 1263 MachineBasicBlock &BB, Register &LiveReg) { 1264 SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB); 1265 // The original def could be after the last insert point in the root block, 1266 // we can't hoist to here. 1267 if (Idx < OrigVNI.def) { 1268 // TODO: We could be better here. If LI is not alive in landing pad 1269 // we could hoist spill after LIP. 1270 LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n"); 1271 return false; 1272 } 1273 Register OrigReg = OrigLI.reg(); 1274 SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg]; 1275 assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI"); 1276 1277 for (const Register &SibReg : Siblings) { 1278 LiveInterval &LI = LIS.getInterval(SibReg); 1279 VNInfo *VNI = LI.getVNInfoAt(Idx); 1280 if (VNI) { 1281 LiveReg = SibReg; 1282 return true; 1283 } 1284 } 1285 return false; 1286 } 1287 1288 /// Remove redundant spills in the same BB. Save those redundant spills in 1289 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map. 1290 void HoistSpillHelper::rmRedundantSpills( 1291 SmallPtrSet<MachineInstr *, 16> &Spills, 1292 SmallVectorImpl<MachineInstr *> &SpillsToRm, 1293 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) { 1294 // For each spill saw, check SpillBBToSpill[] and see if its BB already has 1295 // another spill inside. If a BB contains more than one spill, only keep the 1296 // earlier spill with smaller SlotIndex. 1297 for (auto *const CurrentSpill : Spills) { 1298 MachineBasicBlock *Block = CurrentSpill->getParent(); 1299 MachineDomTreeNode *Node = MDT.getBase().getNode(Block); 1300 MachineInstr *PrevSpill = SpillBBToSpill[Node]; 1301 if (PrevSpill) { 1302 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill); 1303 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill); 1304 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill; 1305 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill; 1306 SpillsToRm.push_back(SpillToRm); 1307 SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep; 1308 } else { 1309 SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill; 1310 } 1311 } 1312 for (auto *const SpillToRm : SpillsToRm) 1313 Spills.erase(SpillToRm); 1314 } 1315 1316 /// Starting from \p Root find a top-down traversal order of the dominator 1317 /// tree to visit all basic blocks containing the elements of \p Spills. 1318 /// Redundant spills will be found and put into \p SpillsToRm at the same 1319 /// time. \p SpillBBToSpill will be populated as part of the process and 1320 /// maps a basic block to the first store occurring in the basic block. 1321 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre 1322 void HoistSpillHelper::getVisitOrders( 1323 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills, 1324 SmallVectorImpl<MachineDomTreeNode *> &Orders, 1325 SmallVectorImpl<MachineInstr *> &SpillsToRm, 1326 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep, 1327 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) { 1328 // The set contains all the possible BB nodes to which we may hoist 1329 // original spills. 1330 SmallPtrSet<MachineDomTreeNode *, 8> WorkSet; 1331 // Save the BB nodes on the path from the first BB node containing 1332 // non-redundant spill to the Root node. 1333 SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath; 1334 // All the spills to be hoisted must originate from a single def instruction 1335 // to the OrigReg. It means the def instruction should dominate all the spills 1336 // to be hoisted. We choose the BB where the def instruction is located as 1337 // the Root. 1338 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom(); 1339 // For every node on the dominator tree with spill, walk up on the dominator 1340 // tree towards the Root node until it is reached. If there is other node 1341 // containing spill in the middle of the path, the previous spill saw will 1342 // be redundant and the node containing it will be removed. All the nodes on 1343 // the path starting from the first node with non-redundant spill to the Root 1344 // node will be added to the WorkSet, which will contain all the possible 1345 // locations where spills may be hoisted to after the loop below is done. 1346 for (auto *const Spill : Spills) { 1347 MachineBasicBlock *Block = Spill->getParent(); 1348 MachineDomTreeNode *Node = MDT[Block]; 1349 MachineInstr *SpillToRm = nullptr; 1350 while (Node != RootIDomNode) { 1351 // If Node dominates Block, and it already contains a spill, the spill in 1352 // Block will be redundant. 1353 if (Node != MDT[Block] && SpillBBToSpill[Node]) { 1354 SpillToRm = SpillBBToSpill[MDT[Block]]; 1355 break; 1356 /// If we see the Node already in WorkSet, the path from the Node to 1357 /// the Root node must already be traversed by another spill. 1358 /// Then no need to repeat. 1359 } else if (WorkSet.count(Node)) { 1360 break; 1361 } else { 1362 NodesOnPath.insert(Node); 1363 } 1364 Node = Node->getIDom(); 1365 } 1366 if (SpillToRm) { 1367 SpillsToRm.push_back(SpillToRm); 1368 } else { 1369 // Add a BB containing the original spills to SpillsToKeep -- i.e., 1370 // set the initial status before hoisting start. The value of BBs 1371 // containing original spills is set to 0, in order to descriminate 1372 // with BBs containing hoisted spills which will be inserted to 1373 // SpillsToKeep later during hoisting. 1374 SpillsToKeep[MDT[Block]] = 0; 1375 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end()); 1376 } 1377 NodesOnPath.clear(); 1378 } 1379 1380 // Sort the nodes in WorkSet in top-down order and save the nodes 1381 // in Orders. Orders will be used for hoisting in runHoistSpills. 1382 unsigned idx = 0; 1383 Orders.push_back(MDT.getBase().getNode(Root)); 1384 do { 1385 MachineDomTreeNode *Node = Orders[idx++]; 1386 for (MachineDomTreeNode *Child : Node->children()) { 1387 if (WorkSet.count(Child)) 1388 Orders.push_back(Child); 1389 } 1390 } while (idx != Orders.size()); 1391 assert(Orders.size() == WorkSet.size() && 1392 "Orders have different size with WorkSet"); 1393 1394 #ifndef NDEBUG 1395 LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n"); 1396 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin(); 1397 for (; RIt != Orders.rend(); RIt++) 1398 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ","); 1399 LLVM_DEBUG(dbgs() << "\n"); 1400 #endif 1401 } 1402 1403 /// Try to hoist spills according to BB hotness. The spills to removed will 1404 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in 1405 /// \p SpillsToIns. 1406 void HoistSpillHelper::runHoistSpills( 1407 LiveInterval &OrigLI, VNInfo &OrigVNI, 1408 SmallPtrSet<MachineInstr *, 16> &Spills, 1409 SmallVectorImpl<MachineInstr *> &SpillsToRm, 1410 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) { 1411 // Visit order of dominator tree nodes. 1412 SmallVector<MachineDomTreeNode *, 32> Orders; 1413 // SpillsToKeep contains all the nodes where spills are to be inserted 1414 // during hoisting. If the spill to be inserted is an original spill 1415 // (not a hoisted one), the value of the map entry is 0. If the spill 1416 // is a hoisted spill, the value of the map entry is the VReg to be used 1417 // as the source of the spill. 1418 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep; 1419 // Map from BB to the first spill inside of it. 1420 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill; 1421 1422 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill); 1423 1424 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def); 1425 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep, 1426 SpillBBToSpill); 1427 1428 // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of 1429 // nodes set and the cost of all the spills inside those nodes. 1430 // The nodes set are the locations where spills are to be inserted 1431 // in the subtree of current node. 1432 using NodesCostPair = 1433 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>; 1434 DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap; 1435 1436 // Iterate Orders set in reverse order, which will be a bottom-up order 1437 // in the dominator tree. Once we visit a dom tree node, we know its 1438 // children have already been visited and the spill locations in the 1439 // subtrees of all the children have been determined. 1440 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin(); 1441 for (; RIt != Orders.rend(); RIt++) { 1442 MachineBasicBlock *Block = (*RIt)->getBlock(); 1443 1444 // If Block contains an original spill, simply continue. 1445 if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) { 1446 SpillsInSubTreeMap[*RIt].first.insert(*RIt); 1447 // SpillsInSubTreeMap[*RIt].second contains the cost of spill. 1448 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block); 1449 continue; 1450 } 1451 1452 // Collect spills in subtree of current node (*RIt) to 1453 // SpillsInSubTreeMap[*RIt].first. 1454 for (MachineDomTreeNode *Child : (*RIt)->children()) { 1455 if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end()) 1456 continue; 1457 // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below 1458 // should be placed before getting the begin and end iterators of 1459 // SpillsInSubTreeMap[Child].first, or else the iterators may be 1460 // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time 1461 // and the map grows and then the original buckets in the map are moved. 1462 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree = 1463 SpillsInSubTreeMap[*RIt].first; 1464 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second; 1465 SubTreeCost += SpillsInSubTreeMap[Child].second; 1466 auto BI = SpillsInSubTreeMap[Child].first.begin(); 1467 auto EI = SpillsInSubTreeMap[Child].first.end(); 1468 SpillsInSubTree.insert(BI, EI); 1469 SpillsInSubTreeMap.erase(Child); 1470 } 1471 1472 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree = 1473 SpillsInSubTreeMap[*RIt].first; 1474 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second; 1475 // No spills in subtree, simply continue. 1476 if (SpillsInSubTree.empty()) 1477 continue; 1478 1479 // Check whether Block is a possible candidate to insert spill. 1480 Register LiveReg; 1481 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg)) 1482 continue; 1483 1484 // If there are multiple spills that could be merged, bias a little 1485 // to hoist the spill. 1486 BranchProbability MarginProb = (SpillsInSubTree.size() > 1) 1487 ? BranchProbability(9, 10) 1488 : BranchProbability(1, 1); 1489 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) { 1490 // Hoist: Move spills to current Block. 1491 for (auto *const SpillBB : SpillsInSubTree) { 1492 // When SpillBB is a BB contains original spill, insert the spill 1493 // to SpillsToRm. 1494 if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() && 1495 !SpillsToKeep[SpillBB]) { 1496 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB]; 1497 SpillsToRm.push_back(SpillToRm); 1498 } 1499 // SpillBB will not contain spill anymore, remove it from SpillsToKeep. 1500 SpillsToKeep.erase(SpillBB); 1501 } 1502 // Current Block is the BB containing the new hoisted spill. Add it to 1503 // SpillsToKeep. LiveReg is the source of the new spill. 1504 SpillsToKeep[*RIt] = LiveReg; 1505 LLVM_DEBUG({ 1506 dbgs() << "spills in BB: "; 1507 for (const auto Rspill : SpillsInSubTree) 1508 dbgs() << Rspill->getBlock()->getNumber() << " "; 1509 dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber() 1510 << "\n"; 1511 }); 1512 SpillsInSubTree.clear(); 1513 SpillsInSubTree.insert(*RIt); 1514 SubTreeCost = MBFI.getBlockFreq(Block); 1515 } 1516 } 1517 // For spills in SpillsToKeep with LiveReg set (i.e., not original spill), 1518 // save them to SpillsToIns. 1519 for (const auto &Ent : SpillsToKeep) { 1520 if (Ent.second) 1521 SpillsToIns[Ent.first->getBlock()] = Ent.second; 1522 } 1523 } 1524 1525 /// For spills with equal values, remove redundant spills and hoist those left 1526 /// to less hot spots. 1527 /// 1528 /// Spills with equal values will be collected into the same set in 1529 /// MergeableSpills when spill is inserted. These equal spills are originated 1530 /// from the same defining instruction and are dominated by the instruction. 1531 /// Before hoisting all the equal spills, redundant spills inside in the same 1532 /// BB are first marked to be deleted. Then starting from the spills left, walk 1533 /// up on the dominator tree towards the Root node where the define instruction 1534 /// is located, mark the dominated spills to be deleted along the way and 1535 /// collect the BB nodes on the path from non-dominated spills to the define 1536 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places 1537 /// where we are considering to hoist the spills. We iterate the WorkSet in 1538 /// bottom-up order, and for each node, we will decide whether to hoist spills 1539 /// inside its subtree to that node. In this way, we can get benefit locally 1540 /// even if hoisting all the equal spills to one cold place is impossible. 1541 void HoistSpillHelper::hoistAllSpills() { 1542 SmallVector<Register, 4> NewVRegs; 1543 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this); 1544 1545 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) { 1546 Register Reg = Register::index2VirtReg(i); 1547 Register Original = VRM.getPreSplitReg(Reg); 1548 if (!MRI.def_empty(Reg)) 1549 Virt2SiblingsMap[Original].insert(Reg); 1550 } 1551 1552 // Each entry in MergeableSpills contains a spill set with equal values. 1553 for (auto &Ent : MergeableSpills) { 1554 int Slot = Ent.first.first; 1555 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot]; 1556 VNInfo *OrigVNI = Ent.first.second; 1557 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second; 1558 if (Ent.second.empty()) 1559 continue; 1560 1561 LLVM_DEBUG({ 1562 dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n" 1563 << "Equal spills in BB: "; 1564 for (const auto spill : EqValSpills) 1565 dbgs() << spill->getParent()->getNumber() << " "; 1566 dbgs() << "\n"; 1567 }); 1568 1569 // SpillsToRm is the spill set to be removed from EqValSpills. 1570 SmallVector<MachineInstr *, 16> SpillsToRm; 1571 // SpillsToIns is the spill set to be newly inserted after hoisting. 1572 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns; 1573 1574 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns); 1575 1576 LLVM_DEBUG({ 1577 dbgs() << "Finally inserted spills in BB: "; 1578 for (const auto &Ispill : SpillsToIns) 1579 dbgs() << Ispill.first->getNumber() << " "; 1580 dbgs() << "\nFinally removed spills in BB: "; 1581 for (const auto Rspill : SpillsToRm) 1582 dbgs() << Rspill->getParent()->getNumber() << " "; 1583 dbgs() << "\n"; 1584 }); 1585 1586 // Stack live range update. 1587 LiveInterval &StackIntvl = LSS.getInterval(Slot); 1588 if (!SpillsToIns.empty() || !SpillsToRm.empty()) 1589 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI, 1590 StackIntvl.getValNumInfo(0)); 1591 1592 // Insert hoisted spills. 1593 for (auto const &Insert : SpillsToIns) { 1594 MachineBasicBlock *BB = Insert.first; 1595 Register LiveReg = Insert.second; 1596 MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB); 1597 MachineInstrSpan MIS(MII, BB); 1598 TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot, 1599 MRI.getRegClass(LiveReg), &TRI); 1600 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII); 1601 for (const MachineInstr &MI : make_range(MIS.begin(), MII)) 1602 getVDefInterval(MI, LIS); 1603 ++NumSpills; 1604 } 1605 1606 // Remove redundant spills or change them to dead instructions. 1607 NumSpills -= SpillsToRm.size(); 1608 for (auto *const RMEnt : SpillsToRm) { 1609 RMEnt->setDesc(TII.get(TargetOpcode::KILL)); 1610 for (unsigned i = RMEnt->getNumOperands(); i; --i) { 1611 MachineOperand &MO = RMEnt->getOperand(i - 1); 1612 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead()) 1613 RMEnt->removeOperand(i - 1); 1614 } 1615 } 1616 Edit.eliminateDeadDefs(SpillsToRm, None); 1617 } 1618 } 1619 1620 /// For VirtReg clone, the \p New register should have the same physreg or 1621 /// stackslot as the \p old register. 1622 void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) { 1623 if (VRM.hasPhys(Old)) 1624 VRM.assignVirt2Phys(New, VRM.getPhys(Old)); 1625 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT) 1626 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old)); 1627 else 1628 llvm_unreachable("VReg should be assigned either physreg or stackslot"); 1629 if (VRM.hasShape(Old)) 1630 VRM.assignVirt2Shape(New, VRM.getShape(Old)); 1631 } 1632