//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
STATISTIC(NumSnippets, "Number of spilled snippets");
STATISTIC(NumSpills, "Number of spills inserted");
STATISTIC(NumSpillsRemoved, "Number of spills removed");
STATISTIC(NumReloads, "Number of reloads inserted");
STATISTIC(NumReloadsRemoved, "Number of reloads removed");
STATISTIC(NumFolded, "Number of folded stack accesses");
STATISTIC(NumFoldedLoads, "Number of folded loads");
STATISTIC(NumRemats, "Number of rematerialized defs for spilling");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));
static cl::opt<bool>
RestrictStatepointRemat("restrict-statepoint-remat",
                        cl::init(false), cl::Hidden,
                        cl::desc("Restrict remat for statepoint operands"));

namespace {

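/// HoistSpillHelper records the spills inserted while spilling (see
/// addToMergeableSpills), and after register allocation merges spills of the
/// same value and re-inserts them in colder blocks of the dominator tree
/// (see hoistAllSpills).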
class HoistSpillHelper : private LiveRangeEdit::Delegate {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  InsertPointAnalysis IPA;

  // Map from StackSlot to the LiveInterval of the original register.
  // Note the LiveInterval of the original register may have been deleted
  // after it is spilled. We keep a copy here to track the range where
  // spills can be moved.
  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;

  // Map from pair of (StackSlot and Original VNI) to a set of spills which
  // have the same stackslot and have equal values defined by Original VNI.
  // These spills are mergeable and are hoist candidates.
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;

  /// This is the map from original register to a set containing all its
  /// siblings. To hoist a spill to another BB, we need to find a live
  /// sibling there and use it as the source of the new spill.
  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;

  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                     MachineBasicBlock &BB, Register &LiveReg);

  void rmRedundantSpills(
      SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void getVisitOrders(
      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineDomTreeNode *> &Orders,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
                      SmallPtrSet<MachineInstr *, 16> &Spills,
                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);

public:
  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  void LRE_DidCloneVirtReg(Register, Register) override;
};

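/// Spiller implementation that rewrites the machine function in place: it
/// rematerializes values where possible and inserts spill and reload
/// instructions directly around the remaining uses, sharing one stack slot
/// among all registers split from the same original register.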
class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  Register Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<Register, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  // Object records spills information and does the hoisting.
  HoistSpillHelper HSpiller;

  // Live range weight calculator.
  VirtRegAuxInfo &VRAI;

  ~InlineSpiller() override = default;

public:
  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
                VirtRegAuxInfo &VRAI)
      : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
        LSS(Pass.getAnalysis<LiveStacks>()),
        MDT(Pass.getAnalysis<MachineDominatorTree>()),
        Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
        MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
        TRI(*MF.getSubtarget().getRegisterInfo()),
        MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
        HSpiller(Pass, MF, VRM), VRAI(VRAI) {}

  void spill(LiveRangeEdit &) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(Register Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(Register Reg);
  void spillAll();
};

} // end anonymous namespace

Spiller::~Spiller() = default;

void Spiller::anchor() {}

Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
                                   MachineFunction &MF, VirtRegMap &VRM,
                                   VirtRegAuxInfo &VRAI) {
  return new InlineSpiller(Pass, MF, VRM, VRAI);
}

//===----------------------------------------------------------------------===//
// Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return an invalid register.
static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
  if (!MI.isFullCopy())
    return Register();
  if (MI.getOperand(0).getReg() == Reg)
    return MI.getOperand(1).getReg();
  if (MI.getOperand(1).getReg() == Reg)
    return MI.getOperand(0).getReg();
  return Register();
}

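/// Make sure LiveIntervals has an interval for every virtual register defined
/// by \p MI. Querying getInterval() computes a missing interval on demand,
/// which is needed for registers first defined by newly inserted spill code.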
static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
  for (const MachineOperand &MO : MI.operands())
    if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
      LIS.getInterval(MO.getReg());
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  Register Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills.
  // An exception is made for statepoint instructions, which will fold fills
  // into their operands.
  // We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %snip = STATEPOINT %snip in var arg area
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (!LIS.intervalIsInOneMBB(SnipLI))
    return false;

  // Number of defs should not exceed 2, not counting defs coming from
  // statepoint instructions.
  unsigned NumValNums = SnipLI.getNumValNums();
  for (auto *VNI : SnipLI.vnis()) {
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (MI->getOpcode() == TargetOpcode::STATEPOINT)
      --NumValNums;
  }
  if (NumValNums > 2)
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
           E = MRI.reg_instr_nodbg_end();
       RI != E;) {
    MachineInstr &MI = *RI++;

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    if (StatepointOpers::isFoldableReg(&MI, SnipLI.reg()))
      continue;

    // Allow a single additional instruction.
    if (UseMI && &MI != UseMI)
      return false;
    UseMI = &MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  Register Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineInstr &MI :
       llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
    Register SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}

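/// isSibling - Return true if \p Reg is a sibling of the register being
/// spilled, i.e. a virtual register created by splitting the same original
/// register (according to VirtRegMap::getOriginal).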
bool InlineSpiller::isSibling(Register Reg) {
  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
}

/// It is beneficial to spill to an earlier place in the same BB in cases
/// like the following:
/// There is an alternative def earlier in the same MBB.
/// Hoist the spill as far as possible in SpillMBB. This can ease
/// register pressure:
///
///   x = def
///   y = use x
///   s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
///   x = def
///   spill x
///   y = use killed x
///
/// This hoist only helps when the copy kills its source.
///
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
#ifndef NDEBUG
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif

  Register SrcReg = CopyMI.getOperand(1).getReg();
  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
  LiveQueryResult SrcQ = SrcLI.Query(Idx);
  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
                    << *StackInt << '\n');

  // We are going to spill SrcVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SrcLI, SrcVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
  MachineBasicBlock::iterator MII;
  if (SrcVNI->isPHIDef())
    MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  MachineInstrSpan MIS(MII, MBB);
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                          MRI.getRegClass(SrcReg), &TRI, Register());
  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
    getVDefInterval(MI, LIS);
  --MII; // Point to store instruction.
  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86 AMX, two instructions are required to store,
  // so we disable merging for that case.
  if (MIS.begin() == MII)
    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    Register Reg = LI->reg();
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) {
      if (!MI.isCopy() && !MI.mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (Register DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        ++NumSpillsRemoved;
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      }
    }
  } while (!WorkList.empty());
}

//===----------------------------------------------------------------------===//
// Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

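/// Return true if rematerializing the def of \p VReg directly before \p MI is
/// known to leave the new virtual register assignable to a physical register.
/// See the STATEPOINT discussion in the function body for the case where this
/// cannot be guaranteed.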
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
                                                     MachineInstr &MI) {
  if (!RestrictStatepointRemat)
    return true;
  // Here's a quick explanation of the problem we're trying to handle here:
  // * There are some pseudo instructions with more vreg uses than there are
  //   physical registers on the machine.
  // * This is normally handled by spilling the vreg, and folding the reload
  //   into the user instruction. (Thus decreasing the number of used vregs
  //   until the remainder can be assigned to physregs.)
  // * However, since we may try to spill vregs in any order, we can end up
  //   trying to spill each operand to the instruction, and then rematting it
  //   instead. When that happens, the new live intervals (for the remats) are
  //   expected to be trivially assignable (i.e. RS_Done). However, since we
  //   may have more remats than physregs, we're guaranteed to fail to assign
  //   one.
  // At the moment, we only handle this for STATEPOINTs since they're the only
  // pseudo op where we've seen this. If we start seeing other instructions
  // with the same problem, we need to revisit this.
  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
    return true;
  // For STATEPOINTs we allow re-materialization for fixed arguments only,
  // hoping that the number of physical registers is enough to cover all fixed
  // arguments. If it is not, we need to revisit this.
  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                EndIdx = MI.getNumOperands();
       Idx < EndIdx; ++Idx) {
    MachineOperand &MO = MI.getOperand(Idx);
    if (MO.isReg() && MO.getReg() == VReg)
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // Analyze instruction.
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);

  if (!RI.Reads)
    return false;

  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (MachineOperand &MO : MI.operands())
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
        MO.setIsUndef();
    LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
    return true;
  }

  if (SnippetCopies.count(&MI))
    return false;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
  LiveRangeEdit::Remat RM(ParentVNI);
  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);

  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // If we can't guarantee that we'll be able to actually assign the new vreg,
  // we can't remat.
  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // Allocate a new register for the remat.
  Register NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

  // We take the DebugLoc from MI, since OrigMI may be attributed to a
  // different source location.
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  (void)DefIdx;
  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
                    << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands.
  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');

  ++NumRemats;
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  if (!Edit->anyRematerializable())
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
             "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (VNInfo *VNI : LI.vnis()) {
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
  // removed, PHI VNIs are still left in the LiveInterval.
  // So to get rid of an unused reg, we need to check whether it still has any
  // non-dbg references instead of whether it has a non-empty interval.
  unsigned ResultPos = 0;
  for (Register Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      continue;
    }

    assert(LIS.hasInterval(Reg) &&
           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << RegsToSpill.size()
                    << " registers to spill after remat.\n");
}

//===----------------------------------------------------------------------===//
// Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
  int FI = 0;
  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(*MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  if (!IsLoad)
    HSpiller.rmFromMergeableSpills(*MI, StackSlot);

  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               Register VReg = Register()) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops Operand indices from AnalyzeVirtRegInBundle().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  Register ImpReg;

  // TII::foldMemoryOperand will do what we need here for statepoint
  // (fold load into use and remove corresponding def). We will replace
  // uses of removed def with loads (spillAroundUses).
  // For that to work we need to untie def and use to pass it through
  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
  // fold them.
  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);

    // No point restoring an undef read, and we'll produce an invalid live
    // interval.
    // TODO: Is this really the correct way to handle undef tied uses?
    if (MO.isUse() && !MO.readsReg() && !MO.isTied())
      continue;

    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }

    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // If we only have implicit uses, we won't be able to fold that.
  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
  if (FoldOps.empty())
    return false;

  MachineInstrSpan MIS(MI, MI->getParent());

  SmallVector<std::pair<unsigned, unsigned> > TiedOps;
  if (UntieRegs)
    for (unsigned Idx : FoldOps) {
      MachineOperand &MO = MI->getOperand(Idx);
      if (!MO.isTied())
        continue;
      unsigned Tied = MI->findTiedOperandIdx(Idx);
      if (MO.isUse())
        TiedOps.emplace_back(Tied, Idx);
      else {
        assert(MO.isDef() && "Tied to not use and def?");
        TiedOps.emplace_back(Idx, Tied);
      }
      MI->untieRegOperand(Idx);
    }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI) {
    // Re-tie operands.
    for (auto Tied : TiedOps)
      MI->tieOperands(Tied.first, Tied.second);
    return false;
  }

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    Register Reg = MO->getReg();
    if (!Reg || Reg.isVirtual() || MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
  }

  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  // Update the call site info.
  if (MI->isCandidateForCallSiteEntry())
    MI->getMF()->moveCallSiteInfo(MI, FoldMI);

  // If we've folded a store into an instruction labelled with debug-info,
  // record a substitution from the old operand to the memory operand. Handle
  // the simple common case where operand 0 is the one being folded, plus when
  // the destination operand is also a tied def. More values could be
  // substituted / preserved with more analysis.
  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
    // Helper lambda.
    auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
      // Substitute old operand zero to the new instruction's memory operand.
      unsigned OldOperandNum = Ops[0].second;
      unsigned NewNum = FoldMI->getDebugInstrNum();
      unsigned OldNum = MI->getDebugInstrNum();
      MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
                                    {NewNum, MachineFunction::DebugOperandMemNumber});
    };

    const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
    if (Ops.size() == 1 && Op0.isDef()) {
      MakeSubstitution();
    } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
               Op0.getReg() == MI->getOperand(1).getReg()) {
      MakeSubstitution();
    }
  } else if (MI->peekDebugInstrNum()) {
    // This is a debug-labelled instruction, but the operand being folded isn't
    // at operand zero. Most likely this means it's a load being folded in.
    // Substitute any register defs from operand zero up to the one being
    // folded -- past that point, we don't know what the new operand indexes
    // will be.
    MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
  }

  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->removeOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    // If only one store instruction is required for the spill, add it to the
    // mergeable list. On X86 AMX, two instructions are required to store,
    // so we disable merging for that case.
    if (std::distance(MIS.begin(), MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}

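/// insertReload - Insert a reload of NewVReg from StackSlot before MI and
/// record the newly created instructions in LiveIntervals.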
void InlineSpiller::insertReload(Register NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI, Register());

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                                NewVReg));
  ++NumReloads;
}

/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// not relevant.
static bool isRealSpill(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return true;
  assert(Def.getNumOperands() == 1 &&
         "Implicit def with more than one definition");
  // We can say that the VReg defined by Def is undef, only if it is
  // fully defined by Def. Otherwise, some of the lanes may not be
  // undef and the value of the VReg matters.
  return Def.getOperand(0).getSubReg();
}

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  // Spills are not terminators, so inserting spills after terminators will
  // violate invariants in MachineVerifier.
  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  MachineBasicBlock::iterator SpillBefore = std::next(MI);
  bool IsRealSpill = isRealSpill(*MI);

  if (IsRealSpill)
    TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI, Register());
  else
    // Don't spill undef value.
    // Anything works for undef, in particular keeping the memory
    // uninitialized is a viable option and it saves code size and
    // run time.
    BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));

  MachineBasicBlock::iterator Spill = std::next(MI);
  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
    getVDefInterval(MI, LIS);

  LLVM_DEBUG(
      dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
  ++NumSpills;
  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86 AMX, two instructions are required to store,
  // so we disable merging for that case.
  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
    HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
}

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(Register Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
    // Debug values are not allowed to affect codegen.
    if (MI.isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      MachineBasicBlock *MBB = MI.getParent();
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
      buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
      MBB->erase(MI);
      continue;
    }

    assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(&MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(&MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    Register SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
        SnippetCopies.insert(&MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI.getOperand(0).setIsDead();
          DeadDefs.push_back(&MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    Register NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, &MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, &MI);
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (Register Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (Register Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (Register Reg : RegsToSpill) {
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI.eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (Register Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}

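/// spill - Spill the live range being edited: collect connected snippets,
/// rematerialize uses where possible, and spill everything else to a single
/// stack slot shared by all registers with the same original.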
void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!Register::isStackSlot(edit.getReg()) &&
         "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  LLVM_DEBUG(dbgs() << "Inline spilling "
                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                    << ':' << edit.getParent() << "\nFrom original "
                    << printReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, VRAI);
}

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }

/// When a spill is inserted, add the spill to MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}

/// When a spill is removed, remove the spill from MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}

/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a live sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
  // The original def could be after the last insert point in the root block,
  // we can't hoist to here.
  if (Idx < OrigVNI.def) {
    // TODO: We could be better here. If LI is not alive in landing pad
    // we could hoist spill after LIP.
    LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
    return false;
  }
  Register OrigReg = OrigLI.reg();
  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (const Register &SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}

/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with the smaller SlotIndex.
  for (auto *const CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  for (auto *const SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}

/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // to the OrigReg. It means the def instruction should dominate all the spills
  // to be hoisted. We choose the BB where the def instruction is located as
  // the Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every node on the dominator tree with a spill, walk up the dominator
  // tree towards the Root node until it is reached. If there is another node
  // containing a spill in the middle of the path, the previously seen spill
  // will be redundant and the node containing it will be removed. All the
  // nodes on the path starting from the first node with a non-redundant spill
  // to the Root node will be added to the WorkSet, which will contain all the
  // possible locations where spills may be hoisted to after the loop below is
  // done.
  for (auto *const Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
        /// If we see the Node already in WorkSet, the path from the Node to
        /// the Root node must already be traversed by another spill.
        /// Then no need to repeat.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
      // set the initial status before hoisting starts. The value of BBs
      // containing original spills is set to 0, in order to distinguish them
      // from BBs containing hoisted spills, which will be inserted into
      // SpillsToKeep later during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getBase().getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    for (MachineDomTreeNode *Child : Node->children()) {
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders have a different size than WorkSet");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}

/// Try to hoist spills according to BB hotness. The spills to be removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // SpillsToKeep contains all the nodes where spills are to be inserted
  // during hoisting. If the spill to be inserted is an original spill
  // (not a hoisted one), the value of the map entry is 0. If the spill
  // is a hoisted spill, the value of the map entry is the VReg to be used
  // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from BB to the first spill inside of it.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
  // nodes set and the cost of all the spills inside those nodes.
  // The nodes set are the locations where spills are to be inserted
  // in the subtree of current node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate Orders set in reverse order, which will be a bottom-up order
  // in the dominator tree. Once we visit a dom tree node, we know its
  // children have already been visited and the spill locations in the
  // subtrees of all the children have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in subtree of current node (*RIt) to
    // SpillsInSubTreeMap[*RIt].first.
    for (MachineDomTreeNode *Child : (*RIt)->children()) {
      if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
        continue;
      // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
      // should be placed before getting the begin and end iterators of
      // SpillsInSubTreeMap[Child].first, or else the iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
      // and the map grows and then the original buckets in the map are moved.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert spill.
    Register LiveReg;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias a little
    // to hoist the spill.
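    // For example, with two spills in the subtree whose frequencies sum to
    // 100, hoisting into Block is profitable as long as Block's frequency is
    // below roughly 111 (100 / 0.9); with a single spill the comparison is
    // unbiased.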
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to current Block.
      for (auto *const SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB that contains an original spill, add the spill
        // to SpillsToRm.
        if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
            !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will no longer contain a spill, so remove it from
        // SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // Current Block is the BB containing the new hoisted spill. Add it to
      // SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not an original spill),
  // save them to SpillsToIns.
  for (const auto &Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}

/// For spills with equal values, remove redundant spills and hoist the
/// remaining ones to less hot spots.
///
/// Spills with equal values are collected into the same set in
/// MergeableSpills when a spill is inserted. These equal spills originate
/// from the same defining instruction and are dominated by it. Before
/// hoisting all the equal spills, redundant spills inside the same BB are
/// first marked for deletion. Then, starting from the remaining spills, walk
/// up the dominator tree towards the Root node where the defining instruction
/// is located, mark the dominated spills for deletion along the way, and
/// collect the BB nodes on the path from non-dominated spills to the defining
/// instruction into a WorkSet. The nodes in WorkSet are the candidate places
/// where we consider hoisting the spills. We iterate over the WorkSet in
/// bottom-up order and, for each node, decide whether to hoist the spills
/// inside its subtree to that node. In this way we can get a local benefit
/// even if hoisting all the equal spills to one cold place is impossible.
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<Register, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    Register Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });
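    // Illustrative outcome (editor's sketch, not from the original comments;
    // block numbers are hypothetical): if the equal-value spills sit in BB2
    // and BB3 and hoisting into their common dominator BB1 pays off,
    // runHoistSpills returns the two original spill instructions in
    // SpillsToRm and a single SpillsToIns entry mapping BB1 to the register
    // that still holds the value there.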
    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto &Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // Insert hoisted spills.
    for (auto const &Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      Register LiveReg = Insert.second;
      MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
      MachineInstrSpan MIS(MII, BB);
      TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI, Register());
      LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
      for (const MachineInstr &MI : make_range(MIS.begin(), MII))
        getVDefInterval(MI, LIS);
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto *const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->removeOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, std::nullopt);
  }
}

/// For a VirtReg clone, the \p New register should have the same physreg or
/// stack slot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
  if (VRM.hasShape(Old))
    VRM.assignVirt2Shape(New, VRM.getShape(Old));
}