1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The inline spiller modifies the machine function directly instead of 10 // inserting spills and restores in VirtRegMap. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SplitKit.h" 15 #include "llvm/ADT/ArrayRef.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/MapVector.h" 18 #include "llvm/ADT/STLExtras.h" 19 #include "llvm/ADT/SetVector.h" 20 #include "llvm/ADT/SmallPtrSet.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/Statistic.h" 23 #include "llvm/Analysis/AliasAnalysis.h" 24 #include "llvm/CodeGen/LiveInterval.h" 25 #include "llvm/CodeGen/LiveIntervals.h" 26 #include "llvm/CodeGen/LiveRangeEdit.h" 27 #include "llvm/CodeGen/LiveStacks.h" 28 #include "llvm/CodeGen/MachineBasicBlock.h" 29 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" 30 #include "llvm/CodeGen/MachineDominators.h" 31 #include "llvm/CodeGen/MachineFunction.h" 32 #include "llvm/CodeGen/MachineFunctionPass.h" 33 #include "llvm/CodeGen/MachineInstr.h" 34 #include "llvm/CodeGen/MachineInstrBuilder.h" 35 #include "llvm/CodeGen/MachineInstrBundle.h" 36 #include "llvm/CodeGen/MachineLoopInfo.h" 37 #include "llvm/CodeGen/MachineOperand.h" 38 #include "llvm/CodeGen/MachineRegisterInfo.h" 39 #include "llvm/CodeGen/SlotIndexes.h" 40 #include "llvm/CodeGen/Spiller.h" 41 #include "llvm/CodeGen/StackMaps.h" 42 #include "llvm/CodeGen/TargetInstrInfo.h" 43 #include "llvm/CodeGen/TargetOpcodes.h" 44 #include "llvm/CodeGen/TargetRegisterInfo.h" 45 #include "llvm/CodeGen/TargetSubtargetInfo.h" 46 #include "llvm/CodeGen/VirtRegMap.h" 47 #include "llvm/Config/llvm-config.h" 48 #include "llvm/Support/BlockFrequency.h" 49 #include "llvm/Support/BranchProbability.h" 50 #include "llvm/Support/CommandLine.h" 51 #include "llvm/Support/Compiler.h" 52 #include "llvm/Support/Debug.h" 53 #include "llvm/Support/ErrorHandling.h" 54 #include "llvm/Support/raw_ostream.h" 55 #include <cassert> 56 #include <iterator> 57 #include <tuple> 58 #include <utility> 59 #include <vector> 60 61 using namespace llvm; 62 63 #define DEBUG_TYPE "regalloc" 64 65 STATISTIC(NumSpilledRanges, "Number of spilled live ranges"); 66 STATISTIC(NumSnippets, "Number of spilled snippets"); 67 STATISTIC(NumSpills, "Number of spills inserted"); 68 STATISTIC(NumSpillsRemoved, "Number of spills removed"); 69 STATISTIC(NumReloads, "Number of reloads inserted"); 70 STATISTIC(NumReloadsRemoved, "Number of reloads removed"); 71 STATISTIC(NumFolded, "Number of folded stack accesses"); 72 STATISTIC(NumFoldedLoads, "Number of folded loads"); 73 STATISTIC(NumRemats, "Number of rematerialized defs for spilling"); 74 75 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden, 76 cl::desc("Disable inline spill hoisting")); 77 static cl::opt<bool> 78 RestrictStatepointRemat("restrict-statepoint-remat", 79 cl::init(false), cl::Hidden, 80 cl::desc("Restrict remat for statepoint operands")); 81 82 namespace { 83 84 class HoistSpillHelper : private LiveRangeEdit::Delegate { 85 MachineFunction &MF; 86 LiveIntervals &LIS; 87 LiveStacks &LSS; 88 MachineDominatorTree &MDT; 89 MachineLoopInfo &Loops; 90 VirtRegMap &VRM; 91 
MachineRegisterInfo &MRI; 92 const TargetInstrInfo &TII; 93 const TargetRegisterInfo &TRI; 94 const MachineBlockFrequencyInfo &MBFI; 95 96 InsertPointAnalysis IPA; 97 98 // Map from StackSlot to the LiveInterval of the original register. 99 // Note the LiveInterval of the original register may have been deleted 100 // after it is spilled. We keep a copy here to track the range where 101 // spills can be moved. 102 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI; 103 104 // Map from pair of (StackSlot and Original VNI) to a set of spills which 105 // have the same stackslot and have equal values defined by Original VNI. 106 // These spills are mergeable and are hoist candidates. 107 using MergeableSpillsMap = 108 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>; 109 MergeableSpillsMap MergeableSpills; 110 111 /// This is the map from original register to a set containing all its 112 /// siblings. To hoist a spill to another BB, we need to find out a live 113 /// sibling there and use it as the source of the new spill. 114 DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap; 115 116 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI, 117 MachineBasicBlock &BB, Register &LiveReg); 118 119 void rmRedundantSpills( 120 SmallPtrSet<MachineInstr *, 16> &Spills, 121 SmallVectorImpl<MachineInstr *> &SpillsToRm, 122 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill); 123 124 void getVisitOrders( 125 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills, 126 SmallVectorImpl<MachineDomTreeNode *> &Orders, 127 SmallVectorImpl<MachineInstr *> &SpillsToRm, 128 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep, 129 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill); 130 131 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI, 132 SmallPtrSet<MachineInstr *, 16> &Spills, 133 SmallVectorImpl<MachineInstr *> &SpillsToRm, 134 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns); 135 136 public: 137 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf, 138 VirtRegMap &vrm) 139 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()), 140 LSS(pass.getAnalysis<LiveStacks>()), 141 MDT(pass.getAnalysis<MachineDominatorTree>()), 142 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm), 143 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()), 144 TRI(*mf.getSubtarget().getRegisterInfo()), 145 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()), 146 IPA(LIS, mf.getNumBlockIDs()) {} 147 148 void addToMergeableSpills(MachineInstr &Spill, int StackSlot, 149 unsigned Original); 150 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot); 151 void hoistAllSpills(); 152 void LRE_DidCloneVirtReg(Register, Register) override; 153 }; 154 155 class InlineSpiller : public Spiller { 156 MachineFunction &MF; 157 LiveIntervals &LIS; 158 LiveStacks &LSS; 159 MachineDominatorTree &MDT; 160 MachineLoopInfo &Loops; 161 VirtRegMap &VRM; 162 MachineRegisterInfo &MRI; 163 const TargetInstrInfo &TII; 164 const TargetRegisterInfo &TRI; 165 const MachineBlockFrequencyInfo &MBFI; 166 167 // Variables that are valid during spill(), but used by multiple methods. 168 LiveRangeEdit *Edit = nullptr; 169 LiveInterval *StackInt = nullptr; 170 int StackSlot; 171 Register Original; 172 173 // All registers to spill to StackSlot, including the main register. 174 SmallVector<Register, 8> RegsToSpill; 175 176 // All COPY instructions to/from snippets. 177 // They are ignored since both operands refer to the same stack slot. 
178 // For bundled copies, this will only include the first header copy. 179 SmallPtrSet<MachineInstr*, 8> SnippetCopies; 180 181 // Values that failed to remat at some point. 182 SmallPtrSet<VNInfo*, 8> UsedValues; 183 184 // Dead defs generated during spilling. 185 SmallVector<MachineInstr*, 8> DeadDefs; 186 187 // Object records spills information and does the hoisting. 188 HoistSpillHelper HSpiller; 189 190 // Live range weight calculator. 191 VirtRegAuxInfo &VRAI; 192 193 ~InlineSpiller() override = default; 194 195 public: 196 InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM, 197 VirtRegAuxInfo &VRAI) 198 : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()), 199 LSS(Pass.getAnalysis<LiveStacks>()), 200 MDT(Pass.getAnalysis<MachineDominatorTree>()), 201 Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM), 202 MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()), 203 TRI(*MF.getSubtarget().getRegisterInfo()), 204 MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()), 205 HSpiller(Pass, MF, VRM), VRAI(VRAI) {} 206 207 void spill(LiveRangeEdit &) override; 208 void postOptimization() override; 209 210 private: 211 bool isSnippet(const LiveInterval &SnipLI); 212 void collectRegsToSpill(); 213 214 bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); } 215 216 bool isSibling(Register Reg); 217 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI); 218 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI); 219 220 void markValueUsed(LiveInterval*, VNInfo*); 221 bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI); 222 bool reMaterializeFor(LiveInterval &, MachineInstr &MI); 223 void reMaterializeAll(); 224 225 bool coalesceStackAccess(MachineInstr *MI, Register Reg); 226 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>, 227 MachineInstr *LoadMI = nullptr); 228 void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI); 229 void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI); 230 231 void spillAroundUses(Register Reg); 232 void spillAll(); 233 }; 234 235 } // end anonymous namespace 236 237 Spiller::~Spiller() = default; 238 239 void Spiller::anchor() {} 240 241 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass, 242 MachineFunction &MF, VirtRegMap &VRM, 243 VirtRegAuxInfo &VRAI) { 244 return new InlineSpiller(Pass, MF, VRM, VRAI); 245 } 246 247 //===----------------------------------------------------------------------===// 248 // Snippets 249 //===----------------------------------------------------------------------===// 250 251 // When spilling a virtual register, we also spill any snippets it is connected 252 // to. The snippets are small live ranges that only have a single real use, 253 // leftovers from live range splitting. Spilling them enables memory operand 254 // folding or tightens the live range around the single use. 255 // 256 // This minimizes register pressure and maximizes the store-to-load distance for 257 // spill slots which can be important in tight loops. 258 259 /// If MI is a COPY to or from Reg, return the other register, otherwise return 260 /// 0. 261 static Register isCopyOf(const MachineInstr &MI, Register Reg) { 262 assert(!MI.isBundled()); 263 if (!MI.isCopy()) 264 return Register(); 265 266 const MachineOperand &DstOp = MI.getOperand(0); 267 const MachineOperand &SrcOp = MI.getOperand(1); 268 269 // TODO: Probably only worth allowing subreg copies with undef dests. 
  if (DstOp.getSubReg() != SrcOp.getSubReg())
    return Register();
  if (DstOp.getReg() == Reg)
    return SrcOp.getReg();
  if (SrcOp.getReg() == Reg)
    return DstOp.getReg();
  return Register();
}

/// Check for a copy bundle as formed by SplitKit.
static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg) {
  if (!FirstMI.isBundled())
    return isCopyOf(FirstMI, Reg);

  assert(!FirstMI.isBundledWithPred() && FirstMI.isBundledWithSucc() &&
         "expected to see first instruction in bundle");

  Register SnipReg;
  MachineBasicBlock::const_instr_iterator I = FirstMI.getIterator();
  while (I->isBundledWithSucc()) {
    const MachineInstr &MI = *I;
    if (!MI.isCopy())
      return Register();

    const MachineOperand &DstOp = MI.getOperand(0);
    const MachineOperand &SrcOp = MI.getOperand(1);
    if (DstOp.getReg() == Reg) {
      if (!SnipReg)
        SnipReg = SrcOp.getReg();
      else if (SnipReg != SrcOp.getReg())
        return Register();
    } else if (SrcOp.getReg() == Reg) {
      if (!SnipReg)
        SnipReg = DstOp.getReg();
      else if (SnipReg != DstOp.getReg())
        return Register();
    }

    ++I;
  }

  return SnipReg;
}

static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
  for (const MachineOperand &MO : MI.all_defs())
    if (MO.getReg().isVirtual())
      LIS.getInterval(MO.getReg());
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  Register Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills.
  // An exception is made for statepoint instructions, which will fold fills
  // into their operands.
  // We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %snip = STATEPOINT %snip in var arg area
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (!LIS.intervalIsInOneMBB(SnipLI))
    return false;

  // The number of defs should not exceed 2, not counting defs coming from
  // statepoint instructions.
  unsigned NumValNums = SnipLI.getNumValNums();
  for (auto *VNI : SnipLI.vnis()) {
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (MI->getOpcode() == TargetOpcode::STATEPOINT)
      --NumValNums;
  }
  if (NumValNums > 2)
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_bundle_nodbg_iterator
           RI = MRI.reg_bundle_nodbg_begin(SnipLI.reg()),
           E = MRI.reg_bundle_nodbg_end();
       RI != E;) {
    MachineInstr &MI = *RI++;

    // Allow copies to/from Reg.
    if (isCopyOfBundle(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    if (StatepointOpers::isFoldableReg(&MI, SnipLI.reg()))
      continue;

    // Allow a single additional instruction.
    if (UseMI && &MI != UseMI)
      return false;
    UseMI = &MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
386 void InlineSpiller::collectRegsToSpill() { 387 Register Reg = Edit->getReg(); 388 389 // Main register always spills. 390 RegsToSpill.assign(1, Reg); 391 SnippetCopies.clear(); 392 393 // Snippets all have the same original, so there can't be any for an original 394 // register. 395 if (Original == Reg) 396 return; 397 398 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 399 Register SnipReg = isCopyOfBundle(MI, Reg); 400 if (!isSibling(SnipReg)) 401 continue; 402 LiveInterval &SnipLI = LIS.getInterval(SnipReg); 403 if (!isSnippet(SnipLI)) 404 continue; 405 SnippetCopies.insert(&MI); 406 if (isRegToSpill(SnipReg)) 407 continue; 408 RegsToSpill.push_back(SnipReg); 409 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n'); 410 ++NumSnippets; 411 } 412 } 413 414 bool InlineSpiller::isSibling(Register Reg) { 415 return Reg.isVirtual() && VRM.getOriginal(Reg) == Original; 416 } 417 418 /// It is beneficial to spill to earlier place in the same BB in case 419 /// as follows: 420 /// There is an alternative def earlier in the same MBB. 421 /// Hoist the spill as far as possible in SpillMBB. This can ease 422 /// register pressure: 423 /// 424 /// x = def 425 /// y = use x 426 /// s = copy x 427 /// 428 /// Hoisting the spill of s to immediately after the def removes the 429 /// interference between x and y: 430 /// 431 /// x = def 432 /// spill x 433 /// y = use killed x 434 /// 435 /// This hoist only helps when the copy kills its source. 436 /// 437 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI, 438 MachineInstr &CopyMI) { 439 SlotIndex Idx = LIS.getInstructionIndex(CopyMI); 440 #ifndef NDEBUG 441 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot()); 442 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy"); 443 #endif 444 445 Register SrcReg = CopyMI.getOperand(1).getReg(); 446 LiveInterval &SrcLI = LIS.getInterval(SrcReg); 447 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx); 448 LiveQueryResult SrcQ = SrcLI.Query(Idx); 449 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def); 450 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill()) 451 return false; 452 453 // Conservatively extend the stack slot range to the range of the original 454 // value. We may be able to do better with stack slot coloring by being more 455 // careful here. 456 assert(StackInt && "No stack slot assigned yet."); 457 LiveInterval &OrigLI = LIS.getInterval(Original); 458 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx); 459 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0)); 460 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": " 461 << *StackInt << '\n'); 462 463 // We are going to spill SrcVNI immediately after its def, so clear out 464 // any later spills of the same value. 465 eliminateRedundantSpills(SrcLI, SrcVNI); 466 467 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def); 468 MachineBasicBlock::iterator MII; 469 if (SrcVNI->isPHIDef()) 470 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin()); 471 else { 472 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def); 473 assert(DefMI && "Defining instruction disappeared"); 474 MII = DefMI; 475 ++MII; 476 } 477 MachineInstrSpan MIS(MII, MBB); 478 // Insert spill without kill flag immediately after def. 
  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                          MRI.getRegClass(SrcReg), &TRI, Register());
  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
    getVDefInterval(MI, LIS);
  --MII; // Point to store instruction.
  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86 AMX, two instructions are required to store, so
  // we disable merging for that case.
  if (MIS.begin() == MII)
    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    Register Reg = LI->reg();
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.use_nodbg_bundles(Reg))) {
      if (!MI.isCopy() && !MI.mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (Register DstReg = isCopyOfBundle(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");

          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        ++NumSpillsRemoved;
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      }
    }
  } while (!WorkList.empty());
}

//===----------------------------------------------------------------------===//
// Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated.
See through snippet copies 562 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) { 563 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 564 WorkList.push_back(std::make_pair(LI, VNI)); 565 do { 566 std::tie(LI, VNI) = WorkList.pop_back_val(); 567 if (!UsedValues.insert(VNI).second) 568 continue; 569 570 if (VNI->isPHIDef()) { 571 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def); 572 for (MachineBasicBlock *P : MBB->predecessors()) { 573 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P)); 574 if (PVNI) 575 WorkList.push_back(std::make_pair(LI, PVNI)); 576 } 577 continue; 578 } 579 580 // Follow snippet copies. 581 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 582 if (!SnippetCopies.count(MI)) 583 continue; 584 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg()); 585 assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy"); 586 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true)); 587 assert(SnipVNI && "Snippet undefined before copy"); 588 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI)); 589 } while (!WorkList.empty()); 590 } 591 592 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg, 593 MachineInstr &MI) { 594 if (!RestrictStatepointRemat) 595 return true; 596 // Here's a quick explanation of the problem we're trying to handle here: 597 // * There are some pseudo instructions with more vreg uses than there are 598 // physical registers on the machine. 599 // * This is normally handled by spilling the vreg, and folding the reload 600 // into the user instruction. (Thus decreasing the number of used vregs 601 // until the remainder can be assigned to physregs.) 602 // * However, since we may try to spill vregs in any order, we can end up 603 // trying to spill each operand to the instruction, and then rematting it 604 // instead. When that happens, the new live intervals (for the remats) are 605 // expected to be trivially assignable (i.e. RS_Done). However, since we 606 // may have more remats than physregs, we're guaranteed to fail to assign 607 // one. 608 // At the moment, we only handle this for STATEPOINTs since they're the only 609 // pseudo op where we've seen this. If we start seeing other instructions 610 // with the same problem, we need to revisit this. 611 if (MI.getOpcode() != TargetOpcode::STATEPOINT) 612 return true; 613 // For STATEPOINTs we allow re-materialization for fixed arguments only hoping 614 // that number of physical registers is enough to cover all fixed arguments. 615 // If it is not true we need to revisit it. 616 for (unsigned Idx = StatepointOpers(&MI).getVarIdx(), 617 EndIdx = MI.getNumOperands(); 618 Idx < EndIdx; ++Idx) { 619 MachineOperand &MO = MI.getOperand(Idx); 620 if (MO.isReg() && MO.getReg() == VReg) 621 return false; 622 } 623 return true; 624 } 625 626 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading. 
627 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { 628 // Analyze instruction 629 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops; 630 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops); 631 632 if (!RI.Reads) 633 return false; 634 635 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true); 636 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex()); 637 638 if (!ParentVNI) { 639 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: "); 640 for (MachineOperand &MO : MI.all_uses()) 641 if (MO.getReg() == VirtReg.reg()) 642 MO.setIsUndef(); 643 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI); 644 return true; 645 } 646 647 if (SnippetCopies.count(&MI)) 648 return false; 649 650 LiveInterval &OrigLI = LIS.getInterval(Original); 651 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx); 652 LiveRangeEdit::Remat RM(ParentVNI); 653 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def); 654 655 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) { 656 markValueUsed(&VirtReg, ParentVNI); 657 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); 658 return false; 659 } 660 661 // If the instruction also writes VirtReg.reg, it had better not require the 662 // same register for uses and defs. 663 if (RI.Tied) { 664 markValueUsed(&VirtReg, ParentVNI); 665 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI); 666 return false; 667 } 668 669 // Before rematerializing into a register for a single instruction, try to 670 // fold a load into the instruction. That avoids allocating a new register. 671 if (RM.OrigMI->canFoldAsLoad() && 672 foldMemoryOperand(Ops, RM.OrigMI)) { 673 Edit->markRematerialized(RM.ParentVNI); 674 ++NumFoldedLoads; 675 return true; 676 } 677 678 // If we can't guarantee that we'll be able to actually assign the new vreg, 679 // we can't remat. 680 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) { 681 markValueUsed(&VirtReg, ParentVNI); 682 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); 683 return false; 684 } 685 686 // Allocate a new register for the remat. 687 Register NewVReg = Edit->createFrom(Original); 688 689 // Finally we can rematerialize OrigMI before MI. 690 SlotIndex DefIdx = 691 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI); 692 693 // We take the DebugLoc from MI, since OrigMI may be attributed to a 694 // different source location. 695 auto *NewMI = LIS.getInstructionFromIndex(DefIdx); 696 NewMI->setDebugLoc(MI.getDebugLoc()); 697 698 (void)DefIdx; 699 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' 700 << *LIS.getInstructionFromIndex(DefIdx)); 701 702 // Replace operands 703 for (const auto &OpPair : Ops) { 704 MachineOperand &MO = OpPair.first->getOperand(OpPair.second); 705 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) { 706 MO.setReg(NewVReg); 707 MO.setIsKill(); 708 } 709 } 710 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n'); 711 712 ++NumRemats; 713 return true; 714 } 715 716 /// reMaterializeAll - Try to rematerialize as many uses as possible, 717 /// and trim the live ranges after. 718 void InlineSpiller::reMaterializeAll() { 719 if (!Edit->anyRematerializable()) 720 return; 721 722 UsedValues.clear(); 723 724 // Try to remat before all uses of snippets. 
725 bool anyRemat = false; 726 for (Register Reg : RegsToSpill) { 727 LiveInterval &LI = LIS.getInterval(Reg); 728 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 729 // Debug values are not allowed to affect codegen. 730 if (MI.isDebugValue()) 731 continue; 732 733 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug " 734 "instruction that isn't a DBG_VALUE"); 735 736 anyRemat |= reMaterializeFor(LI, MI); 737 } 738 } 739 if (!anyRemat) 740 return; 741 742 // Remove any values that were completely rematted. 743 for (Register Reg : RegsToSpill) { 744 LiveInterval &LI = LIS.getInterval(Reg); 745 for (VNInfo *VNI : LI.vnis()) { 746 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI)) 747 continue; 748 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 749 MI->addRegisterDead(Reg, &TRI); 750 if (!MI->allDefsAreDead()) 751 continue; 752 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI); 753 DeadDefs.push_back(MI); 754 } 755 } 756 757 // Eliminate dead code after remat. Note that some snippet copies may be 758 // deleted here. 759 if (DeadDefs.empty()) 760 return; 761 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n"); 762 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill); 763 764 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions 765 // after rematerialization. To remove a VNI for a vreg from its LiveInterval, 766 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all 767 // removed, PHI VNI are still left in the LiveInterval. 768 // So to get rid of unused reg, we need to check whether it has non-dbg 769 // reference instead of whether it has non-empty interval. 770 unsigned ResultPos = 0; 771 for (Register Reg : RegsToSpill) { 772 if (MRI.reg_nodbg_empty(Reg)) { 773 Edit->eraseVirtReg(Reg); 774 continue; 775 } 776 777 assert(LIS.hasInterval(Reg) && 778 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) && 779 "Empty and not used live-range?!"); 780 781 RegsToSpill[ResultPos++] = Reg; 782 } 783 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end()); 784 LLVM_DEBUG(dbgs() << RegsToSpill.size() 785 << " registers to spill after remat.\n"); 786 } 787 788 //===----------------------------------------------------------------------===// 789 // Spilling 790 //===----------------------------------------------------------------------===// 791 792 /// If MI is a load or store of StackSlot, it can be removed. 793 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) { 794 int FI = 0; 795 Register InstrReg = TII.isLoadFromStackSlot(*MI, FI); 796 bool IsLoad = InstrReg; 797 if (!IsLoad) 798 InstrReg = TII.isStoreToStackSlot(*MI, FI); 799 800 // We have a stack access. Is it the right register and slot? 801 if (InstrReg != Reg || FI != StackSlot) 802 return false; 803 804 if (!IsLoad) 805 HSpiller.rmFromMergeableSpills(*MI, StackSlot); 806 807 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI); 808 LIS.RemoveMachineInstrFromMaps(*MI); 809 MI->eraseFromParent(); 810 811 if (IsLoad) { 812 ++NumReloadsRemoved; 813 --NumReloads; 814 } else { 815 ++NumSpillsRemoved; 816 --NumSpills; 817 } 818 819 return true; 820 } 821 822 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 823 LLVM_DUMP_METHOD 824 // Dump the range of instructions from B to E with their slot indexes. 
825 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, 826 MachineBasicBlock::iterator E, 827 LiveIntervals const &LIS, 828 const char *const header, 829 Register VReg = Register()) { 830 char NextLine = '\n'; 831 char SlotIndent = '\t'; 832 833 if (std::next(B) == E) { 834 NextLine = ' '; 835 SlotIndent = ' '; 836 } 837 838 dbgs() << '\t' << header << ": " << NextLine; 839 840 for (MachineBasicBlock::iterator I = B; I != E; ++I) { 841 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot(); 842 843 // If a register was passed in and this instruction has it as a 844 // destination that is marked as an early clobber, print the 845 // early-clobber slot index. 846 if (VReg) { 847 MachineOperand *MO = I->findRegisterDefOperand(VReg); 848 if (MO && MO->isEarlyClobber()) 849 Idx = Idx.getRegSlot(true); 850 } 851 852 dbgs() << SlotIndent << Idx << '\t' << *I; 853 } 854 } 855 #endif 856 857 /// foldMemoryOperand - Try folding stack slot references in Ops into their 858 /// instructions. 859 /// 860 /// @param Ops Operand indices from AnalyzeVirtRegInBundle(). 861 /// @param LoadMI Load instruction to use instead of stack slot when non-null. 862 /// @return True on success. 863 bool InlineSpiller:: 864 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops, 865 MachineInstr *LoadMI) { 866 if (Ops.empty()) 867 return false; 868 // Don't attempt folding in bundles. 869 MachineInstr *MI = Ops.front().first; 870 if (Ops.back().first != MI || MI->isBundled()) 871 return false; 872 873 bool WasCopy = MI->isCopy(); 874 Register ImpReg; 875 876 // TII::foldMemoryOperand will do what we need here for statepoint 877 // (fold load into use and remove corresponding def). We will replace 878 // uses of removed def with loads (spillAroundUses). 879 // For that to work we need to untie def and use to pass it through 880 // foldMemoryOperand and signal foldPatchpoint that it is allowed to 881 // fold them. 882 bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT; 883 884 // Spill subregs if the target allows it. 885 // We always want to spill subregs for stackmap/patchpoint pseudos. 886 bool SpillSubRegs = TII.isSubregFoldable() || 887 MI->getOpcode() == TargetOpcode::STATEPOINT || 888 MI->getOpcode() == TargetOpcode::PATCHPOINT || 889 MI->getOpcode() == TargetOpcode::STACKMAP; 890 891 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied 892 // operands. 893 SmallVector<unsigned, 8> FoldOps; 894 for (const auto &OpPair : Ops) { 895 unsigned Idx = OpPair.second; 896 assert(MI == OpPair.first && "Instruction conflict during operand folding"); 897 MachineOperand &MO = MI->getOperand(Idx); 898 899 // No point restoring an undef read, and we'll produce an invalid live 900 // interval. 901 // TODO: Is this really the correct way to handle undef tied uses? 902 if (MO.isUse() && !MO.readsReg() && !MO.isTied()) 903 continue; 904 905 if (MO.isImplicit()) { 906 ImpReg = MO.getReg(); 907 continue; 908 } 909 910 if (!SpillSubRegs && MO.getSubReg()) 911 return false; 912 // We cannot fold a load instruction into a def. 913 if (LoadMI && MO.isDef()) 914 return false; 915 // Tied use operands should not be passed to foldMemoryOperand. 916 if (UntieRegs || !MI->isRegTiedToDefOperand(Idx)) 917 FoldOps.push_back(Idx); 918 } 919 920 // If we only have implicit uses, we won't be able to fold that. 921 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try! 
922 if (FoldOps.empty()) 923 return false; 924 925 MachineInstrSpan MIS(MI, MI->getParent()); 926 927 SmallVector<std::pair<unsigned, unsigned> > TiedOps; 928 if (UntieRegs) 929 for (unsigned Idx : FoldOps) { 930 MachineOperand &MO = MI->getOperand(Idx); 931 if (!MO.isTied()) 932 continue; 933 unsigned Tied = MI->findTiedOperandIdx(Idx); 934 if (MO.isUse()) 935 TiedOps.emplace_back(Tied, Idx); 936 else { 937 assert(MO.isDef() && "Tied to not use and def?"); 938 TiedOps.emplace_back(Idx, Tied); 939 } 940 MI->untieRegOperand(Idx); 941 } 942 943 MachineInstr *FoldMI = 944 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS) 945 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM); 946 if (!FoldMI) { 947 // Re-tie operands. 948 for (auto Tied : TiedOps) 949 MI->tieOperands(Tied.first, Tied.second); 950 return false; 951 } 952 953 // Remove LIS for any dead defs in the original MI not in FoldMI. 954 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) { 955 if (!MO->isReg()) 956 continue; 957 Register Reg = MO->getReg(); 958 if (!Reg || Reg.isVirtual() || MRI.isReserved(Reg)) { 959 continue; 960 } 961 // Skip non-Defs, including undef uses and internal reads. 962 if (MO->isUse()) 963 continue; 964 PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI); 965 if (RI.FullyDefined) 966 continue; 967 // FoldMI does not define this physreg. Remove the LI segment. 968 assert(MO->isDead() && "Cannot fold physreg def"); 969 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot(); 970 LIS.removePhysRegDefAt(Reg.asMCReg(), Idx); 971 } 972 973 int FI; 974 if (TII.isStoreToStackSlot(*MI, FI) && 975 HSpiller.rmFromMergeableSpills(*MI, FI)) 976 --NumSpills; 977 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI); 978 // Update the call site info. 979 if (MI->isCandidateForCallSiteEntry()) 980 MI->getMF()->moveCallSiteInfo(MI, FoldMI); 981 982 // If we've folded a store into an instruction labelled with debug-info, 983 // record a substitution from the old operand to the memory operand. Handle 984 // the simple common case where operand 0 is the one being folded, plus when 985 // the destination operand is also a tied def. More values could be 986 // substituted / preserved with more analysis. 987 if (MI->peekDebugInstrNum() && Ops[0].second == 0) { 988 // Helper lambda. 989 auto MakeSubstitution = [this,FoldMI,MI,&Ops]() { 990 // Substitute old operand zero to the new instructions memory operand. 991 unsigned OldOperandNum = Ops[0].second; 992 unsigned NewNum = FoldMI->getDebugInstrNum(); 993 unsigned OldNum = MI->getDebugInstrNum(); 994 MF.makeDebugValueSubstitution({OldNum, OldOperandNum}, 995 {NewNum, MachineFunction::DebugOperandMemNumber}); 996 }; 997 998 const MachineOperand &Op0 = MI->getOperand(Ops[0].second); 999 if (Ops.size() == 1 && Op0.isDef()) { 1000 MakeSubstitution(); 1001 } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() && 1002 Op0.getReg() == MI->getOperand(1).getReg()) { 1003 MakeSubstitution(); 1004 } 1005 } else if (MI->peekDebugInstrNum()) { 1006 // This is a debug-labelled instruction, but the operand being folded isn't 1007 // at operand zero. Most likely this means it's a load being folded in. 1008 // Substitute any register defs from operand zero up to the one being 1009 // folded -- past that point, we don't know what the new operand indexes 1010 // will be. 1011 MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second); 1012 } 1013 1014 MI->eraseFromParent(); 1015 1016 // Insert any new instructions other than FoldMI into the LIS maps. 
1017 assert(!MIS.empty() && "Unexpected empty span of instructions!"); 1018 for (MachineInstr &MI : MIS) 1019 if (&MI != FoldMI) 1020 LIS.InsertMachineInstrInMaps(MI); 1021 1022 // TII.foldMemoryOperand may have left some implicit operands on the 1023 // instruction. Strip them. 1024 if (ImpReg) 1025 for (unsigned i = FoldMI->getNumOperands(); i; --i) { 1026 MachineOperand &MO = FoldMI->getOperand(i - 1); 1027 if (!MO.isReg() || !MO.isImplicit()) 1028 break; 1029 if (MO.getReg() == ImpReg) 1030 FoldMI->removeOperand(i - 1); 1031 } 1032 1033 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS, 1034 "folded")); 1035 1036 if (!WasCopy) 1037 ++NumFolded; 1038 else if (Ops.front().second == 0) { 1039 ++NumSpills; 1040 // If there is only 1 store instruction is required for spill, add it 1041 // to mergeable list. In X86 AMX, 2 intructions are required to store. 1042 // We disable the merge for this case. 1043 if (std::distance(MIS.begin(), MIS.end()) <= 1) 1044 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original); 1045 } else 1046 ++NumReloads; 1047 return true; 1048 } 1049 1050 void InlineSpiller::insertReload(Register NewVReg, 1051 SlotIndex Idx, 1052 MachineBasicBlock::iterator MI) { 1053 MachineBasicBlock &MBB = *MI->getParent(); 1054 1055 MachineInstrSpan MIS(MI, &MBB); 1056 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot, 1057 MRI.getRegClass(NewVReg), &TRI, Register()); 1058 1059 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI); 1060 1061 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload", 1062 NewVReg)); 1063 ++NumReloads; 1064 } 1065 1066 /// Check if \p Def fully defines a VReg with an undefined value. 1067 /// If that's the case, that means the value of VReg is actually 1068 /// not relevant. 1069 static bool isRealSpill(const MachineInstr &Def) { 1070 if (!Def.isImplicitDef()) 1071 return true; 1072 assert(Def.getNumOperands() == 1 && 1073 "Implicit def with more than one definition"); 1074 // We can say that the VReg defined by Def is undef, only if it is 1075 // fully defined by Def. Otherwise, some of the lanes may not be 1076 // undef and the value of the VReg matters. 1077 return Def.getOperand(0).getSubReg(); 1078 } 1079 1080 /// insertSpill - Insert a spill of NewVReg after MI. 1081 void InlineSpiller::insertSpill(Register NewVReg, bool isKill, 1082 MachineBasicBlock::iterator MI) { 1083 // Spill are not terminators, so inserting spills after terminators will 1084 // violate invariants in MachineVerifier. 1085 assert(!MI->isTerminator() && "Inserting a spill after a terminator"); 1086 MachineBasicBlock &MBB = *MI->getParent(); 1087 1088 MachineInstrSpan MIS(MI, &MBB); 1089 MachineBasicBlock::iterator SpillBefore = std::next(MI); 1090 bool IsRealSpill = isRealSpill(*MI); 1091 1092 if (IsRealSpill) 1093 TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot, 1094 MRI.getRegClass(NewVReg), &TRI, Register()); 1095 else 1096 // Don't spill undef value. 1097 // Anything works for undef, in particular keeping the memory 1098 // uninitialized is a viable option and it saves code size and 1099 // run time. 
1100 BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL)) 1101 .addReg(NewVReg, getKillRegState(isKill)); 1102 1103 MachineBasicBlock::iterator Spill = std::next(MI); 1104 LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end()); 1105 for (const MachineInstr &MI : make_range(Spill, MIS.end())) 1106 getVDefInterval(MI, LIS); 1107 1108 LLVM_DEBUG( 1109 dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill")); 1110 ++NumSpills; 1111 // If there is only 1 store instruction is required for spill, add it 1112 // to mergeable list. In X86 AMX, 2 intructions are required to store. 1113 // We disable the merge for this case. 1114 if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1) 1115 HSpiller.addToMergeableSpills(*Spill, StackSlot, Original); 1116 } 1117 1118 /// spillAroundUses - insert spill code around each use of Reg. 1119 void InlineSpiller::spillAroundUses(Register Reg) { 1120 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n'); 1121 LiveInterval &OldLI = LIS.getInterval(Reg); 1122 1123 // Iterate over instructions using Reg. 1124 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 1125 // Debug values are not allowed to affect codegen. 1126 if (MI.isDebugValue()) { 1127 // Modify DBG_VALUE now that the value is in a spill slot. 1128 MachineBasicBlock *MBB = MI.getParent(); 1129 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI); 1130 buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg); 1131 MBB->erase(MI); 1132 continue; 1133 } 1134 1135 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug " 1136 "instruction that isn't a DBG_VALUE"); 1137 1138 // Ignore copies to/from snippets. We'll delete them. 1139 if (SnippetCopies.count(&MI)) 1140 continue; 1141 1142 // Stack slot accesses may coalesce away. 1143 if (coalesceStackAccess(&MI, Reg)) 1144 continue; 1145 1146 // Analyze instruction. 1147 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops; 1148 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops); 1149 1150 // Find the slot index where this instruction reads and writes OldLI. 1151 // This is usually the def slot, except for tied early clobbers. 1152 SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot(); 1153 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true))) 1154 if (SlotIndex::isSameInstr(Idx, VNI->def)) 1155 Idx = VNI->def; 1156 1157 // Check for a sibling copy. 1158 Register SibReg = isCopyOfBundle(MI, Reg); 1159 if (SibReg && isSibling(SibReg)) { 1160 // This may actually be a copy between snippets. 1161 if (isRegToSpill(SibReg)) { 1162 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI); 1163 SnippetCopies.insert(&MI); 1164 continue; 1165 } 1166 if (RI.Writes) { 1167 if (hoistSpillInsideBB(OldLI, MI)) { 1168 // This COPY is now dead, the value is already in the stack slot. 1169 MI.getOperand(0).setIsDead(); 1170 DeadDefs.push_back(&MI); 1171 continue; 1172 } 1173 } else { 1174 // This is a reload for a sib-reg copy. Drop spills downstream. 1175 LiveInterval &SibLI = LIS.getInterval(SibReg); 1176 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx)); 1177 // The COPY will fold to a reload below. 1178 } 1179 } 1180 1181 // Attempt to fold memory ops. 1182 if (foldMemoryOperand(Ops)) 1183 continue; 1184 1185 // Create a new virtual register for spill/fill. 1186 // FIXME: Infer regclass from instruction alone. 1187 Register NewVReg = Edit->createFrom(Reg); 1188 1189 if (RI.Reads) 1190 insertReload(NewVReg, Idx, &MI); 1191 1192 // Rewrite instruction operands. 
1193 bool hasLiveDef = false; 1194 for (const auto &OpPair : Ops) { 1195 MachineOperand &MO = OpPair.first->getOperand(OpPair.second); 1196 MO.setReg(NewVReg); 1197 if (MO.isUse()) { 1198 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second)) 1199 MO.setIsKill(); 1200 } else { 1201 if (!MO.isDead()) 1202 hasLiveDef = true; 1203 } 1204 } 1205 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n'); 1206 1207 // FIXME: Use a second vreg if instruction has no tied ops. 1208 if (RI.Writes) 1209 if (hasLiveDef) 1210 insertSpill(NewVReg, true, &MI); 1211 } 1212 } 1213 1214 /// spillAll - Spill all registers remaining after rematerialization. 1215 void InlineSpiller::spillAll() { 1216 // Update LiveStacks now that we are committed to spilling. 1217 if (StackSlot == VirtRegMap::NO_STACK_SLOT) { 1218 StackSlot = VRM.assignVirt2StackSlot(Original); 1219 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original)); 1220 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator()); 1221 } else 1222 StackInt = &LSS.getInterval(StackSlot); 1223 1224 if (Original != Edit->getReg()) 1225 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot); 1226 1227 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values"); 1228 for (Register Reg : RegsToSpill) 1229 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg), 1230 StackInt->getValNumInfo(0)); 1231 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n'); 1232 1233 // Spill around uses of all RegsToSpill. 1234 for (Register Reg : RegsToSpill) 1235 spillAroundUses(Reg); 1236 1237 // Hoisted spills may cause dead code. 1238 if (!DeadDefs.empty()) { 1239 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n"); 1240 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill); 1241 } 1242 1243 // Finally delete the SnippetCopies. 1244 for (Register Reg : RegsToSpill) { 1245 for (MachineInstr &MI : 1246 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) { 1247 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy"); 1248 // FIXME: Do this with a LiveRangeEdit callback. 1249 LIS.getSlotIndexes()->removeSingleMachineInstrFromMaps(MI); 1250 MI.eraseFromBundle(); 1251 } 1252 } 1253 1254 // Delete all spilled registers. 1255 for (Register Reg : RegsToSpill) 1256 Edit->eraseVirtReg(Reg); 1257 } 1258 1259 void InlineSpiller::spill(LiveRangeEdit &edit) { 1260 ++NumSpilledRanges; 1261 Edit = &edit; 1262 assert(!Register::isStackSlot(edit.getReg()) && 1263 "Trying to spill a stack slot."); 1264 // Share a stack slot among all descendants of Original. 1265 Original = VRM.getOriginal(edit.getReg()); 1266 StackSlot = VRM.getStackSlot(Original); 1267 StackInt = nullptr; 1268 1269 LLVM_DEBUG(dbgs() << "Inline spilling " 1270 << TRI.getRegClassName(MRI.getRegClass(edit.getReg())) 1271 << ':' << edit.getParent() << "\nFrom original " 1272 << printReg(Original) << '\n'); 1273 assert(edit.getParent().isSpillable() && 1274 "Attempting to spill already spilled value."); 1275 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs"); 1276 1277 collectRegsToSpill(); 1278 reMaterializeAll(); 1279 1280 // Remat may handle everything. 1281 if (!RegsToSpill.empty()) 1282 spillAll(); 1283 1284 Edit->calculateRegClassAndHint(MF, VRAI); 1285 } 1286 1287 /// Optimizations after all the reg selections and spills are done. 1288 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); } 1289 1290 /// When a spill is inserted, add the spill to MergeableSpills map. 
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (!StackSlotToOrigLI.contains(StackSlot)) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}

/// When a spill is removed, remove the spill from MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}

/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a living sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
  // The original def could be after the last insert point in the root block,
  // in which case we can't hoist to here.
  if (Idx < OrigVNI.def) {
    // TODO: We could be better here. If LI is not alive in landing pad
    // we could hoist spill after LIP.
    LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
    return false;
  }
  Register OrigReg = OrigLI.reg();
  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (const Register &SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}

/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with the smaller SlotIndex.
  for (auto *const CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  for (auto *const SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}

/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // of OrigReg, so that def instruction dominates all the spills to be
  // hoisted. We choose the BB where the def instruction is located as the
  // Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every node on the dominator tree with a spill, walk up the dominator
  // tree towards the Root node until it is reached. If there is another node
  // containing a spill in the middle of the path, the previously seen spill
  // is redundant and the node containing it is removed. All the nodes on the
  // path starting from the first node with a non-redundant spill up to the
  // Root node are added to the WorkSet, which will contain all the possible
  // locations where spills may be hoisted to after the loop below is done.
  for (auto *const Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
        /// If we see the Node already in WorkSet, the path from the Node to
        /// the Root node must already be traversed by another spill.
        /// Then no need to repeat.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
      // set the initial status before hoisting starts. The value for BBs
      // containing original spills is set to 0, in order to discriminate
      // them from BBs containing hoisted spills, which will be inserted into
      // SpillsToKeep later during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getBase().getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    for (MachineDomTreeNode *Child : Node->children()) {
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders have different size with WorkSet");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}

/// Try to hoist spills according to BB hotness. The spills to be removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // SpillsToKeep contains all the nodes where spills are to be inserted
  // during hoisting. If the spill to be inserted is an original spill
  // (not a hoisted one), the value of the map entry is 0. If the spill
  // is a hoisted spill, the value of the map entry is the VReg to be used
  // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from BB to the first spill inside of it.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
  // nodes set and the cost of all the spills inside those nodes.
  // The nodes set are the locations where spills are to be inserted
  // in the subtree of current node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate the Orders set in reverse order, which gives a bottom-up order
  // in the dominator tree.
  // Once we visit a dom tree node, we know its children have already been
  // visited and the spill locations in the subtrees of all its children have
  // been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.contains(*RIt) && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in the subtree of the current node (*RIt) into
    // SpillsInSubTreeMap[*RIt].first.
    for (MachineDomTreeNode *Child : (*RIt)->children()) {
      if (!SpillsInSubTreeMap.contains(Child))
        continue;
      // The statement "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
      // should be placed before getting the begin and end iterators of
      // SpillsInSubTreeMap[Child].first, or else the iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is accessed for the first
      // time: the map grows and the original buckets in the map are moved.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate for inserting a spill.
    Register LiveReg;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias a little
    // towards hoisting the spill.
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to the current Block.
      for (auto *const SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB containing an original spill, insert the
        // spill into SpillsToRm.
        if (SpillsToKeep.contains(SpillBB) && !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will not contain a spill anymore, so remove it from
        // SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // Current Block is the BB containing the new hoisted spill. Add it to
      // SpillsToKeep. LiveReg is the source of the new spill.
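      // (LiveReg was filled in by the isSpillCandBB check above; it names the
      // register whose value the hoisted spill will store to the stack slot.)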
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not original spills),
  // save them to SpillsToIns.
  for (const auto &Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}

/// For spills with equal values, remove redundant spills and hoist those left
/// to less hot spots.
///
/// Spills with equal values will be collected into the same set in
/// MergeableSpills when the spill is inserted. These equal spills originate
/// from the same defining instruction and are dominated by it. Before hoisting
/// all the equal spills, redundant spills inside the same BB are first marked
/// for deletion. Then, starting from the remaining spills, walk up the
/// dominator tree towards the Root node where the defining instruction is
/// located, mark the dominated spills for deletion along the way, and collect
/// the BB nodes on the path from the non-dominated spills to the defining
/// instruction into a WorkSet. The nodes in WorkSet are the candidate places
/// where we consider hoisting the spills. We iterate the WorkSet in bottom-up
/// order, and for each node decide whether to hoist the spills inside its
/// subtree to that node. In this way, we can get a benefit locally even if
/// hoisting all the equal spills to one cold place is impossible.
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<Register, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    Register Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
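    // It maps the target BB to the virtual register that will be used as the
    // source of the newly inserted spill, as filled in by runHoistSpills.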
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto &Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // Insert hoisted spills.
    for (auto const &Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      Register LiveReg = Insert.second;
      MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
      MachineInstrSpan MIS(MII, BB);
      TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI, Register());
      LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
      for (const MachineInstr &MI : make_range(MIS.begin(), MII))
        getVDefInterval(MI, LIS);
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto *const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->removeOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, std::nullopt);
  }
}

/// For a VirtReg clone, the \p New register should have the same physreg or
/// stackslot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
  if (VRM.hasShape(Old))
    VRM.assignVirt2Shape(New, VRM.getShape(Old));
}
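// A worked example (illustrative numbers only) of the hoisting margin used in
// runHoistSpills above: suppose the subtree rooted at a candidate block B
// contains two spills, in blocks with frequencies 60 and 50, so
// SubTreeCost = 110. With more than one spill the margin is
// BranchProbability(9, 10), so the spills are hoisted into B when
// 110 > freq(B) * 9/10; for freq(B) = 100 the threshold is 90 and the hoist
// happens. With a single spill the margin is BranchProbability(1, 1), so a
// lone spill of cost 80 is not hoisted into a block of frequency 100,
// since 80 > 100 does not hold.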