1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The inline spiller modifies the machine function directly instead of 10 // inserting spills and restores in VirtRegMap. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SplitKit.h" 15 #include "llvm/ADT/ArrayRef.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/MapVector.h" 18 #include "llvm/ADT/STLExtras.h" 19 #include "llvm/ADT/SetVector.h" 20 #include "llvm/ADT/SmallPtrSet.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/Statistic.h" 23 #include "llvm/Analysis/AliasAnalysis.h" 24 #include "llvm/CodeGen/LiveInterval.h" 25 #include "llvm/CodeGen/LiveIntervals.h" 26 #include "llvm/CodeGen/LiveRangeEdit.h" 27 #include "llvm/CodeGen/LiveStacks.h" 28 #include "llvm/CodeGen/MachineBasicBlock.h" 29 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" 30 #include "llvm/CodeGen/MachineDominators.h" 31 #include "llvm/CodeGen/MachineFunction.h" 32 #include "llvm/CodeGen/MachineFunctionPass.h" 33 #include "llvm/CodeGen/MachineInstr.h" 34 #include "llvm/CodeGen/MachineInstrBuilder.h" 35 #include "llvm/CodeGen/MachineInstrBundle.h" 36 #include "llvm/CodeGen/MachineLoopInfo.h" 37 #include "llvm/CodeGen/MachineOperand.h" 38 #include "llvm/CodeGen/MachineRegisterInfo.h" 39 #include "llvm/CodeGen/SlotIndexes.h" 40 #include "llvm/CodeGen/Spiller.h" 41 #include "llvm/CodeGen/StackMaps.h" 42 #include "llvm/CodeGen/TargetInstrInfo.h" 43 #include "llvm/CodeGen/TargetOpcodes.h" 44 #include "llvm/CodeGen/TargetRegisterInfo.h" 45 #include "llvm/CodeGen/TargetSubtargetInfo.h" 46 #include "llvm/CodeGen/VirtRegMap.h" 47 #include "llvm/Config/llvm-config.h" 48 #include "llvm/Support/BlockFrequency.h" 49 #include "llvm/Support/BranchProbability.h" 50 #include "llvm/Support/CommandLine.h" 51 #include "llvm/Support/Compiler.h" 52 #include "llvm/Support/Debug.h" 53 #include "llvm/Support/ErrorHandling.h" 54 #include "llvm/Support/raw_ostream.h" 55 #include <cassert> 56 #include <iterator> 57 #include <tuple> 58 #include <utility> 59 #include <vector> 60 61 using namespace llvm; 62 63 #define DEBUG_TYPE "regalloc" 64 65 STATISTIC(NumSpilledRanges, "Number of spilled live ranges"); 66 STATISTIC(NumSnippets, "Number of spilled snippets"); 67 STATISTIC(NumSpills, "Number of spills inserted"); 68 STATISTIC(NumSpillsRemoved, "Number of spills removed"); 69 STATISTIC(NumReloads, "Number of reloads inserted"); 70 STATISTIC(NumReloadsRemoved, "Number of reloads removed"); 71 STATISTIC(NumFolded, "Number of folded stack accesses"); 72 STATISTIC(NumFoldedLoads, "Number of folded loads"); 73 STATISTIC(NumRemats, "Number of rematerialized defs for spilling"); 74 75 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden, 76 cl::desc("Disable inline spill hoisting")); 77 static cl::opt<bool> 78 RestrictStatepointRemat("restrict-statepoint-remat", 79 cl::init(false), cl::Hidden, 80 cl::desc("Restrict remat for statepoint operands")); 81 82 namespace { 83 84 class HoistSpillHelper : private LiveRangeEdit::Delegate { 85 MachineFunction &MF; 86 LiveIntervals &LIS; 87 LiveStacks &LSS; 88 MachineDominatorTree &MDT; 89 MachineLoopInfo &Loops; 90 VirtRegMap &VRM; 91 
MachineRegisterInfo &MRI; 92 const TargetInstrInfo &TII; 93 const TargetRegisterInfo &TRI; 94 const MachineBlockFrequencyInfo &MBFI; 95 96 InsertPointAnalysis IPA; 97 98 // Map from StackSlot to the LiveInterval of the original register. 99 // Note the LiveInterval of the original register may have been deleted 100 // after it is spilled. We keep a copy here to track the range where 101 // spills can be moved. 102 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI; 103 104 // Map from pair of (StackSlot and Original VNI) to a set of spills which 105 // have the same stackslot and have equal values defined by Original VNI. 106 // These spills are mergeable and are hoist candidates. 107 using MergeableSpillsMap = 108 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>; 109 MergeableSpillsMap MergeableSpills; 110 111 /// This is the map from original register to a set containing all its 112 /// siblings. To hoist a spill to another BB, we need to find out a live 113 /// sibling there and use it as the source of the new spill. 114 DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap; 115 116 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI, 117 MachineBasicBlock &BB, Register &LiveReg); 118 119 void rmRedundantSpills( 120 SmallPtrSet<MachineInstr *, 16> &Spills, 121 SmallVectorImpl<MachineInstr *> &SpillsToRm, 122 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill); 123 124 void getVisitOrders( 125 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills, 126 SmallVectorImpl<MachineDomTreeNode *> &Orders, 127 SmallVectorImpl<MachineInstr *> &SpillsToRm, 128 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep, 129 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill); 130 131 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI, 132 SmallPtrSet<MachineInstr *, 16> &Spills, 133 SmallVectorImpl<MachineInstr *> &SpillsToRm, 134 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns); 135 136 public: 137 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf, 138 VirtRegMap &vrm) 139 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()), 140 LSS(pass.getAnalysis<LiveStacks>()), 141 MDT(pass.getAnalysis<MachineDominatorTree>()), 142 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm), 143 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()), 144 TRI(*mf.getSubtarget().getRegisterInfo()), 145 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()), 146 IPA(LIS, mf.getNumBlockIDs()) {} 147 148 void addToMergeableSpills(MachineInstr &Spill, int StackSlot, 149 unsigned Original); 150 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot); 151 void hoistAllSpills(); 152 void LRE_DidCloneVirtReg(Register, Register) override; 153 }; 154 155 class InlineSpiller : public Spiller { 156 MachineFunction &MF; 157 LiveIntervals &LIS; 158 LiveStacks &LSS; 159 MachineDominatorTree &MDT; 160 MachineLoopInfo &Loops; 161 VirtRegMap &VRM; 162 MachineRegisterInfo &MRI; 163 const TargetInstrInfo &TII; 164 const TargetRegisterInfo &TRI; 165 const MachineBlockFrequencyInfo &MBFI; 166 167 // Variables that are valid during spill(), but used by multiple methods. 168 LiveRangeEdit *Edit = nullptr; 169 LiveInterval *StackInt = nullptr; 170 int StackSlot; 171 Register Original; 172 173 // All registers to spill to StackSlot, including the main register. 174 SmallVector<Register, 8> RegsToSpill; 175 176 // All COPY instructions to/from snippets. 177 // They are ignored since both operands refer to the same stack slot. 
  // For bundled copies, this will only include the first header copy.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  // Object records spills information and does the hoisting.
  HoistSpillHelper HSpiller;

  // Live range weight calculator.
  VirtRegAuxInfo &VRAI;

  ~InlineSpiller() override = default;

public:
  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
                VirtRegAuxInfo &VRAI)
      : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
        LSS(Pass.getAnalysis<LiveStacks>()),
        MDT(Pass.getAnalysis<MachineDominatorTree>()),
        Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
        MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
        TRI(*MF.getSubtarget().getRegisterInfo()),
        MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
        HSpiller(Pass, MF, VRM), VRAI(VRAI) {}

  void spill(LiveRangeEdit &) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(Register Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(Register Reg);
  void spillAll();
};

} // end anonymous namespace

Spiller::~Spiller() = default;

void Spiller::anchor() {}

Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
                                   MachineFunction &MF, VirtRegMap &VRM,
                                   VirtRegAuxInfo &VRAI) {
  return new InlineSpiller(Pass, MF, VRM, VRAI);
}

//===----------------------------------------------------------------------===//
// Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.

/// isCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return an invalid register.
static Register isCopyOf(const MachineInstr &MI, Register Reg,
                         const TargetInstrInfo &TII) {
  if (!TII.isCopyInstr(MI))
    return Register();

  const MachineOperand &DstOp = MI.getOperand(0);
  const MachineOperand &SrcOp = MI.getOperand(1);

  // TODO: Probably only worth allowing subreg copies with undef dests.
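  // A copy between different subregister indices does not move the full value
  // unchanged, so it is not treated as a sibling copy.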
270 if (DstOp.getSubReg() != SrcOp.getSubReg()) 271 return Register(); 272 if (DstOp.getReg() == Reg) 273 return SrcOp.getReg(); 274 if (SrcOp.getReg() == Reg) 275 return DstOp.getReg(); 276 return Register(); 277 } 278 279 /// Check for a copy bundle as formed by SplitKit. 280 static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg, 281 const TargetInstrInfo &TII) { 282 if (!FirstMI.isBundled()) 283 return isCopyOf(FirstMI, Reg, TII); 284 285 assert(!FirstMI.isBundledWithPred() && FirstMI.isBundledWithSucc() && 286 "expected to see first instruction in bundle"); 287 288 Register SnipReg; 289 MachineBasicBlock::const_instr_iterator I = FirstMI.getIterator(); 290 while (I->isBundledWithSucc()) { 291 const MachineInstr &MI = *I; 292 auto CopyInst = TII.isCopyInstr(MI); 293 if (!CopyInst) 294 return Register(); 295 296 const MachineOperand &DstOp = *CopyInst->Destination; 297 const MachineOperand &SrcOp = *CopyInst->Source; 298 if (DstOp.getReg() == Reg) { 299 if (!SnipReg) 300 SnipReg = SrcOp.getReg(); 301 else if (SnipReg != SrcOp.getReg()) 302 return Register(); 303 } else if (SrcOp.getReg() == Reg) { 304 if (!SnipReg) 305 SnipReg = DstOp.getReg(); 306 else if (SnipReg != DstOp.getReg()) 307 return Register(); 308 } 309 310 ++I; 311 } 312 313 return Register(); 314 } 315 316 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) { 317 for (const MachineOperand &MO : MI.all_defs()) 318 if (MO.getReg().isVirtual()) 319 LIS.getInterval(MO.getReg()); 320 } 321 322 /// isSnippet - Identify if a live interval is a snippet that should be spilled. 323 /// It is assumed that SnipLI is a virtual register with the same original as 324 /// Edit->getReg(). 325 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) { 326 Register Reg = Edit->getReg(); 327 328 // A snippet is a tiny live range with only a single instruction using it 329 // besides copies to/from Reg or spills/fills. 330 // Exception is done for statepoint instructions which will fold fills 331 // into their operands. 332 // We accept: 333 // 334 // %snip = COPY %Reg / FILL fi# 335 // %snip = USE %snip 336 // %snip = STATEPOINT %snip in var arg area 337 // %Reg = COPY %snip / SPILL %snip, fi# 338 // 339 if (!LIS.intervalIsInOneMBB(SnipLI)) 340 return false; 341 342 // Number of defs should not exceed 2 not accounting defs coming from 343 // statepoint instructions. 344 unsigned NumValNums = SnipLI.getNumValNums(); 345 for (auto *VNI : SnipLI.vnis()) { 346 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 347 if (MI->getOpcode() == TargetOpcode::STATEPOINT) 348 --NumValNums; 349 } 350 if (NumValNums > 2) 351 return false; 352 353 MachineInstr *UseMI = nullptr; 354 355 // Check that all uses satisfy our criteria. 356 for (MachineRegisterInfo::reg_bundle_nodbg_iterator 357 RI = MRI.reg_bundle_nodbg_begin(SnipLI.reg()), 358 E = MRI.reg_bundle_nodbg_end(); 359 RI != E;) { 360 MachineInstr &MI = *RI++; 361 362 // Allow copies to/from Reg. 363 if (isCopyOfBundle(MI, Reg, TII)) 364 continue; 365 366 // Allow stack slot loads. 367 int FI; 368 if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) 369 continue; 370 371 // Allow stack slot stores. 372 if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) 373 continue; 374 375 if (StatepointOpers::isFoldableReg(&MI, SnipLI.reg())) 376 continue; 377 378 // Allow a single additional instruction. 
379 if (UseMI && &MI != UseMI) 380 return false; 381 UseMI = &MI; 382 } 383 return true; 384 } 385 386 /// collectRegsToSpill - Collect live range snippets that only have a single 387 /// real use. 388 void InlineSpiller::collectRegsToSpill() { 389 Register Reg = Edit->getReg(); 390 391 // Main register always spills. 392 RegsToSpill.assign(1, Reg); 393 SnippetCopies.clear(); 394 395 // Snippets all have the same original, so there can't be any for an original 396 // register. 397 if (Original == Reg) 398 return; 399 400 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 401 Register SnipReg = isCopyOfBundle(MI, Reg, TII); 402 if (!isSibling(SnipReg)) 403 continue; 404 LiveInterval &SnipLI = LIS.getInterval(SnipReg); 405 if (!isSnippet(SnipLI)) 406 continue; 407 SnippetCopies.insert(&MI); 408 if (isRegToSpill(SnipReg)) 409 continue; 410 RegsToSpill.push_back(SnipReg); 411 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n'); 412 ++NumSnippets; 413 } 414 } 415 416 bool InlineSpiller::isSibling(Register Reg) { 417 return Reg.isVirtual() && VRM.getOriginal(Reg) == Original; 418 } 419 420 /// It is beneficial to spill to earlier place in the same BB in case 421 /// as follows: 422 /// There is an alternative def earlier in the same MBB. 423 /// Hoist the spill as far as possible in SpillMBB. This can ease 424 /// register pressure: 425 /// 426 /// x = def 427 /// y = use x 428 /// s = copy x 429 /// 430 /// Hoisting the spill of s to immediately after the def removes the 431 /// interference between x and y: 432 /// 433 /// x = def 434 /// spill x 435 /// y = use killed x 436 /// 437 /// This hoist only helps when the copy kills its source. 438 /// 439 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI, 440 MachineInstr &CopyMI) { 441 SlotIndex Idx = LIS.getInstructionIndex(CopyMI); 442 #ifndef NDEBUG 443 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot()); 444 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy"); 445 #endif 446 447 Register SrcReg = CopyMI.getOperand(1).getReg(); 448 LiveInterval &SrcLI = LIS.getInterval(SrcReg); 449 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx); 450 LiveQueryResult SrcQ = SrcLI.Query(Idx); 451 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def); 452 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill()) 453 return false; 454 455 // Conservatively extend the stack slot range to the range of the original 456 // value. We may be able to do better with stack slot coloring by being more 457 // careful here. 458 assert(StackInt && "No stack slot assigned yet."); 459 LiveInterval &OrigLI = LIS.getInterval(Original); 460 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx); 461 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0)); 462 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": " 463 << *StackInt << '\n'); 464 465 // We are going to spill SrcVNI immediately after its def, so clear out 466 // any later spills of the same value. 467 eliminateRedundantSpills(SrcLI, SrcVNI); 468 469 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def); 470 MachineBasicBlock::iterator MII; 471 if (SrcVNI->isPHIDef()) 472 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin()); 473 else { 474 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def); 475 assert(DefMI && "Defining instruction disappeared"); 476 MII = DefMI; 477 ++MII; 478 } 479 MachineInstrSpan MIS(MII, MBB); 480 // Insert spill without kill flag immediately after def. 
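  // SrcReg remains live after this point (at least up to the copy being
  // replaced), so the inserted store must not carry a kill flag.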
481 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot, 482 MRI.getRegClass(SrcReg), &TRI, Register()); 483 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII); 484 for (const MachineInstr &MI : make_range(MIS.begin(), MII)) 485 getVDefInterval(MI, LIS); 486 --MII; // Point to store instruction. 487 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII); 488 489 // If there is only 1 store instruction is required for spill, add it 490 // to mergeable list. In X86 AMX, 2 intructions are required to store. 491 // We disable the merge for this case. 492 if (MIS.begin() == MII) 493 HSpiller.addToMergeableSpills(*MII, StackSlot, Original); 494 ++NumSpills; 495 return true; 496 } 497 498 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any 499 /// redundant spills of this value in SLI.reg and sibling copies. 500 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) { 501 assert(VNI && "Missing value"); 502 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 503 WorkList.push_back(std::make_pair(&SLI, VNI)); 504 assert(StackInt && "No stack slot assigned yet."); 505 506 do { 507 LiveInterval *LI; 508 std::tie(LI, VNI) = WorkList.pop_back_val(); 509 Register Reg = LI->reg(); 510 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@' 511 << VNI->def << " in " << *LI << '\n'); 512 513 // Regs to spill are taken care of. 514 if (isRegToSpill(Reg)) 515 continue; 516 517 // Add all of VNI's live range to StackInt. 518 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0)); 519 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n'); 520 521 // Find all spills and copies of VNI. 522 for (MachineInstr &MI : 523 llvm::make_early_inc_range(MRI.use_nodbg_bundles(Reg))) { 524 if (!MI.mayStore() && !TII.isCopyInstr(MI)) 525 continue; 526 SlotIndex Idx = LIS.getInstructionIndex(MI); 527 if (LI->getVNInfoAt(Idx) != VNI) 528 continue; 529 530 // Follow sibling copies down the dominator tree. 531 if (Register DstReg = isCopyOfBundle(MI, Reg, TII)) { 532 if (isSibling(DstReg)) { 533 LiveInterval &DstLI = LIS.getInterval(DstReg); 534 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot()); 535 assert(DstVNI && "Missing defined value"); 536 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot"); 537 538 WorkList.push_back(std::make_pair(&DstLI, DstVNI)); 539 } 540 continue; 541 } 542 543 // Erase spills. 544 int FI; 545 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) { 546 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI); 547 // eliminateDeadDefs won't normally remove stores, so switch opcode. 548 MI.setDesc(TII.get(TargetOpcode::KILL)); 549 DeadDefs.push_back(&MI); 550 ++NumSpillsRemoved; 551 if (HSpiller.rmFromMergeableSpills(MI, StackSlot)) 552 --NumSpills; 553 } 554 } 555 } while (!WorkList.empty()); 556 } 557 558 //===----------------------------------------------------------------------===// 559 // Rematerialization 560 //===----------------------------------------------------------------------===// 561 562 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining 563 /// instruction cannot be eliminated. 
See through snippet copies 564 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) { 565 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 566 WorkList.push_back(std::make_pair(LI, VNI)); 567 do { 568 std::tie(LI, VNI) = WorkList.pop_back_val(); 569 if (!UsedValues.insert(VNI).second) 570 continue; 571 572 if (VNI->isPHIDef()) { 573 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def); 574 for (MachineBasicBlock *P : MBB->predecessors()) { 575 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P)); 576 if (PVNI) 577 WorkList.push_back(std::make_pair(LI, PVNI)); 578 } 579 continue; 580 } 581 582 // Follow snippet copies. 583 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 584 if (!SnippetCopies.count(MI)) 585 continue; 586 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg()); 587 assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy"); 588 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true)); 589 assert(SnipVNI && "Snippet undefined before copy"); 590 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI)); 591 } while (!WorkList.empty()); 592 } 593 594 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg, 595 MachineInstr &MI) { 596 if (!RestrictStatepointRemat) 597 return true; 598 // Here's a quick explanation of the problem we're trying to handle here: 599 // * There are some pseudo instructions with more vreg uses than there are 600 // physical registers on the machine. 601 // * This is normally handled by spilling the vreg, and folding the reload 602 // into the user instruction. (Thus decreasing the number of used vregs 603 // until the remainder can be assigned to physregs.) 604 // * However, since we may try to spill vregs in any order, we can end up 605 // trying to spill each operand to the instruction, and then rematting it 606 // instead. When that happens, the new live intervals (for the remats) are 607 // expected to be trivially assignable (i.e. RS_Done). However, since we 608 // may have more remats than physregs, we're guaranteed to fail to assign 609 // one. 610 // At the moment, we only handle this for STATEPOINTs since they're the only 611 // pseudo op where we've seen this. If we start seeing other instructions 612 // with the same problem, we need to revisit this. 613 if (MI.getOpcode() != TargetOpcode::STATEPOINT) 614 return true; 615 // For STATEPOINTs we allow re-materialization for fixed arguments only hoping 616 // that number of physical registers is enough to cover all fixed arguments. 617 // If it is not true we need to revisit it. 618 for (unsigned Idx = StatepointOpers(&MI).getVarIdx(), 619 EndIdx = MI.getNumOperands(); 620 Idx < EndIdx; ++Idx) { 621 MachineOperand &MO = MI.getOperand(Idx); 622 if (MO.isReg() && MO.getReg() == VReg) 623 return false; 624 } 625 return true; 626 } 627 628 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading. 
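/// Returns true if MI was rewritten: either the use was marked <undef>, the
/// reload was folded into MI, or a rematerialized def was inserted before MI.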
629 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { 630 // Analyze instruction 631 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops; 632 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops); 633 634 if (!RI.Reads) 635 return false; 636 637 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true); 638 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex()); 639 640 if (!ParentVNI) { 641 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: "); 642 for (MachineOperand &MO : MI.all_uses()) 643 if (MO.getReg() == VirtReg.reg()) 644 MO.setIsUndef(); 645 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI); 646 return true; 647 } 648 649 if (SnippetCopies.count(&MI)) 650 return false; 651 652 LiveInterval &OrigLI = LIS.getInterval(Original); 653 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx); 654 LiveRangeEdit::Remat RM(ParentVNI); 655 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def); 656 657 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) { 658 markValueUsed(&VirtReg, ParentVNI); 659 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); 660 return false; 661 } 662 663 // If the instruction also writes VirtReg.reg, it had better not require the 664 // same register for uses and defs. 665 if (RI.Tied) { 666 markValueUsed(&VirtReg, ParentVNI); 667 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI); 668 return false; 669 } 670 671 // Before rematerializing into a register for a single instruction, try to 672 // fold a load into the instruction. That avoids allocating a new register. 673 if (RM.OrigMI->canFoldAsLoad() && 674 foldMemoryOperand(Ops, RM.OrigMI)) { 675 Edit->markRematerialized(RM.ParentVNI); 676 ++NumFoldedLoads; 677 return true; 678 } 679 680 // If we can't guarantee that we'll be able to actually assign the new vreg, 681 // we can't remat. 682 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) { 683 markValueUsed(&VirtReg, ParentVNI); 684 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); 685 return false; 686 } 687 688 // Allocate a new register for the remat. 689 Register NewVReg = Edit->createFrom(Original); 690 691 // Finally we can rematerialize OrigMI before MI. 692 SlotIndex DefIdx = 693 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI); 694 695 // We take the DebugLoc from MI, since OrigMI may be attributed to a 696 // different source location. 697 auto *NewMI = LIS.getInstructionFromIndex(DefIdx); 698 NewMI->setDebugLoc(MI.getDebugLoc()); 699 700 (void)DefIdx; 701 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' 702 << *LIS.getInstructionFromIndex(DefIdx)); 703 704 // Replace operands 705 for (const auto &OpPair : Ops) { 706 MachineOperand &MO = OpPair.first->getOperand(OpPair.second); 707 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) { 708 MO.setReg(NewVReg); 709 MO.setIsKill(); 710 } 711 } 712 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n'); 713 714 ++NumRemats; 715 return true; 716 } 717 718 /// reMaterializeAll - Try to rematerialize as many uses as possible, 719 /// and trim the live ranges after. 720 void InlineSpiller::reMaterializeAll() { 721 if (!Edit->anyRematerializable()) 722 return; 723 724 UsedValues.clear(); 725 726 // Try to remat before all uses of snippets. 
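  // (RegsToSpill contains the register being spilled together with its
  // snippets, so this visits every use that could be rematerialized.)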
727 bool anyRemat = false; 728 for (Register Reg : RegsToSpill) { 729 LiveInterval &LI = LIS.getInterval(Reg); 730 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 731 // Debug values are not allowed to affect codegen. 732 if (MI.isDebugValue()) 733 continue; 734 735 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug " 736 "instruction that isn't a DBG_VALUE"); 737 738 anyRemat |= reMaterializeFor(LI, MI); 739 } 740 } 741 if (!anyRemat) 742 return; 743 744 // Remove any values that were completely rematted. 745 for (Register Reg : RegsToSpill) { 746 LiveInterval &LI = LIS.getInterval(Reg); 747 for (VNInfo *VNI : LI.vnis()) { 748 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI)) 749 continue; 750 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 751 MI->addRegisterDead(Reg, &TRI); 752 if (!MI->allDefsAreDead()) 753 continue; 754 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI); 755 DeadDefs.push_back(MI); 756 } 757 } 758 759 // Eliminate dead code after remat. Note that some snippet copies may be 760 // deleted here. 761 if (DeadDefs.empty()) 762 return; 763 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n"); 764 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill); 765 766 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions 767 // after rematerialization. To remove a VNI for a vreg from its LiveInterval, 768 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all 769 // removed, PHI VNI are still left in the LiveInterval. 770 // So to get rid of unused reg, we need to check whether it has non-dbg 771 // reference instead of whether it has non-empty interval. 772 unsigned ResultPos = 0; 773 for (Register Reg : RegsToSpill) { 774 if (MRI.reg_nodbg_empty(Reg)) { 775 Edit->eraseVirtReg(Reg); 776 continue; 777 } 778 779 assert(LIS.hasInterval(Reg) && 780 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) && 781 "Empty and not used live-range?!"); 782 783 RegsToSpill[ResultPos++] = Reg; 784 } 785 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end()); 786 LLVM_DEBUG(dbgs() << RegsToSpill.size() 787 << " registers to spill after remat.\n"); 788 } 789 790 //===----------------------------------------------------------------------===// 791 // Spilling 792 //===----------------------------------------------------------------------===// 793 794 /// If MI is a load or store of StackSlot, it can be removed. 795 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) { 796 int FI = 0; 797 Register InstrReg = TII.isLoadFromStackSlot(*MI, FI); 798 bool IsLoad = InstrReg; 799 if (!IsLoad) 800 InstrReg = TII.isStoreToStackSlot(*MI, FI); 801 802 // We have a stack access. Is it the right register and slot? 803 if (InstrReg != Reg || FI != StackSlot) 804 return false; 805 806 if (!IsLoad) 807 HSpiller.rmFromMergeableSpills(*MI, StackSlot); 808 809 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI); 810 LIS.RemoveMachineInstrFromMaps(*MI); 811 MI->eraseFromParent(); 812 813 if (IsLoad) { 814 ++NumReloadsRemoved; 815 --NumReloads; 816 } else { 817 ++NumSpillsRemoved; 818 --NumSpills; 819 } 820 821 return true; 822 } 823 824 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 825 LLVM_DUMP_METHOD 826 // Dump the range of instructions from B to E with their slot indexes. 
827 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, 828 MachineBasicBlock::iterator E, 829 LiveIntervals const &LIS, 830 const char *const header, 831 Register VReg = Register()) { 832 char NextLine = '\n'; 833 char SlotIndent = '\t'; 834 835 if (std::next(B) == E) { 836 NextLine = ' '; 837 SlotIndent = ' '; 838 } 839 840 dbgs() << '\t' << header << ": " << NextLine; 841 842 for (MachineBasicBlock::iterator I = B; I != E; ++I) { 843 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot(); 844 845 // If a register was passed in and this instruction has it as a 846 // destination that is marked as an early clobber, print the 847 // early-clobber slot index. 848 if (VReg) { 849 MachineOperand *MO = I->findRegisterDefOperand(VReg); 850 if (MO && MO->isEarlyClobber()) 851 Idx = Idx.getRegSlot(true); 852 } 853 854 dbgs() << SlotIndent << Idx << '\t' << *I; 855 } 856 } 857 #endif 858 859 /// foldMemoryOperand - Try folding stack slot references in Ops into their 860 /// instructions. 861 /// 862 /// @param Ops Operand indices from AnalyzeVirtRegInBundle(). 863 /// @param LoadMI Load instruction to use instead of stack slot when non-null. 864 /// @return True on success. 865 bool InlineSpiller:: 866 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops, 867 MachineInstr *LoadMI) { 868 if (Ops.empty()) 869 return false; 870 // Don't attempt folding in bundles. 871 MachineInstr *MI = Ops.front().first; 872 if (Ops.back().first != MI || MI->isBundled()) 873 return false; 874 875 bool WasCopy = TII.isCopyInstr(*MI).has_value(); 876 Register ImpReg; 877 878 // TII::foldMemoryOperand will do what we need here for statepoint 879 // (fold load into use and remove corresponding def). We will replace 880 // uses of removed def with loads (spillAroundUses). 881 // For that to work we need to untie def and use to pass it through 882 // foldMemoryOperand and signal foldPatchpoint that it is allowed to 883 // fold them. 884 bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT; 885 886 // Spill subregs if the target allows it. 887 // We always want to spill subregs for stackmap/patchpoint pseudos. 888 bool SpillSubRegs = TII.isSubregFoldable() || 889 MI->getOpcode() == TargetOpcode::STATEPOINT || 890 MI->getOpcode() == TargetOpcode::PATCHPOINT || 891 MI->getOpcode() == TargetOpcode::STACKMAP; 892 893 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied 894 // operands. 895 SmallVector<unsigned, 8> FoldOps; 896 for (const auto &OpPair : Ops) { 897 unsigned Idx = OpPair.second; 898 assert(MI == OpPair.first && "Instruction conflict during operand folding"); 899 MachineOperand &MO = MI->getOperand(Idx); 900 901 // No point restoring an undef read, and we'll produce an invalid live 902 // interval. 903 // TODO: Is this really the correct way to handle undef tied uses? 904 if (MO.isUse() && !MO.readsReg() && !MO.isTied()) 905 continue; 906 907 if (MO.isImplicit()) { 908 ImpReg = MO.getReg(); 909 continue; 910 } 911 912 if (!SpillSubRegs && MO.getSubReg()) 913 return false; 914 // We cannot fold a load instruction into a def. 915 if (LoadMI && MO.isDef()) 916 return false; 917 // Tied use operands should not be passed to foldMemoryOperand. 918 if (UntieRegs || !MI->isRegTiedToDefOperand(Idx)) 919 FoldOps.push_back(Idx); 920 } 921 922 // If we only have implicit uses, we won't be able to fold that. 923 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try! 
924 if (FoldOps.empty()) 925 return false; 926 927 MachineInstrSpan MIS(MI, MI->getParent()); 928 929 SmallVector<std::pair<unsigned, unsigned> > TiedOps; 930 if (UntieRegs) 931 for (unsigned Idx : FoldOps) { 932 MachineOperand &MO = MI->getOperand(Idx); 933 if (!MO.isTied()) 934 continue; 935 unsigned Tied = MI->findTiedOperandIdx(Idx); 936 if (MO.isUse()) 937 TiedOps.emplace_back(Tied, Idx); 938 else { 939 assert(MO.isDef() && "Tied to not use and def?"); 940 TiedOps.emplace_back(Idx, Tied); 941 } 942 MI->untieRegOperand(Idx); 943 } 944 945 MachineInstr *FoldMI = 946 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS) 947 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM); 948 if (!FoldMI) { 949 // Re-tie operands. 950 for (auto Tied : TiedOps) 951 MI->tieOperands(Tied.first, Tied.second); 952 return false; 953 } 954 955 // Remove LIS for any dead defs in the original MI not in FoldMI. 956 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) { 957 if (!MO->isReg()) 958 continue; 959 Register Reg = MO->getReg(); 960 if (!Reg || Reg.isVirtual() || MRI.isReserved(Reg)) { 961 continue; 962 } 963 // Skip non-Defs, including undef uses and internal reads. 964 if (MO->isUse()) 965 continue; 966 PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI); 967 if (RI.FullyDefined) 968 continue; 969 // FoldMI does not define this physreg. Remove the LI segment. 970 assert(MO->isDead() && "Cannot fold physreg def"); 971 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot(); 972 LIS.removePhysRegDefAt(Reg.asMCReg(), Idx); 973 } 974 975 int FI; 976 if (TII.isStoreToStackSlot(*MI, FI) && 977 HSpiller.rmFromMergeableSpills(*MI, FI)) 978 --NumSpills; 979 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI); 980 // Update the call site info. 981 if (MI->isCandidateForCallSiteEntry()) 982 MI->getMF()->moveCallSiteInfo(MI, FoldMI); 983 984 // If we've folded a store into an instruction labelled with debug-info, 985 // record a substitution from the old operand to the memory operand. Handle 986 // the simple common case where operand 0 is the one being folded, plus when 987 // the destination operand is also a tied def. More values could be 988 // substituted / preserved with more analysis. 989 if (MI->peekDebugInstrNum() && Ops[0].second == 0) { 990 // Helper lambda. 991 auto MakeSubstitution = [this,FoldMI,MI,&Ops]() { 992 // Substitute old operand zero to the new instructions memory operand. 993 unsigned OldOperandNum = Ops[0].second; 994 unsigned NewNum = FoldMI->getDebugInstrNum(); 995 unsigned OldNum = MI->getDebugInstrNum(); 996 MF.makeDebugValueSubstitution({OldNum, OldOperandNum}, 997 {NewNum, MachineFunction::DebugOperandMemNumber}); 998 }; 999 1000 const MachineOperand &Op0 = MI->getOperand(Ops[0].second); 1001 if (Ops.size() == 1 && Op0.isDef()) { 1002 MakeSubstitution(); 1003 } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() && 1004 Op0.getReg() == MI->getOperand(1).getReg()) { 1005 MakeSubstitution(); 1006 } 1007 } else if (MI->peekDebugInstrNum()) { 1008 // This is a debug-labelled instruction, but the operand being folded isn't 1009 // at operand zero. Most likely this means it's a load being folded in. 1010 // Substitute any register defs from operand zero up to the one being 1011 // folded -- past that point, we don't know what the new operand indexes 1012 // will be. 1013 MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second); 1014 } 1015 1016 MI->eraseFromParent(); 1017 1018 // Insert any new instructions other than FoldMI into the LIS maps. 
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->removeOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    // If only one store instruction is required for the spill, add it to the
    // mergeable list. On X86 AMX, two instructions are required to store, so
    // we disable merging for that case.
    if (std::distance(MIS.begin(), MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}

void InlineSpiller::insertReload(Register NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI, Register());

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                                NewVReg));
  ++NumReloads;
}

/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// not relevant.
static bool isRealSpill(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return true;
  assert(Def.getNumOperands() == 1 &&
         "Implicit def with more than one definition");
  // We can say that the VReg defined by Def is undef, only if it is
  // fully defined by Def. Otherwise, some of the lanes may not be
  // undef and the value of the VReg matters.
  return Def.getOperand(0).getSubReg();
}

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  // Spills are not terminators, so inserting a spill after a terminator would
  // violate invariants in MachineVerifier.
  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  MachineBasicBlock::iterator SpillBefore = std::next(MI);
  bool IsRealSpill = isRealSpill(*MI);

  if (IsRealSpill)
    TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI, Register());
  else
    // Don't spill undef value.
    // Anything works for undef, in particular keeping the memory
    // uninitialized is a viable option and it saves code size and
    // run time.
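    // A KILL pseudo still gives NewVReg a final use at this point, so the
    // slot-index and live-interval updates below work the same way as for a
    // real store.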
1102 BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL)) 1103 .addReg(NewVReg, getKillRegState(isKill)); 1104 1105 MachineBasicBlock::iterator Spill = std::next(MI); 1106 LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end()); 1107 for (const MachineInstr &MI : make_range(Spill, MIS.end())) 1108 getVDefInterval(MI, LIS); 1109 1110 LLVM_DEBUG( 1111 dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill")); 1112 ++NumSpills; 1113 // If there is only 1 store instruction is required for spill, add it 1114 // to mergeable list. In X86 AMX, 2 intructions are required to store. 1115 // We disable the merge for this case. 1116 if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1) 1117 HSpiller.addToMergeableSpills(*Spill, StackSlot, Original); 1118 } 1119 1120 /// spillAroundUses - insert spill code around each use of Reg. 1121 void InlineSpiller::spillAroundUses(Register Reg) { 1122 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n'); 1123 LiveInterval &OldLI = LIS.getInterval(Reg); 1124 1125 // Iterate over instructions using Reg. 1126 for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) { 1127 // Debug values are not allowed to affect codegen. 1128 if (MI.isDebugValue()) { 1129 // Modify DBG_VALUE now that the value is in a spill slot. 1130 MachineBasicBlock *MBB = MI.getParent(); 1131 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI); 1132 buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg); 1133 MBB->erase(MI); 1134 continue; 1135 } 1136 1137 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug " 1138 "instruction that isn't a DBG_VALUE"); 1139 1140 // Ignore copies to/from snippets. We'll delete them. 1141 if (SnippetCopies.count(&MI)) 1142 continue; 1143 1144 // Stack slot accesses may coalesce away. 1145 if (coalesceStackAccess(&MI, Reg)) 1146 continue; 1147 1148 // Analyze instruction. 1149 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops; 1150 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops); 1151 1152 // Find the slot index where this instruction reads and writes OldLI. 1153 // This is usually the def slot, except for tied early clobbers. 1154 SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot(); 1155 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true))) 1156 if (SlotIndex::isSameInstr(Idx, VNI->def)) 1157 Idx = VNI->def; 1158 1159 // Check for a sibling copy. 1160 Register SibReg = isCopyOfBundle(MI, Reg, TII); 1161 if (SibReg && isSibling(SibReg)) { 1162 // This may actually be a copy between snippets. 1163 if (isRegToSpill(SibReg)) { 1164 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI); 1165 SnippetCopies.insert(&MI); 1166 continue; 1167 } 1168 if (RI.Writes) { 1169 if (hoistSpillInsideBB(OldLI, MI)) { 1170 // This COPY is now dead, the value is already in the stack slot. 1171 MI.getOperand(0).setIsDead(); 1172 DeadDefs.push_back(&MI); 1173 continue; 1174 } 1175 } else { 1176 // This is a reload for a sib-reg copy. Drop spills downstream. 1177 LiveInterval &SibLI = LIS.getInterval(SibReg); 1178 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx)); 1179 // The COPY will fold to a reload below. 1180 } 1181 } 1182 1183 // Attempt to fold memory ops. 1184 if (foldMemoryOperand(Ops)) 1185 continue; 1186 1187 // Create a new virtual register for spill/fill. 1188 // FIXME: Infer regclass from instruction alone. 1189 Register NewVReg = Edit->createFrom(Reg); 1190 1191 if (RI.Reads) 1192 insertReload(NewVReg, Idx, &MI); 1193 1194 // Rewrite instruction operands. 
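    // Point every operand of Reg in this bundle at NewVReg. Uses that are not
    // tied to a def are killed here; a live def is noted so that a spill can
    // be inserted after the instruction.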
1195 bool hasLiveDef = false; 1196 for (const auto &OpPair : Ops) { 1197 MachineOperand &MO = OpPair.first->getOperand(OpPair.second); 1198 MO.setReg(NewVReg); 1199 if (MO.isUse()) { 1200 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second)) 1201 MO.setIsKill(); 1202 } else { 1203 if (!MO.isDead()) 1204 hasLiveDef = true; 1205 } 1206 } 1207 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n'); 1208 1209 // FIXME: Use a second vreg if instruction has no tied ops. 1210 if (RI.Writes) 1211 if (hasLiveDef) 1212 insertSpill(NewVReg, true, &MI); 1213 } 1214 } 1215 1216 /// spillAll - Spill all registers remaining after rematerialization. 1217 void InlineSpiller::spillAll() { 1218 // Update LiveStacks now that we are committed to spilling. 1219 if (StackSlot == VirtRegMap::NO_STACK_SLOT) { 1220 StackSlot = VRM.assignVirt2StackSlot(Original); 1221 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original)); 1222 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator()); 1223 } else 1224 StackInt = &LSS.getInterval(StackSlot); 1225 1226 if (Original != Edit->getReg()) 1227 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot); 1228 1229 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values"); 1230 for (Register Reg : RegsToSpill) 1231 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg), 1232 StackInt->getValNumInfo(0)); 1233 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n'); 1234 1235 // Spill around uses of all RegsToSpill. 1236 for (Register Reg : RegsToSpill) 1237 spillAroundUses(Reg); 1238 1239 // Hoisted spills may cause dead code. 1240 if (!DeadDefs.empty()) { 1241 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n"); 1242 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill); 1243 } 1244 1245 // Finally delete the SnippetCopies. 1246 for (Register Reg : RegsToSpill) { 1247 for (MachineInstr &MI : 1248 llvm::make_early_inc_range(MRI.reg_instructions(Reg))) { 1249 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy"); 1250 // FIXME: Do this with a LiveRangeEdit callback. 1251 LIS.getSlotIndexes()->removeSingleMachineInstrFromMaps(MI); 1252 MI.eraseFromBundle(); 1253 } 1254 } 1255 1256 // Delete all spilled registers. 1257 for (Register Reg : RegsToSpill) 1258 Edit->eraseVirtReg(Reg); 1259 } 1260 1261 void InlineSpiller::spill(LiveRangeEdit &edit) { 1262 ++NumSpilledRanges; 1263 Edit = &edit; 1264 assert(!Register::isStackSlot(edit.getReg()) && 1265 "Trying to spill a stack slot."); 1266 // Share a stack slot among all descendants of Original. 1267 Original = VRM.getOriginal(edit.getReg()); 1268 StackSlot = VRM.getStackSlot(Original); 1269 StackInt = nullptr; 1270 1271 LLVM_DEBUG(dbgs() << "Inline spilling " 1272 << TRI.getRegClassName(MRI.getRegClass(edit.getReg())) 1273 << ':' << edit.getParent() << "\nFrom original " 1274 << printReg(Original) << '\n'); 1275 assert(edit.getParent().isSpillable() && 1276 "Attempting to spill already spilled value."); 1277 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs"); 1278 1279 collectRegsToSpill(); 1280 reMaterializeAll(); 1281 1282 // Remat may handle everything. 1283 if (!RegsToSpill.empty()) 1284 spillAll(); 1285 1286 Edit->calculateRegClassAndHint(MF, VRAI); 1287 } 1288 1289 /// Optimizations after all the reg selections and spills are done. 1290 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); } 1291 1292 /// When a spill is inserted, add the spill to MergeableSpills map. 
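/// Spills are keyed by the pair (stack slot, original value number); spills
/// that share a key store the same value and are candidates for merging in
/// hoistAllSpills().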
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (!StackSlotToOrigLI.contains(StackSlot)) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}

/// When a spill is removed, remove the spill from the MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}

/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a live sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
  // The original def could be after the last insert point in the root block,
  // in which case we can't hoist to here.
  if (Idx < OrigVNI.def) {
    // TODO: We could do better here. If LI is not live in the landing pad,
    // we could hoist the spill after the LIP.
    LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
    return false;
  }
  Register OrigReg = OrigLI.reg();
  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (const Register &SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}

/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] to see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with the smaller SlotIndex.
  for (auto *const CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  for (auto *const SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}

/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // a non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // of the OrigReg, which means the def instruction dominates all the spills
  // to be hoisted. We choose the BB where the def instruction is located as
  // the Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every dominator tree node with a spill, walk up the dominator tree
  // towards the Root node until it is reached. If another node containing a
  // spill is found along the path, the spill we started from is redundant
  // and is queued for removal. All the nodes on the path from the first node
  // with a non-redundant spill to the Root node are added to the WorkSet,
  // which will contain all the possible locations where spills may be
  // hoisted to after the loop below is done.
  for (auto *const Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
        /// If the Node is already in WorkSet, the path from the Node to
        /// the Root node has already been traversed for another spill,
        /// so there is no need to repeat it.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add the BB containing the original spill to SpillsToKeep, i.e., set
      // its initial status before hoisting starts. The map value for a BB
      // containing an original spill is 0, to distinguish it from BBs
      // containing hoisted spills, which are inserted into SpillsToKeep later
      // during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getBase().getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    for (MachineDomTreeNode *Child : Node->children()) {
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders and WorkSet have different sizes");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}

/// Try to hoist spills according to BB hotness. The spills to be removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // SpillsToKeep contains all the nodes where spills are to be inserted
  // during hoisting. If the spill to be inserted is an original spill
  // (not a hoisted one), the value of the map entry is 0. If the spill
  // is a hoisted spill, the value of the map entry is the VReg to be used
  // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from BB to the first spill inside of it.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap maps a dom tree node to a pair of a node set and the
  // cost of all the spills inside those nodes. The node set contains the
  // locations where spills are to be inserted in the subtree of the current
  // node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate the Orders set in reverse order, which is a bottom-up order
  // in the dominator tree.

  // Iterate Orders set in reverse order, which will be a bottom-up order
  // in the dominator tree. Once we visit a dom tree node, we know its
  // children have already been visited and the spill locations in the
  // subtrees of all the children have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.contains(*RIt) && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of the spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in the subtree of the current node (*RIt) into
    // SpillsInSubTreeMap[*RIt].first.
    for (MachineDomTreeNode *Child : (*RIt)->children()) {
      if (!SpillsInSubTreeMap.contains(Child))
        continue;
      // The reference "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first"
      // below must be taken before getting the begin and end iterators of
      // SpillsInSubTreeMap[Child].first, or else the iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is inserted for the first
      // time, the map grows, and the original buckets in the map are moved.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in the subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert the spill.
    Register LiveReg;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias slightly
    // towards hoisting the spill.
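    // For example (hypothetical frequencies), with two mergeable spills in
    // blocks whose frequencies sum to 100, a candidate block of frequency
    // 110 is still accepted because 110 * 9/10 = 99 < 100; with a single
    // spill in the subtree, no margin is applied and the comparison below
    // is unbiased.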
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to the current Block.
      for (auto *const SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB that contains an original spill, add that
        // spill to SpillsToRm.
        if (SpillsToKeep.contains(SpillBB) && !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will no longer contain a spill; remove it from
        // SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // The current Block is the BB containing the new hoisted spill. Add it
      // to SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For entries in SpillsToKeep with LiveReg set (i.e., not an original
  // spill), save them to SpillsToIns.
  for (const auto &Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}

/// For spills with equal values, remove redundant spills and hoist those
/// left to less hot spots.
///
/// Spills with equal values are collected into the same set in
/// MergeableSpills when a spill is inserted. These equal spills originate
/// from the same defining instruction and are dominated by it. Before
/// hoisting all the equal spills, redundant spills inside the same BB are
/// first marked to be deleted. Then, starting from the spills left, walk up
/// the dominator tree towards the Root node where the defining instruction
/// is located, mark the dominated spills to be deleted along the way, and
/// collect the BB nodes on the path from non-dominated spills to the
/// defining instruction into a WorkSet. The nodes in WorkSet are the
/// candidate places where we consider hoisting the spills. We iterate the
/// WorkSet in bottom-up order, and for each node we decide whether to hoist
/// the spills inside its subtree to that node. In this way, we can get a
/// local benefit even if hoisting all the equal spills to one cold place is
/// impossible.
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<Register, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
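
  // Build Virt2SiblingsMap: group every virtual register that still has a
  // definition under the original (pre-split) register it was derived from,
  // so sibling registers created by live range splitting can be looked up
  // by their original register.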
  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    Register Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto &Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // Insert hoisted spills.
    for (auto const &Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      Register LiveReg = Insert.second;
      MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
      MachineInstrSpan MIS(MII, BB);
      TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI, Register());
      LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
      for (const MachineInstr &MI : make_range(MIS.begin(), MII))
        getVDefInterval(MI, LIS);
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto *const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->removeOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, std::nullopt);
  }
}

/// For a cloned virtual register, the \p New register should be assigned the
/// same physreg or stack slot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
  if (VRM.hasShape(Old))
    VRM.assignVirt2Shape(New, VRM.getShape(Old));
}