//===- RegAllocGreedy.cpp - greedy register allocator ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#include "RegAllocGreedy.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "RegAllocEvictionAdvisor.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <queue>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits, "Number of split local live ranges");
STATISTIC(NumEvicted, "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
"split-spill-mode", cl::Hidden, 93 cl::desc("Spill mode for splitting live ranges"), 94 cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"), 95 clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"), 96 clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")), 97 cl::init(SplitEditor::SM_Speed)); 98 99 static cl::opt<unsigned> 100 LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden, 101 cl::desc("Last chance recoloring max depth"), 102 cl::init(5)); 103 104 static cl::opt<unsigned> LastChanceRecoloringMaxInterference( 105 "lcr-max-interf", cl::Hidden, 106 cl::desc("Last chance recoloring maximum number of considered" 107 " interference at a time"), 108 cl::init(8)); 109 110 static cl::opt<bool> ExhaustiveSearch( 111 "exhaustive-register-search", cl::NotHidden, 112 cl::desc("Exhaustive Search for registers bypassing the depth " 113 "and interference cutoffs of last chance recoloring"), 114 cl::Hidden); 115 116 static cl::opt<bool> EnableDeferredSpilling( 117 "enable-deferred-spilling", cl::Hidden, 118 cl::desc("Instead of spilling a variable right away, defer the actual " 119 "code insertion to the end of the allocation. That way the " 120 "allocator might still find a suitable coloring for this " 121 "variable because of other evicted variables."), 122 cl::init(false)); 123 124 // FIXME: Find a good default for this flag and remove the flag. 125 static cl::opt<unsigned> 126 CSRFirstTimeCost("regalloc-csr-first-time-cost", 127 cl::desc("Cost for first time use of callee-saved register."), 128 cl::init(0), cl::Hidden); 129 130 static cl::opt<bool> ConsiderLocalIntervalCost( 131 "consider-local-interval-cost", cl::Hidden, 132 cl::desc("Consider the cost of local intervals created by a split " 133 "candidate when choosing the best split candidate."), 134 cl::init(false)); 135 136 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", 137 createGreedyRegisterAllocator); 138 139 char RAGreedy::ID = 0; 140 char &llvm::RAGreedyID = RAGreedy::ID; 141 142 INITIALIZE_PASS_BEGIN(RAGreedy, "greedy", 143 "Greedy Register Allocator", false, false) 144 INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables) 145 INITIALIZE_PASS_DEPENDENCY(SlotIndexes) 146 INITIALIZE_PASS_DEPENDENCY(LiveIntervals) 147 INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer) 148 INITIALIZE_PASS_DEPENDENCY(MachineScheduler) 149 INITIALIZE_PASS_DEPENDENCY(LiveStacks) 150 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) 151 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo) 152 INITIALIZE_PASS_DEPENDENCY(VirtRegMap) 153 INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix) 154 INITIALIZE_PASS_DEPENDENCY(EdgeBundles) 155 INITIALIZE_PASS_DEPENDENCY(SpillPlacement) 156 INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass) 157 INITIALIZE_PASS_DEPENDENCY(RegAllocEvictionAdvisorAnalysis) 158 INITIALIZE_PASS_END(RAGreedy, "greedy", 159 "Greedy Register Allocator", false, false) 160 161 #ifndef NDEBUG 162 const char *const RAGreedy::StageName[] = { 163 "RS_New", 164 "RS_Assign", 165 "RS_Split", 166 "RS_Split2", 167 "RS_Spill", 168 "RS_Memory", 169 "RS_Done" 170 }; 171 #endif 172 173 // Hysteresis to use when comparing floats. 174 // This helps stabilize decisions based on float comparisons. 
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

namespace llvm {
FunctionPass* createGreedyRegisterAllocator(
    std::function<bool(const TargetRegisterInfo &TRI,
                       const TargetRegisterClass &RC)> Ftor);

}

FunctionPass* llvm::createGreedyRegisterAllocator(
    std::function<bool(const TargetRegisterInfo &TRI,
                       const TargetRegisterClass &RC)> Ftor) {
  return new RAGreedy(Ftor);
}

RAGreedy::RAGreedy(RegClassFilterFunc F):
  MachineFunctionPass(ID),
  RegAllocBase(F) {
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  AU.addRequired<RegAllocEvictionAdvisorAnalysis>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(Register VirtReg) {
  LiveInterval &LI = LIS->getInterval(VirtReg);
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LI);
    aboutToRemoveInterval(LI);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  // Nonetheless, clear the live-range so that the debug
  // dump will show the right state for that VirtReg.
  LI.clear();
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(Register VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  RegAllocBase::enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(Register New, Register Old) {
  ExtraInfo->LRE_DidCloneVirtReg(New, Old);
}

void RAGreedy::ExtraRegInfo::LRE_DidCloneVirtReg(Register New, Register Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!Info.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned:
  // reset the parent to RS_Assign, and enter the clone at the same stage as
  // the parent.
  Info[Old].Stage = RS_Assign;
  Info.grow(New.id());
  Info[New] = Info[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset();
  GlobalCand.clear();
}

void RAGreedy::enqueueImpl(LiveInterval *LI) { enqueue(Queue, LI); }

void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const Register Reg = LI->reg();
  assert(Reg.isVirtual() && "Can only enqueue virtual registers");
  unsigned Prio;

  auto Stage = ExtraInfo->getOrInitStage(Reg);
  if (Stage == RS_New) {
    Stage = RS_Assign;
    ExtraInfo->setStage(Reg, Stage);
  }
  if (Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else if (Stage == RS_Memory) {
    // Memory operands should be considered last.
    // Change the priority so that memory operands are assigned in
    // the reverse of the order in which they arrived.
    // TODO: Make this a member variable and probably do something about hints.
    static unsigned MemOp = 0;
    Prio = MemOp++;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
    bool ReverseLocal = TRI->reverseLocalAssignment();
    const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
    bool ForceGlobal = !ReverseLocal &&
      (Size / SlotIndex::InstrDist) > (2 * RCI.getNumAllocatableRegs(&RC));

    if (Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short live ranges to be assigned
        // first to one of the cheap registers. This could be much faster for
        // very large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
      }
      Prio |= RC.AllocationPriority << 24;
    } else {
      // Allocate global and split ranges in long->short order. Long ranges that
      // don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;

      Prio |= RC.AllocationPriority << 24;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
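  // At this point, for ranges being assigned now (neither RS_Split nor
  // RS_Memory), Prio encodes from most to least significant bits: bit 31 set
  // unconditionally, bit 30 set when the range has a known register
  // preference, bit 29 set for global ranges, bits 24-28 the register class
  // AllocationPriority, and the low bits the size or instruction distance.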
  CurQueue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return nullptr;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
MCRegister RAGreedy::tryAssign(LiveInterval &VirtReg,
                               AllocationOrder &Order,
                               SmallVectorImpl<Register> &NewVRegs,
                               const SmallVirtRegSet &FixedRegisters) {
  MCRegister PhysReg;
  for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
    assert(*I);
    if (!Matrix->checkInterference(VirtReg, *I)) {
      if (I.isHint())
        return *I;
      else
        PhysReg = *I;
    }
  }
  if (!PhysReg.isValid())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (Register Hint = MRI->getSimpleHint(VirtReg.reg()))
    if (Order.isHint(Hint)) {
      MCRegister PhysHint = Hint.asMCReg();
      LLVM_DEBUG(dbgs() << "missed hint " << printReg(PhysHint, TRI) << '\n');

      if (EvictAdvisor->canEvictHintInterference(VirtReg, PhysHint,
                                                 FixedRegisters)) {
        evictInterference(VirtReg, PhysHint, NewVRegs);
        return PhysHint;
      }
      // Record the missed hint; we may be able to recover
      // at the end if the surrounding allocation changed.
      SetOfBrokenHints.insert(&VirtReg);
    }

  // Try to evict interference from a cheaper alternative.
  uint8_t Cost = RegCosts[PhysReg];

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
                    << (unsigned)Cost << '\n');
  MCRegister CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost, FixedRegisters);
  return CheapReg ? CheapReg : PhysReg;
}

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

Register RegAllocEvictionAdvisor::canReassign(LiveInterval &VirtReg,
                                              Register PrevReg) const {
  auto Order =
      AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
  MCRegister PhysReg;
  for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
    if ((*I).id() == PrevReg.id())
      continue;

    MCRegUnitIterator Units(*I, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      PhysReg = *I;
  }
  if (PhysReg)
    LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                      << printReg(PrevReg, TRI) << " to "
                      << printReg(PhysReg, TRI) << '\n');
  return PhysReg;
}

/// Return true if all interferences between VirtReg and PhysReg between
/// Start and End can be evicted.
///
/// \param VirtReg Live range that is about to be assigned.
/// \param PhysReg Desired register for assignment.
/// \param Start Start of range to look for interferences.
/// \param End End of range to look for interferences.
/// \param MaxCost Only look for cheaper candidates and update with new cost
/// when returning true.
/// \return True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterferenceInRange(const LiveInterval &VirtReg,
                                           MCRegister PhysReg, SlotIndex Start,
                                           SlotIndex End,
                                           EvictionCost &MaxCost) const {
  EvictionCost Cost;

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);

    // Check if any interfering live range is heavier than MaxWeight.
    for (const LiveInterval *Intf : reverse(Q.interferingVRegs())) {
      // Check if the interference overlaps the segment of interest.
      if (!Intf->overlaps(Start, End))
        continue;

      // Cannot evict non-virtual register interference.
      if (!Register::isVirtualRegister(Intf->reg()))
        return false;
      // Never evict spill products. They cannot be split or spilled.
      if (ExtraInfo->getStage(*Intf) == RS_Done)
        return false;

      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg());
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight());
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
    }
  }

  if (Cost.MaxWeight == 0)
    return false;

  MaxCost = Cost;
  return true;
}

/// Return the physical register that is the best candidate for eviction by a
/// local split interval that will be created between Start and End.
///
/// \param Order The allocation order
/// \param VirtReg Live range that is about to be assigned.
/// \param Start Start of range to look for interferences
/// \param End End of range to look for interferences
/// \param BestEvictweight The eviction cost of that eviction
/// \return The PhysReg which is the best candidate for eviction and the
/// eviction cost in BestEvictweight
MCRegister RAGreedy::getCheapestEvicteeWeight(const AllocationOrder &Order,
                                              const LiveInterval &VirtReg,
                                              SlotIndex Start, SlotIndex End,
                                              float *BestEvictweight) const {
  EvictionCost BestEvictCost;
  BestEvictCost.setMax();
  BestEvictCost.MaxWeight = VirtReg.weight();
  MCRegister BestEvicteePhys;

  // Go over all physical registers and find the best candidate for eviction.
  for (MCRegister PhysReg : Order.getOrder()) {

    if (!canEvictInterferenceInRange(VirtReg, PhysReg, Start, End,
                                     BestEvictCost))
      continue;

    // Best so far.
    BestEvicteePhys = PhysReg;
  }
  *BestEvictweight = BestEvictCost.MaxWeight;
  return BestEvicteePhys;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, MCRegister PhysReg,
                                 SmallVectorImpl<Register> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
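  // For example, if %a carries cascade 1 and evicts %b, then %b is tagged
  // with cascade 1 as well; since a live range may only evict ranges with a
  // strictly smaller cascade number, %b can never turn around and evict %a.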
  unsigned Cascade = ExtraInfo->getOrAssignNewCascade(VirtReg.reg());

  LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
                    << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // We usually have the interfering VRegs cached so collectInterferingVRegs()
    // should be fast. We may need to recalculate when different physregs
    // overlap the same register unit, since different SubRanges may then have
    // been queried against it.
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (LiveInterval *Intf : Intfs) {
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg()))
      continue;

    LastEvicted.addEviction(PhysReg, VirtReg.reg(), Intf->reg());

    Matrix->unassign(*Intf);
    assert((ExtraInfo->getCascade(Intf->reg()) < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraInfo->setCascade(Intf->reg(), Cascade);
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg());
  }
}

/// Returns true if the given \p PhysReg is a callee saved register and has not
/// been used for allocation yet.
bool RegAllocEvictionAdvisor::isUnusedCalleeSavedReg(MCRegister PhysReg) const {
  MCRegister CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
  if (!CSR)
    return false;

  return !Matrix->isPhysRegUsed(PhysReg);
}

Optional<unsigned>
RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg,
                                       const AllocationOrder &Order,
                                       unsigned CostPerUseLimit) const {
  unsigned OrderLimit = Order.getOrder().size();

  if (CostPerUseLimit < uint8_t(~0u)) {
    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg());
    uint8_t MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
                        << MinCost << ", no cheaper registers to be found.\n");
      return None;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (RegCosts[Order.getOrder().back()] >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
                        << " regs.\n");
    }
  }
  return OrderLimit;
}

bool RegAllocEvictionAdvisor::canAllocatePhysReg(unsigned CostPerUseLimit,
                                                 MCRegister PhysReg) const {
  if (RegCosts[PhysReg] >= CostPerUseLimit)
    return false;
  // The first use of a callee-saved register in a function has cost 1.
  // Don't start using a CSR when the CostPerUseLimit is low.
  if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
    LLVM_DEBUG(
        dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
               << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
               << '\n');
    return false;
  }
  return true;
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order Physregs to try.
/// @return Physreg to assign VirtReg, or 0.
MCRegister RAGreedy::tryEvict(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<Register> &NewVRegs,
                              uint8_t CostPerUseLimit,
                              const SmallVirtRegSet &FixedRegisters) {
  NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
                     TimePassesIsEnabled);

  MCRegister BestPhys = EvictAdvisor->tryFindEvictionCandidate(
      VirtReg, Order, CostPerUseLimit, FixedRegisters);
  if (BestPhys.isValid())
    evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned I = 0; I != UseBlocks.size(); ++I) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[I];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = (BI.LiveOut &&
               !LIS->getInstructionFromIndex(BI.LastInstr)->isImplicitDef())
                  ? SpillPlacement::PrefReg
                  : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
        BC.Entry = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.first() < BI.FirstInstr) {
        BC.Entry = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.first() < BI.LastInstr) {
        ++Ins;
      }

      // Abort if the spill cannot be inserted at the start of the MBB.
      if (((BC.Entry == SpillPlacement::MustSpill) ||
           (BC.Entry == SpillPlacement::PrefSpill)) &&
          SlotIndex::isEarlierInstr(BI.FirstInstr,
                                    SA->getFirstSplitPoint(BC.Number)))
        return false;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
        BC.Exit = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.last() > BI.LastInstr) {
        BC.Exit = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.last() > BI.FirstInstr) {
        ++Ins;
      }
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned Number : Blocks) {
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Abort if the spill cannot be inserted at the start of the MBB.
    MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
    auto FirstNonDebugInstr = MBB->getFirstNonDebugInstr();
    if (FirstNonDebugInstr != MBB->end() &&
        SlotIndex::isEarlierInstr(LIS->getInstructionIndex(*FirstNonDebugInstr),
                                  SA->getFirstSplitPoint(Number)))
      return false;
    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      SpillPlacer->addConstraints(makeArrayRef(BCS, B));
      B = 0;
    }
  }

  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
  return true;
}

bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  while (true) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (unsigned Bundle : NewBundles) {
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (unsigned Block : Blocks) {
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg) {
      if (!addThroughConstraints(Cand.Intf, NewBlocks))
        return false;
    } else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  LLVM_DEBUG(dbgs() << ", v=" << Visited);
  return true;
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, MCRegister::NoRegister);

  LLVM_DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  if (!growRegion(Cand)) {
    LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
    return false;
  }

  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  LLVM_DEBUG({
    for (int I : Cand.LiveBundles.set_bits())
      dbgs() << " EB#" << I;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// Check if splitting Evictee will create a local split interval in
/// basic block number BBNumber that may cause a bad eviction chain. This is
/// intended to prevent bad eviction sequences like:
/// movl %ebp, 8(%esp) # 4-byte Spill
/// movl %ecx, %ebp
/// movl %ebx, %ecx
/// movl %edi, %ebx
/// movl %edx, %edi
/// cltd
/// idivl %esi
/// movl %edi, %edx
/// movl %ebx, %edi
/// movl %ecx, %ebx
/// movl %ebp, %ecx
/// movl 16(%esp), %ebp # 4-byte Reload
///
/// Such sequences are created in 2 scenarios:
///
/// Scenario #1:
/// %0 is evicted from physreg0 by %1.
/// Evictee %0 is intended for region splitting with split candidate
/// physreg0 (the reg %0 was evicted from).
/// Region splitting creates a local interval because of interference with the
/// evictor %1 (normally region splitting creates 2 intervals, the "by reg"
/// and "by stack" intervals, plus a local interval created when interference
/// occurs).
/// One of the split intervals ends up evicting %2 from physreg1.
/// Evictee %2 is intended for region splitting with split candidate
/// physreg1.
/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// Scenario #2
/// %0 is evicted from physreg0 by %1.
/// %2 is evicted from physreg2 by %3 etc.
/// Evictee %0 is intended for region splitting with split candidate
/// physreg1.
/// Region splitting creates a local interval because of interference with the
/// evictor %1.
/// One of the split intervals ends up evicting back original evictor %1
/// from physreg0 (the reg %0 was evicted from).
/// Another evictee %2 is intended for region splitting with split candidate
/// physreg1.
/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// \param Evictee The register considered to be split.
/// \param Cand The split candidate that determines the physical register
/// we are splitting for and the interferences.
/// \param BBNumber The number of a BB for which the region split process will
/// create a local split interval.
/// \param Order The physical registers that may get evicted by a split
/// artifact of Evictee.
/// \return True if splitting Evictee may cause a bad eviction chain, false
/// otherwise.
bool RAGreedy::splitCanCauseEvictionChain(Register Evictee,
                                          GlobalSplitCandidate &Cand,
                                          unsigned BBNumber,
                                          const AllocationOrder &Order) {
  EvictionTrack::EvictorInfo VregEvictorInfo = LastEvicted.getEvictor(Evictee);
  unsigned Evictor = VregEvictorInfo.first;
  MCRegister PhysReg = VregEvictorInfo.second;

  // No actual evictor.
  if (!Evictor || !PhysReg)
    return false;

  float MaxWeight = 0;
  MCRegister FutureEvictedPhysReg =
      getCheapestEvicteeWeight(Order, LIS->getInterval(Evictee),
                               Cand.Intf.first(), Cand.Intf.last(), &MaxWeight);

  // The bad eviction chain occurs when either the split candidate is the
  // evicting reg or one of the split artifacts will evict the evicting reg.
  if ((PhysReg != Cand.PhysReg) && (PhysReg != FutureEvictedPhysReg))
    return false;

  Cand.Intf.moveToBlock(BBNumber);

  // Check to see if the Evictor contains interference (with Evictee) in the
  // given BB. If so, this interference caused the eviction of Evictee from
  // PhysReg. This suggests that we will create a local interval during the
  // region split to avoid this interference. This local interval may cause a
  // bad eviction chain.
  if (!LIS->hasInterval(Evictor))
    return false;
  LiveInterval &EvictorLI = LIS->getInterval(Evictor);
  if (EvictorLI.FindSegmentContaining(Cand.Intf.first()) == EvictorLI.end())
    return false;

  // Now, check to see if the local interval we will create is going to be
  // expensive enough to evict somebody. If so, this may cause a bad eviction
  // chain.
  float splitArtifactWeight =
      VRAI->futureWeight(LIS->getInterval(Evictee),
                         Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
  if (splitArtifactWeight >= 0 && splitArtifactWeight < MaxWeight)
    return false;

  return true;
}

/// Check if splitting VirtRegToSplit will create a local split interval
/// in basic block number BBNumber that may cause a spill.
///
/// \param VirtRegToSplit The register considered to be split.
/// \param Cand The split candidate that determines the physical
/// register we are splitting for and the interferences.
/// \param BBNumber The number of a BB for which the region split process
/// will create a local split interval.
/// \param Order The physical registers that may get evicted by a
/// split artifact of VirtRegToSplit.
/// \return True if splitting VirtRegToSplit may cause a spill, false
/// otherwise.
bool RAGreedy::splitCanCauseLocalSpill(unsigned VirtRegToSplit,
                                       GlobalSplitCandidate &Cand,
                                       unsigned BBNumber,
                                       const AllocationOrder &Order) {
  Cand.Intf.moveToBlock(BBNumber);

  // Check if the local interval will find a non-interfering assignment.
  for (auto PhysReg : Order.getOrder()) {
    if (!Matrix->checkInterference(Cand.Intf.first().getPrevIndex(),
                                   Cand.Intf.last(), PhysReg))
      return false;
  }

  // The local interval is not able to find a non-interfering assignment
  // and is not able to evict a less worthy interval; therefore, it can cause
  // a spill.
  return true;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
                                             const AllocationOrder &Order,
                                             bool *CanCauseEvictionChain) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  Register VirtRegToSplit = SA->getParent().reg();
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned I = 0; I != UseBlocks.size(); ++I) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[I];
    bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
    unsigned Ins = 0;

    Cand.Intf.moveToBlock(BC.Number);
    // Check whether a local interval is going to be created during the region
    // split. Calculate the advanced split cost (the cost of local intervals)
    // if the option is enabled.
    if (EnableAdvancedRASplitCost && Cand.Intf.hasInterference() && BI.LiveIn &&
        BI.LiveOut && RegIn && RegOut) {

      if (CanCauseEvictionChain &&
          splitCanCauseEvictionChain(VirtRegToSplit, Cand, BC.Number, Order)) {
        // This interference causes our eviction from this assignment; we might
        // evict somebody else and eventually someone will spill. Add that cost.
        // See splitCanCauseEvictionChain for a detailed description of the
        // scenarios.
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);

        *CanCauseEvictionChain = true;

      } else if (splitCanCauseLocalSpill(VirtRegToSplit, Cand, BC.Number,
                                         Order)) {
        // This interference causes the local interval to spill; add that cost.
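        // The block frequency is charged twice, matching the spill and the
        // reload that the spilled local interval is expected to need.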
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
      }
    }

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned Number : Cand.ActiveBlocks) {
    bool RegIn = LiveBundles[Bundles->getBundle(Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);

        // Check whether a local interval is going to be created during the
        // region split.
        if (EnableAdvancedRASplitCost && CanCauseEvictionChain &&
            splitCanCauseEvictionChain(VirtRegToSplit, Cand, Number, Order)) {
          // This interference causes our eviction from this assignment; we
          // might evict somebody else. Add that cost.
          // See splitCanCauseEvictionChain for a detailed description of the
          // scenarios.
          GlobalCost += SpillPlacer->getBlockFrequency(Number);
          GlobalCost += SpillPlacer->getBlockFrequency(Number);

          *CanCauseEvictionChain = true;
        }
      }
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
/// must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
                    << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  Register Reg = SA->getParent().reg();
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned UsedCand : UsedCands) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCand].ActiveBlocks;
    for (unsigned Number : Blocks) {
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
    const LiveInterval &Reg = LIS->getInterval(LREdit.get(I));

    // Ignore old intervals from DCE.
    if (ExtraInfo->getOrInitStage(Reg.reg()) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[I] == 0) {
      ExtraInfo->setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[I] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                          << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        ExtraInfo->setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

MCRegister RAGreedy::tryRegionSplit(LiveInterval &VirtReg,
                                    AllocationOrder &Order,
                                    SmallVectorImpl<Register> &NewVRegs) {
  if (!TRI->shouldRegionSplitForVirtReg(*MF, VirtReg))
    return MCRegister::NoRegister;
  unsigned NumCands = 0;
  BlockFrequency SpillCost = calcSpillCost();
  BlockFrequency BestCost;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = SpillCost;
    LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = ";
               MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
  }

  bool CanCauseEvictionChain = false;
  unsigned BestCand =
      calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
                               false /*IgnoreCSR*/, &CanCauseEvictionChain);

  // Split candidates with compact regions can cause a bad eviction sequence.
  // See splitCanCauseEvictionChain for a detailed description of the
  // scenarios. To avoid it, we need to compare the cost with the spill cost
  // and not the current max frequency.
  if (HasCompact && (BestCost > SpillCost) && (BestCand != NoCand) &&
      CanCauseEvictionChain) {
    return MCRegister::NoRegister;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return MCRegister::NoRegister;

  return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
}

unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
                                            AllocationOrder &Order,
                                            BlockFrequency &BestCost,
                                            unsigned &NumCands, bool IgnoreCSR,
                                            bool *CanCauseEvictionChain) {
  unsigned BestCand = NoCand;
  for (MCPhysReg PhysReg : Order) {
    assert(PhysReg);
    if (IgnoreCSR && EvictAdvisor->isUnusedCalleeSavedReg(PhysReg))
      continue;

    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
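    // When the cursor limit is hit, evict the candidate with the fewest live
    // bundles (the least promising one) to make room for PhysReg.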
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned CandIndex = 0; CandIndex != NumCands; ++CandIndex) {
        if (CandIndex == BestCand || !GlobalCand[CandIndex].PhysReg)
          continue;
        unsigned Count = GlobalCand[CandIndex].LiveBundles.count();
        if (Count < WorstCount) {
          Worst = CandIndex;
          WorstCount = Count;
        }
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
               MBFI->printBlockFreq(dbgs(), Cost));
    if (Cost >= BestCost) {
      LLVM_DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << printReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    if (!growRegion(Cand)) {
      LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
      continue;
    }

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      LLVM_DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    bool HasEvictionChain = false;
    Cost += calcGlobalSplitCost(Cand, Order, &HasEvictionChain);
    LLVM_DEBUG({
      dbgs() << ", total = ";
      MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
      for (int I : Cand.LiveBundles.set_bits())
        dbgs() << " EB#" << I;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
      // See splitCanCauseEvictionChain for a detailed description of bad
      // eviction chain scenarios.
      if (CanCauseEvictionChain)
        *CanCauseEvictionChain = HasEvictionChain;
    }
    ++NumCands;
  }

  if (CanCauseEvictionChain && BestCand != NoCand) {
    // See splitCanCauseEvictionChain for a detailed description of bad
    // eviction chain scenarios.
    LLVM_DEBUG(dbgs() << "Best split candidate of vreg "
                      << printReg(VirtReg.reg(), TRI) << " may ");
    if (!(*CanCauseEvictionChain))
      LLVM_DEBUG(dbgs() << "not ");
    LLVM_DEBUG(dbgs() << "cause bad eviction chain\n");
  }

  return BestCand;
}

unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                                 bool HasCompact,
                                 SmallVectorImpl<Register> &NewVRegs) {
  SmallVector<unsigned, 8> UsedCands;
  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
                        << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      LLVM_DEBUG(dbgs() << "Split for compact region in " << B
                        << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<Register> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  Register Reg = VirtReg.reg();
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
    const LiveInterval &LI = LIS->getInterval(LREdit.get(I));
    if (ExtraInfo->getOrInitStage(LI.reg()) == RS_New && IntvMap[I] == 0)
      ExtraInfo->setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}

//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// Get the number of allocatable registers that match the constraints of \p Reg
/// on \p MI and that are also in \p SuperRC.
static unsigned getNumAllocatableRegsForConstraints(
    const MachineInstr *MI, Register Reg, const TargetRegisterClass *SuperRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
    const RegisterClassInfo &RCI) {
  assert(SuperRC && "Invalid register class");

  const TargetRegisterClass *ConstrainedRC =
      MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
                                             /* ExploreBundle */ true);
  if (!ConstrainedRC)
    return 0;
  return RCI.getNumAllocatableRegs(ConstrainedRC);
}

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<Register> &NewVRegs) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(CurRC))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
                    << " individual instrs.\n");

  const TargetRegisterClass *SuperRC =
      TRI->getLargestLegalSuperClass(CurRC, *MF);
  unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
  // Split around every non-copy instruction if this split will relax
  // the constraints on the virtual register.
  // Otherwise, splitting just inserts uncoalescable copies that do not help
  // the allocation.
  for (const SlotIndex Use : Uses) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Use))
      if (MI->isFullCopy() ||
          SuperRCNumAllocatableRegs ==
              getNumAllocatableRegsForConstraints(MI, VirtReg.reg(), SuperRC,
                                                  TII, TRI, RCI)) {
        LLVM_DEBUG(dbgs() << " skip:\t" << Use << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Use);
    SlotIndex SegStop = SE->leaveIntvAfter(Use);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    LLVM_DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
  // Assign all new registers to RS_Spill. This was the last chance.
  ExtraInfo->setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[I] represents the gap between UseSlots[I] and UseSlots[I + 1].
///
void RAGreedy::calcGapWeights(MCRegister PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size() - 1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
      BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
      BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval &>(SA->getParent()), *Units)
             .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
        Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap + 1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight();
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap + 1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap + 1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = huge_valf;
        if (Uses[Gap + 1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<Register> &NewVRegs) {
  // TODO: the function currently only handles a single UseBlock; it should be
  // possible to generalize.
  if (SA->getUseBlocks().size() != 1)
    return 0;

  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - a phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case; we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size() - 1;

  LLVM_DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (const auto &Use : Uses)
      dbgs() << ' ' << Use;
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned RI =
        llvm::lower_bound(RMS, Uses.front().getRegSlot()) - RMS.begin();
    unsigned RE = RMS.size();
    for (unsigned I = 0; I != NumGaps && RI != RE; ++I) {
      // Look for Uses[I] <= RMS <= Uses[I + 1].
      assert(!SlotIndex::isEarlierInstr(RMS[RI], Uses[I]));
      if (SlotIndex::isEarlierInstr(Uses[I + 1], RMS[RI]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[I + 1], RMS[RI]) && I + 1 == NumGaps)
        break;
      LLVM_DEBUG(dbgs() << ' ' << RMS[RI] << ':' << Uses[I] << '-'
                        << Uses[I + 1]);
      RegMaskGaps.push_back(I);
      // Advance RI to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (RI != RE && SlotIndex::isEarlierInstr(RMS[RI], Uses[I + 1]))
        ++RI;
    }
    LLVM_DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = ExtraInfo->getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
      SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
      (1.0f / MBFI->getEntryFreq());
  SmallVector<float, 8> GapWeight;

  for (MCPhysReg PhysReg : Order) {
    assert(PhysReg);
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[I] and UseSlots[I + 1].
    calcGapWeights(PhysReg, GapWeight);
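    // At this point GapWeight[I] holds, for each gap, the heaviest spill
    // weight that would have to be evicted in order to use PhysReg there;
    // calcGapWeights marks gaps that overlap fixed interference with
    // huge_valf.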
    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned I = 0, E = RegMaskGaps.size(); I != E; ++I)
        GapWeight[RegMaskGaps[I]] = huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    while (true) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
                        << '-' << Uses[SplitAfter] << " I=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        LLVM_DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(
            blockFreq * (NewGaps + 1),
            Uses[SplitBefore].distance(Uses[SplitAfter]) +
                (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
            1);
        // Would this split be possible to allocate?
        // Never allocate all gaps; we wouldn't be making progress.
        LLVM_DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            LLVM_DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          LLVM_DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned I = SplitBefore + 1; I != SplitAfter; ++I)
              MaxGap = std::max(MaxGap, GapWeight[I]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        LLVM_DEBUG(dbgs() << " end\n");
        break;
      }

      LLVM_DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }
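  // A note on the loop above: for each candidate register it slides a window
  // [SplitBefore, SplitAfter] over the gaps, recording the window as the best
  // split whenever its estimated spill weight beats the interference it would
  // have to evict (MaxGap), shrinking the window from the left when the
  // estimate is too light, and otherwise growing it to the right until the
  // whole local range has been considered.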
  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
                    << Uses[BestAfter] << ", " << BestDiff << ", "
                    << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    LLVM_DEBUG(dbgs() << "Tagging non-progress ranges:");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned I = 0, E = IntvMap.size(); I != E; ++I)
      if (IntvMap[I] == 1) {
        ExtraInfo->setStage(LIS->getInterval(LREdit.get(I)), RS_Split2);
        LLVM_DEBUG(dbgs() << ' ' << printReg(LREdit.get(I)));
      }
    LLVM_DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return PhysReg when VirtReg may be assigned, and/or new live ranges in
/// NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<Register> &NewVRegs,
                            const SmallVirtRegSet &FixedRegisters) {
  // Ranges must be Split2 or less.
  if (ExtraInfo->getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    Register PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
                     TimerGroupDescription, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (ExtraInfo->getStage(VirtReg) < RS_Split2) {
    MCRegister PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}

//===----------------------------------------------------------------------===//
//                          Last Chance Recoloring
//===----------------------------------------------------------------------===//

/// Return true if \p reg has any tied def operand.
static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg) {
  for (const MachineOperand &MO : MRI->def_operands(reg))
    if (MO.isTied())
      return true;

  return false;
}

/// mayRecolorAllInterferences - Check if the virtual registers that
/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
/// recolored to free \p PhysReg.
/// When true is returned, \p RecoloringCandidates has been augmented with all
/// the live intervals that need to be recolored in order to free \p PhysReg
/// for \p VirtReg.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
bool RAGreedy::mayRecolorAllInterferences(
    MCRegister PhysReg, LiveInterval &VirtReg, SmallLISet &RecoloringCandidates,
    const SmallVirtRegSet &FixedRegisters) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there is LastChanceRecoloringMaxInterference or more interferences,
    // chances are one would not be recolorable.
    if (Q.interferingVRegs(LastChanceRecoloringMaxInterference).size() >=
            LastChanceRecoloringMaxInterference &&
        !ExhaustiveSearch) {
      LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
      CutOffInfo |= CO_Interf;
      return false;
    }
    for (LiveInterval *Intf : reverse(Q.interferingVRegs())) {
      // If Intf is done and sits in the same register class as VirtReg, it
      // would not be recolorable, as it is in the same state as VirtReg.
      // However, if VirtReg has tied defs and Intf doesn't, then there is
      // still a point in examining whether it can be recolored.
      if (((ExtraInfo->getStage(*Intf) == RS_Done &&
            MRI->getRegClass(Intf->reg()) == CurRC) &&
           !(hasTiedDef(MRI, VirtReg.reg()) &&
             !hasTiedDef(MRI, Intf->reg()))) ||
          FixedRegisters.count(Intf->reg())) {
        LLVM_DEBUG(
            dbgs() << "Early abort: the interference is not recolorable.\n");
        return false;
      }
      RecoloringCandidates.insert(Intf);
    }
  }
  return true;
}

/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
/// its interferences.
/// Last chance recoloring chooses a color for \p VirtReg and recolors every
/// virtual register that was using it. The recoloring process may recursively
/// use the last chance recoloring. Therefore, when a virtual register has been
/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
/// be last-chance-recolored again during this recoloring "session".
/// E.g.,
/// Let
/// vA can use {R1, R2    }
/// vB can use {    R2, R3}
/// vC can use {R1        }
/// Where vA, vB, and vC cannot be split anymore (they are reloads for
/// instance) and they all interfere.
///
/// vA is assigned R1
/// vB is assigned R2
/// vC tries to evict vA but vA is already done.
/// Regular register allocation fails.
///
/// Last chance recoloring kicks in:
/// vC does as if vA was evicted => vC uses R1.
/// vC is marked as fixed.
/// vA needs to find a color.
/// None are available.
/// vA cannot evict vC: vC is a fixed virtual register now.
/// vA does as if vB was evicted => vA uses R2.
/// vB needs to find a color.
/// R3 is available.
/// Recoloring => vC = R1, vA = R2, vB = R3
///
/// \p Order defines the preferred allocation order for \p VirtReg.
/// \p NewRegs will contain any new virtual registers that have been created
/// (split, spill) during the process and that must be assigned.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
/// \p Depth gives the current depth of the last chance recoloring.
/// \return a physical register that can be used for VirtReg or ~0u if none
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
                                           AllocationOrder &Order,
                                           SmallVectorImpl<Register> &NewVRegs,
                                           SmallVirtRegSet &FixedRegisters,
                                           unsigned Depth) {
  if (!TRI->shouldUseLastChanceRecoloringForVirtReg(*MF, VirtReg))
    return ~0u;

  LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
  // Ranges must be Done.
  assert((ExtraInfo->getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
         "Last chance recoloring should really be last chance");
  // Set the max depth to LastChanceRecoloringMaxDepth.
  // We may want to reconsider that if we end up with a search space that is
  // too large for targets with hundreds of registers.
  // Indeed, in that case we may want to cut the search space earlier.
  if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
    LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
    CutOffInfo |= CO_Depth;
    return ~0u;
  }

  // Set of live intervals that will need to be recolored.
  SmallLISet RecoloringCandidates;
  // Record the original mapping from virtual register to physical register in
  // case the recoloring fails.
  DenseMap<Register, MCRegister> VirtRegToPhysReg;
  // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
  // this recoloring "session".
  assert(!FixedRegisters.count(VirtReg.reg()));
  FixedRegisters.insert(VirtReg.reg());
  SmallVector<Register, 4> CurrentNewVRegs;

  for (MCRegister PhysReg : Order) {
    assert(PhysReg.isValid());
    LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
                      << printReg(PhysReg, TRI) << '\n');
    RecoloringCandidates.clear();
    VirtRegToPhysReg.clear();
    CurrentNewVRegs.clear();

    // It is only possible to recolor virtual register interference.
    if (Matrix->checkInterference(VirtReg, PhysReg) >
        LiveRegMatrix::IK_VirtReg) {
      LLVM_DEBUG(
          dbgs() << "Some interferences are not with virtual registers.\n");

      continue;
    }

    // Give up early on this PhysReg if it is obvious we cannot recolor all
    // the interferences.
    if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
                                    FixedRegisters)) {
      LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
      continue;
    }

    // RecoloringCandidates contains all the virtual registers that interfere
    // with VirtReg on PhysReg (or one of its aliases).
    // Enqueue them for recoloring and perform the actual recoloring.
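    // Note the ordering below: each interfering interval is unassigned before
    // VirtReg itself is assigned to PhysReg, so the live register matrix
    // never holds two overlapping assignments for the same register unit.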
    PQueue RecoloringQueue;
    for (LiveInterval *RC : RecoloringCandidates) {
      Register ItVirtReg = RC->reg();
      enqueue(RecoloringQueue, RC);
      assert(VRM->hasPhys(ItVirtReg) &&
             "Interferences are supposed to be with allocated variables");

      // Record the current allocation.
      VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
      // Remove the current assignment from the matrix.
      Matrix->unassign(*RC);
    }

    // Act as if VirtReg was assigned to PhysReg so that the underlying
    // recoloring has the right information about the interferences and
    // available colors.
    Matrix->assign(VirtReg, PhysReg);

    // Save the current recoloring state.
    // If we cannot recolor all the interferences, we will have to start again
    // at this point for the next physical register.
    SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
    if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
                                FixedRegisters, Depth)) {
      // Push the queued vregs into the main queue.
      for (Register NewVReg : CurrentNewVRegs)
        NewVRegs.push_back(NewVReg);
      // Do not mess up the global assignment process; i.e., VirtReg must be
      // unassigned.
      Matrix->unassign(VirtReg);
      return PhysReg;
    }

    LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
                      << printReg(PhysReg, TRI) << '\n');

    // The recoloring attempt failed, undo the changes.
    FixedRegisters = SaveFixedRegisters;
    Matrix->unassign(VirtReg);

    // For a newly created vreg which is also in RecoloringCandidates,
    // don't add it to NewVRegs because its physical register will be restored
    // below. Other vregs in CurrentNewVRegs are created by calling
    // selectOrSplit and should be added into NewVRegs.
    for (Register &R : CurrentNewVRegs) {
      if (RecoloringCandidates.count(&LIS->getInterval(R)))
        continue;
      NewVRegs.push_back(R);
    }

    for (LiveInterval *RC : RecoloringCandidates) {
      Register ItVirtReg = RC->reg();
      if (VRM->hasPhys(ItVirtReg))
        Matrix->unassign(*RC);
      MCRegister ItPhysReg = VirtRegToPhysReg[ItVirtReg];
      Matrix->assign(*RC, ItPhysReg);
    }
  }

  // Last chance recoloring did not work either; give up.
  return ~0u;
}

/// tryRecoloringCandidates - Try to assign a new color to every register
/// in \p RecoloringQueue.
/// \p NewRegs will contain any new virtual register created during the
/// recoloring process.
/// \p FixedRegisters[in/out] contains all the registers that have been
/// recolored.
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
                                       SmallVectorImpl<Register> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  while (!RecoloringQueue.empty()) {
    LiveInterval *LI = dequeue(RecoloringQueue);
    LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
    MCRegister PhysReg =
        selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
    // When splitting happens, the live-range may actually be empty.
    // In that case, it is okay to continue the recoloring even
    // if we did not find an alternative color for it. Indeed,
    // there will not be anything to color for LI in the end.
    if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
      return false;

    if (!PhysReg) {
      assert(LI->empty() && "Only empty live-ranges do not require a register");
      LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
                        << " succeeded. Empty LI.\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
                      << " succeeded with: " << printReg(PhysReg, TRI) << '\n');

    Matrix->assign(*LI, PhysReg);
    FixedRegisters.insert(LI->reg());
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

MCRegister RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                   SmallVectorImpl<Register> &NewVRegs) {
  CutOffInfo = CO_None;
  LLVMContext &Ctx = MF->getFunction().getContext();
  SmallVirtRegSet FixedRegisters;
  MCRegister Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
  if (Reg == ~0U && (CutOffInfo != CO_None)) {
    uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
    if (CutOffEncountered == CO_Depth)
      Ctx.emitError("register allocation failed: maximum depth for recoloring "
                    "reached. Use -fexhaustive-register-search to skip "
                    "cutoffs");
    else if (CutOffEncountered == CO_Interf)
      Ctx.emitError("register allocation failed: maximum interference for "
                    "recoloring reached. Use -fexhaustive-register-search "
                    "to skip cutoffs");
    else if (CutOffEncountered == (CO_Depth | CO_Interf))
      Ctx.emitError("register allocation failed: maximum interference and "
                    "depth for recoloring reached. Use "
                    "-fexhaustive-register-search to skip cutoffs");
  }
  return Reg;
}

/// Using a CSR for the first time has a cost because it causes push|pop
/// to be added to the prologue|epilogue. Splitting a cold section of the live
/// range can have lower cost than using the CSR for the first time;
/// spilling a live range in the cold path can have lower cost than using
/// the CSR for the first time. Returns the physical register if we decide
/// to use the CSR; otherwise returns 0.
MCRegister
RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
                                MCRegister PhysReg, uint8_t &CostPerUseLimit,
                                SmallVectorImpl<Register> &NewVRegs) {
  if (ExtraInfo->getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
    // We choose spill over using the CSR for the first time if the spill cost
    // is lower than CSRCost.
    SA->analyze(&VirtReg);
    if (calcSpillCost() >= CSRCost)
      return PhysReg;

    // We are going to spill, set CostPerUseLimit to 1 to make sure that
    // we will not use a callee-saved register in tryEvict.
    CostPerUseLimit = 1;
    return 0;
  }
  if (ExtraInfo->getStage(VirtReg) < RS_Split) {
    // We choose pre-splitting over using the CSR for the first time if
    // the cost of splitting is lower than CSRCost.
    SA->analyze(&VirtReg);
    unsigned NumCands = 0;
    BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
    unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
                                                 NumCands, true /*IgnoreCSR*/);
    if (BestCand == NoCand)
      // Use the CSR if we can't find a region split below CSRCost.
      return PhysReg;
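    // At this point a region split cheaper than CSRCost is known to exist.
    // For example, if CSRCost corresponds to one execution of the entry
    // block, a live range whose best region split costs less than that is
    // pre-split here instead of becoming the first user of the CSR.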
    // Perform the actual pre-splitting.
    doRegionSplit(VirtReg, BestCand, false /*HasCompact*/, NewVRegs);
    return 0;
  }
  return PhysReg;
}

void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
  // Do not keep invalid information around.
  SetOfBrokenHints.remove(&LI);
}

void RAGreedy::initializeCSRCost() {
  // We use the larger one out of the command-line option and the value
  // reported by TRI.
  CSRCost = BlockFrequency(
      std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
  if (!CSRCost.getFrequency())
    return;

  // Raw cost is relative to Entry == 2^14; scale it appropriately.
  uint64_t ActualEntry = MBFI->getEntryFreq();
  if (!ActualEntry) {
    CSRCost = 0;
    return;
  }
  uint64_t FixedEntry = 1 << 14;
  if (ActualEntry < FixedEntry)
    CSRCost *= BranchProbability(ActualEntry, FixedEntry);
  else if (ActualEntry <= UINT32_MAX)
    // Invert the fraction and divide.
    CSRCost /= BranchProbability(FixedEntry, ActualEntry);
  else
    // Can't use BranchProbability in general, since it takes 32-bit numbers.
    CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
}

/// Collect the hint info for \p Reg.
/// The results are stored into \p Out.
/// \p Out is not cleared before being populated.
void RAGreedy::collectHintInfo(Register Reg, HintsInfo &Out) {
  for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
    if (!Instr.isFullCopy())
      continue;
    // Look for the other end of the copy.
    Register OtherReg = Instr.getOperand(0).getReg();
    if (OtherReg == Reg) {
      OtherReg = Instr.getOperand(1).getReg();
      if (OtherReg == Reg)
        continue;
    }
    // Get the current assignment.
    MCRegister OtherPhysReg =
        OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg);
    // Push the collected information.
    Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
                           OtherPhysReg));
  }
}

/// Using the given \p List, compute the cost of the broken hints if
/// \p PhysReg was used.
/// \return The cost of \p List for \p PhysReg.
BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
                                           MCRegister PhysReg) {
  BlockFrequency Cost = 0;
  for (const HintInfo &Info : List) {
    if (Info.PhysReg != PhysReg)
      Cost += Info.Freq;
  }
  return Cost;
}

/// Using the register assigned to \p VirtReg, try to recolor
/// all the live ranges that are copy-related with \p VirtReg.
/// The recoloring is then propagated to all the live-ranges that have
/// been recolored and so on, until no more copies can be coalesced or
/// it is not profitable.
/// For a given live range, profitability is determined by the sum of the
/// frequencies of the non-identity copies it would introduce with the old
/// and new register.
void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
  // We have a broken hint; check if it is possible to fix it by
  // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
  // some register and PhysReg may be available for the other live-ranges.
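  // For example, if the copies hinting a live range execute with frequencies
  // {4, 6}, the current color breaks both of them (cost 10), and PhysReg
  // would break only the first (cost 4), then recoloring to PhysReg is
  // considered profitable.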
  SmallSet<Register, 4> Visited;
  SmallVector<unsigned, 2> RecoloringCandidates;
  HintsInfo Info;
  Register Reg = VirtReg.reg();
  MCRegister PhysReg = VRM->getPhys(Reg);
  // Start the recoloring algorithm from the input live-interval, then
  // it will propagate to the ones that are copy-related with it.
  Visited.insert(Reg);
  RecoloringCandidates.push_back(Reg);

  LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
                    << '(' << printReg(PhysReg, TRI) << ")\n");

  do {
    Reg = RecoloringCandidates.pop_back_val();

    // We cannot recolor physical registers.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // This may be a skipped register class.
    if (!VRM->hasPhys(Reg)) {
      assert(!ShouldAllocateClass(*TRI, *MRI->getRegClass(Reg)) &&
             "We have an unallocated variable which should have been handled");
      continue;
    }

    // Get the live interval mapped with this virtual register to be able
    // to check for the interference with the new color.
    LiveInterval &LI = LIS->getInterval(Reg);
    MCRegister CurrPhys = VRM->getPhys(Reg);
    // Check that the new color matches the register class constraints and
    // that it is free for this live range.
    if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
                                Matrix->checkInterference(LI, PhysReg)))
      continue;

    LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
                      << ") is recolorable.\n");

    // Gather the hint info.
    Info.clear();
    collectHintInfo(Reg, Info);
    // Check if recoloring the live-range will increase the cost of the
    // non-identity copies.
    if (CurrPhys != PhysReg) {
      LLVM_DEBUG(dbgs() << "Checking profitability:\n");
      BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
      BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
      LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
                        << "\nNew Cost: " << NewCopiesCost.getFrequency()
                        << '\n');
      if (OldCopiesCost < NewCopiesCost) {
        LLVM_DEBUG(dbgs() << "=> Not profitable.\n");
        continue;
      }
      // At this point, the cost is either cheaper or equal. If it is
      // equal, we consider it profitable because it may expose
      // more recoloring opportunities.
      LLVM_DEBUG(dbgs() << "=> Profitable.\n");
      // Recolor the live-range.
      Matrix->unassign(LI);
      Matrix->assign(LI, PhysReg);
    }
    // Push all copy-related live-ranges to keep reconciling the broken
    // hints.
    for (const HintInfo &HI : Info) {
      if (Visited.insert(HI.Reg).second)
        RecoloringCandidates.push_back(HI.Reg);
    }
  } while (!RecoloringCandidates.empty());
}

/// Try to recolor broken hints.
/// Broken hints may be repaired by recoloring when an evicted variable
/// freed up a register for a larger live-range.
/// Consider the following example:
/// BB1:
///   a =
///   b =
/// BB2:
///   ...
///   = b
///   = a
/// Let us assume b gets split:
/// BB1:
///   a =
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   = a
/// Because of how the allocation works, b, c, and d may be assigned different
/// colors. Now, if a gets evicted later:
/// BB1:
///   a =
///   st a, SpillSlot
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   e = ld SpillSlot
///   = e
/// It is likely that we can assign the same register for b, c, and d,
/// getting rid of 2 copies.
void RAGreedy::tryHintsRecoloring() {
  for (LiveInterval *LI : SetOfBrokenHints) {
    assert(Register::isVirtualRegister(LI->reg()) &&
           "Recoloring is possible only for virtual registers");
    // Some dead defs may be around (e.g., because of debug uses).
    // Ignore those.
    if (!VRM->hasPhys(LI->reg()))
      continue;
    tryHintRecoloring(*LI);
  }
}

MCRegister RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
                                       SmallVectorImpl<Register> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  uint8_t CostPerUseLimit = uint8_t(~0u);
  // First try assigning a free register.
  auto Order =
      AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
  if (MCRegister PhysReg =
          tryAssign(VirtReg, Order, NewVRegs, FixedRegisters)) {
    // If VirtReg got an assignment, the eviction info is no longer relevant.
    LastEvicted.clearEvicteeInfo(VirtReg.reg());
    // When NewVRegs is not empty, we may have made decisions such as evicting
    // a virtual register; go with the earlier decisions and use the physical
    // register.
    if (CSRCost.getFrequency() &&
        EvictAdvisor->isUnusedCalleeSavedReg(PhysReg) && NewVRegs.empty()) {
      MCRegister CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
                                                CostPerUseLimit, NewVRegs);
      if (CSRReg || !NewVRegs.empty())
        // Return now if we decided to use a CSR or created new vregs due to
        // pre-splitting.
        return CSRReg;
    } else
      return PhysReg;
  }

  LiveRangeStage Stage = ExtraInfo->getStage(VirtReg);
  LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade "
                    << ExtraInfo->getCascade(VirtReg.reg()) << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (Register PhysReg =
            tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit,
                     FixedRegisters)) {
      Register Hint = MRI->getSimpleHint(VirtReg.reg());
      // If VirtReg has a hint and that hint is broken, record this
      // virtual register as a recoloring candidate for broken hints.
      // Indeed, since we evicted a variable in its neighborhood, it is
      // likely we can at least partially recolor some of the
      // copy-related live-ranges.
      if (Hint && Hint != PhysReg)
        SetOfBrokenHints.insert(&VirtReg);
      // If VirtReg evicted someone, the eviction info for it as an evictee is
      // no longer relevant.
      LastEvicted.clearEvicteeInfo(VirtReg.reg());
      return PhysReg;
    }

  assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    ExtraInfo->setStage(VirtReg, RS_Split);
    LLVM_DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg());
    return 0;
  }

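  // Past this point the live range has been through at least one full round:
  // it could neither be assigned nor evict anything above, so the remaining
  // options are splitting, last chance recoloring, and finally spilling.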
  if (Stage < RS_Spill) {
    // Try splitting VirtReg or interferences.
    unsigned NewVRegSizeBefore = NewVRegs.size();
    Register PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
    if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore)) {
      // If VirtReg got split, the eviction info is no longer relevant.
      LastEvicted.clearEvicteeInfo(VirtReg.reg());
      return PhysReg;
    }
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
                                   Depth);

  // Finally spill VirtReg itself.
  if ((EnableDeferredSpilling ||
       TRI->shouldUseDeferredSpillingForVirtReg(*MF, VirtReg)) &&
      ExtraInfo->getStage(VirtReg) < RS_Memory) {
    // TODO: This is experimental and in particular, we do not model
    // the live range splitting done by spilling correctly.
    // We would need a deep integration with the spiller to do the
    // right thing here. Anyway, that is still good for early testing.
    ExtraInfo->setStage(VirtReg, RS_Memory);
    LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n");
    NewVRegs.push_back(VirtReg.reg());
  } else {
    NamedRegionTimer T("spill", "Spiller", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
    spiller().spill(LRE);
    ExtraInfo->setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

    // Tell LiveDebugVariables about the new ranges. Ranges not covered by
    // the new regs are kept in LDV (still mapping to the old register), until
    // we rewrite spilled locations in LDV at a later stage.
    DebugVars->splitRegister(VirtReg.reg(), LRE.regs(), *LIS);

    if (VerifyEnabled)
      MF->verify(this, "After spilling");
  }

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

void RAGreedy::RAGreedyStats::report(MachineOptimizationRemarkMissed &R) {
  using namespace ore;
  if (Spills) {
    R << NV("NumSpills", Spills) << " spills ";
    R << NV("TotalSpillsCost", SpillsCost) << " total spills cost ";
  }
  if (FoldedSpills) {
    R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
    R << NV("TotalFoldedSpillsCost", FoldedSpillsCost)
      << " total folded spills cost ";
  }
  if (Reloads) {
    R << NV("NumReloads", Reloads) << " reloads ";
    R << NV("TotalReloadsCost", ReloadsCost) << " total reloads cost ";
  }
  if (FoldedReloads) {
    R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
    R << NV("TotalFoldedReloadsCost", FoldedReloadsCost)
      << " total folded reloads cost ";
  }
  if (ZeroCostFoldedReloads)
    R << NV("NumZeroCostFoldedReloads", ZeroCostFoldedReloads)
      << " zero cost folded reloads ";
  if (Copies) {
    R << NV("NumVRCopies", Copies) << " virtual register copies ";
    R << NV("TotalCopiesCost", CopiesCost) << " total copies cost ";
  }
}

RAGreedy::RAGreedyStats RAGreedy::computeStats(MachineBasicBlock &MBB) {
  RAGreedyStats Stats;
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  int FI;

  auto isSpillSlotAccess = [&MFI](const MachineMemOperand *A) {
    return MFI.isSpillSlotObjectIndex(cast<FixedStackPseudoSourceValue>(
        A->getPseudoValue())->getFrameIndex());
  };
  auto isPatchpointInstr = [](const MachineInstr &MI) {
    return MI.getOpcode() == TargetOpcode::PATCHPOINT ||
           MI.getOpcode() == TargetOpcode::STACKMAP ||
           MI.getOpcode() == TargetOpcode::STATEPOINT;
  };
  for (MachineInstr &MI : MBB) {
    if (MI.isCopy()) {
      MachineOperand &Dest = MI.getOperand(0);
      MachineOperand &Src = MI.getOperand(1);
      if (Dest.isReg() && Src.isReg() && Dest.getReg().isVirtual() &&
          Src.getReg().isVirtual())
        ++Stats.Copies;
      continue;
    }

    SmallVector<const MachineMemOperand *, 2> Accesses;
    if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) {
      ++Stats.Reloads;
      continue;
    }
    if (TII->isStoreToStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) {
      ++Stats.Spills;
      continue;
    }
    if (TII->hasLoadFromStackSlot(MI, Accesses) &&
        llvm::any_of(Accesses, isSpillSlotAccess)) {
      if (!isPatchpointInstr(MI)) {
        Stats.FoldedReloads += Accesses.size();
        continue;
      }
      // For statepoint there may be folded and zero cost folded stack
      // reloads.
      std::pair<unsigned, unsigned> NonZeroCostRange =
          TII->getPatchpointUnfoldableRange(MI);
      SmallSet<unsigned, 16> FoldedReloads;
      SmallSet<unsigned, 16> ZeroCostFoldedReloads;
      for (unsigned Idx = 0, E = MI.getNumOperands(); Idx < E; ++Idx) {
        MachineOperand &MO = MI.getOperand(Idx);
        if (!MO.isFI() || !MFI.isSpillSlotObjectIndex(MO.getIndex()))
          continue;
        if (Idx >= NonZeroCostRange.first && Idx < NonZeroCostRange.second)
          FoldedReloads.insert(MO.getIndex());
        else
          ZeroCostFoldedReloads.insert(MO.getIndex());
      }
      // If a stack slot is used in a folded reload, it is not zero cost.
      for (unsigned Slot : FoldedReloads)
        ZeroCostFoldedReloads.erase(Slot);
      Stats.FoldedReloads += FoldedReloads.size();
      Stats.ZeroCostFoldedReloads += ZeroCostFoldedReloads.size();
      continue;
    }
    Accesses.clear();
    if (TII->hasStoreToStackSlot(MI, Accesses) &&
        llvm::any_of(Accesses, isSpillSlotAccess)) {
      Stats.FoldedSpills += Accesses.size();
    }
  }
  // Scale the collected statistics by the relative frequency of this basic
  // block.
  float RelFreq = MBFI->getBlockFreqRelativeToEntryBlock(&MBB);
  Stats.ReloadsCost = RelFreq * Stats.Reloads;
  Stats.FoldedReloadsCost = RelFreq * Stats.FoldedReloads;
  Stats.SpillsCost = RelFreq * Stats.Spills;
  Stats.FoldedSpillsCost = RelFreq * Stats.FoldedSpills;
  Stats.CopiesCost = RelFreq * Stats.Copies;
  return Stats;
}

RAGreedy::RAGreedyStats RAGreedy::reportStats(MachineLoop *L) {
  RAGreedyStats Stats;

  // Sum up the spills and reloads in subloops.
  for (MachineLoop *SubLoop : *L)
    Stats.add(reportStats(SubLoop));

  for (MachineBasicBlock *MBB : L->getBlocks())
    // Handle blocks that were not included in subloops.
    if (Loops->getLoopFor(MBB) == L)
      Stats.add(computeStats(*MBB));

  if (!Stats.isEmpty()) {
    using namespace ore;

    ORE->emit([&]() {
      MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReloadCopies",
                                        L->getStartLoc(), L->getHeader());
      Stats.report(R);
      R << "generated in loop";
      return R;
    });
  }
  return Stats;
}

void RAGreedy::reportStats() {
  if (!ORE->allowExtraAnalysis(DEBUG_TYPE))
    return;
  RAGreedyStats Stats;
  for (MachineLoop *L : *Loops)
    Stats.add(reportStats(L));
  // Process non-loop blocks.
  for (MachineBasicBlock &MBB : *MF)
    if (!Loops->getLoopFor(&MBB))
      Stats.add(computeStats(MBB));
  if (!Stats.isEmpty()) {
    using namespace ore;

    ORE->emit([&]() {
      DebugLoc Loc;
      if (auto *SP = MF->getFunction().getSubprogram())
        Loc = DILocation::get(SP->getContext(), SP->getLine(), 1, SP);
      MachineOptimizationRemarkMissed R(DEBUG_TYPE, "SpillReloadCopies", Loc,
                                        &MF->front());
      Stats.report(R);
      R << "generated in function";
      return R;
    });
  }
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
                    << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  RCI.runOnMachineFunction(mf);

  EnableAdvancedRASplitCost =
      ConsiderLocalIntervalCost.getNumOccurrences()
          ? ConsiderLocalIntervalCost
          : MF->getSubtarget().enableAdvancedRASplitCost();

  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  initializeCSRCost();

  RegCosts = TRI->getRegisterCosts(*MF);

  ExtraInfo.emplace();
  EvictAdvisor =
      getAnalysis<RegAllocEvictionAdvisorAnalysis>().getAdvisor(*MF, *this);

  VRAI = std::make_unique<VirtRegAuxInfo>(*MF, *LIS, *VRM, *Loops, *MBFI);
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM, *VRAI));

  VRAI->calculateSpillWeightsAndHints();

  LLVM_DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI, *VRAI));

  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32); // This will grow as needed.
  SetOfBrokenHints.clear();
  LastEvicted.clear();

  allocatePhysRegs();
  tryHintsRecoloring();

  if (VerifyEnabled)
    MF->verify(this, "Before post optimization");
  postOptimization();
  reportStats();

  releaseMemory();
  return true;
}