1 //===- RegAllocGreedy.cpp - greedy register allocator ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the RAGreedy function pass for register allocation in
10 // optimized builds.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RegAllocGreedy.h"
15 #include "AllocationOrder.h"
16 #include "InterferenceCache.h"
17 #include "LiveDebugVariables.h"
18 #include "RegAllocBase.h"
19 #include "RegAllocEvictionAdvisor.h"
20 #include "SpillPlacement.h"
21 #include "SplitKit.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/ADT/BitVector.h"
24 #include "llvm/ADT/IndexedMap.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallSet.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/Statistic.h"
30 #include "llvm/ADT/StringRef.h"
31 #include "llvm/Analysis/AliasAnalysis.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/CodeGen/CalcSpillWeights.h"
34 #include "llvm/CodeGen/EdgeBundles.h"
35 #include "llvm/CodeGen/LiveInterval.h"
36 #include "llvm/CodeGen/LiveIntervalUnion.h"
37 #include "llvm/CodeGen/LiveIntervals.h"
38 #include "llvm/CodeGen/LiveRangeEdit.h"
39 #include "llvm/CodeGen/LiveRegMatrix.h"
40 #include "llvm/CodeGen/LiveStacks.h"
41 #include "llvm/CodeGen/MachineBasicBlock.h"
42 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
43 #include "llvm/CodeGen/MachineDominators.h"
44 #include "llvm/CodeGen/MachineFrameInfo.h"
45 #include "llvm/CodeGen/MachineFunction.h"
46 #include "llvm/CodeGen/MachineFunctionPass.h"
47 #include "llvm/CodeGen/MachineInstr.h"
48 #include "llvm/CodeGen/MachineLoopInfo.h"
49 #include "llvm/CodeGen/MachineOperand.h"
50 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
51 #include "llvm/CodeGen/MachineRegisterInfo.h"
52 #include "llvm/CodeGen/RegAllocRegistry.h"
53 #include "llvm/CodeGen/RegisterClassInfo.h"
54 #include "llvm/CodeGen/SlotIndexes.h"
55 #include "llvm/CodeGen/Spiller.h"
56 #include "llvm/CodeGen/TargetInstrInfo.h"
57 #include "llvm/CodeGen/TargetRegisterInfo.h"
58 #include "llvm/CodeGen/TargetSubtargetInfo.h"
59 #include "llvm/CodeGen/VirtRegMap.h"
60 #include "llvm/IR/DebugInfoMetadata.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/LLVMContext.h"
63 #include "llvm/InitializePasses.h"
64 #include "llvm/MC/MCRegisterInfo.h"
65 #include "llvm/Pass.h"
66 #include "llvm/Support/BlockFrequency.h"
67 #include "llvm/Support/BranchProbability.h"
68 #include "llvm/Support/CommandLine.h"
69 #include "llvm/Support/Debug.h"
70 #include "llvm/Support/MathExtras.h"
71 #include "llvm/Support/Timer.h"
72 #include "llvm/Support/raw_ostream.h"
73 #include <algorithm>
74 #include <cassert>
75 #include <cstdint>
76 #include <utility>
77 
78 using namespace llvm;
79 
80 #define DEBUG_TYPE "regalloc"
81 
82 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
83 STATISTIC(NumLocalSplits,  "Number of split local live ranges");
84 STATISTIC(NumEvicted,      "Number of interferences evicted");
85 
86 static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
87     "split-spill-mode", cl::Hidden,
88     cl::desc("Spill mode for splitting live ranges"),
89     cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
90                clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
91                clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")),
92     cl::init(SplitEditor::SM_Speed));
93 
94 static cl::opt<unsigned>
95 LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
96                              cl::desc("Last chance recoloring max depth"),
97                              cl::init(5));
98 
99 static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
100     "lcr-max-interf", cl::Hidden,
101     cl::desc("Last chance recoloring maximum number of considered"
102              " interference at a time"),
103     cl::init(8));
104 
105 static cl::opt<bool> ExhaustiveSearch(
106     "exhaustive-register-search", cl::NotHidden,
107     cl::desc("Exhaustive Search for registers bypassing the depth "
108              "and interference cutoffs of last chance recoloring"),
109     cl::Hidden);
110 
111 static cl::opt<bool> EnableDeferredSpilling(
112     "enable-deferred-spilling", cl::Hidden,
113     cl::desc("Instead of spilling a variable right away, defer the actual "
114              "code insertion to the end of the allocation. That way the "
115              "allocator might still find a suitable coloring for this "
116              "variable because of other evicted variables."),
117     cl::init(false));
118 
119 // FIXME: Find a good default for this flag and remove the flag.
120 static cl::opt<unsigned>
121 CSRFirstTimeCost("regalloc-csr-first-time-cost",
122               cl::desc("Cost for first time use of callee-saved register."),
123               cl::init(0), cl::Hidden);
124 
125 static cl::opt<unsigned long> GrowRegionComplexityBudget(
126     "grow-region-complexity-budget",
127     cl::desc("growRegion() does not scale with the number of BB edges, so "
128              "limit its budget and bail out once we reach the limit."),
129     cl::init(10000), cl::Hidden);
130 
131 static cl::opt<bool> GreedyRegClassPriorityTrumpsGlobalness(
132     "greedy-regclass-priority-trumps-globalness",
133     cl::desc("Change the greedy register allocator's live range priority "
134              "calculation to make the AllocationPriority of the register class "
135              "more important then whether the range is global"),
136     cl::Hidden);
137 
138 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
139                                        createGreedyRegisterAllocator);
140 
141 char RAGreedy::ID = 0;
142 char &llvm::RAGreedyID = RAGreedy::ID;
143 
144 INITIALIZE_PASS_BEGIN(RAGreedy, "greedy",
145                 "Greedy Register Allocator", false, false)
146 INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
147 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
148 INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
149 INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
150 INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
151 INITIALIZE_PASS_DEPENDENCY(LiveStacks)
152 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
153 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
154 INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
155 INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix)
156 INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
157 INITIALIZE_PASS_DEPENDENCY(SpillPlacement)
158 INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
159 INITIALIZE_PASS_DEPENDENCY(RegAllocEvictionAdvisorAnalysis)
160 INITIALIZE_PASS_END(RAGreedy, "greedy",
161                 "Greedy Register Allocator", false, false)
162 
163 #ifndef NDEBUG
164 const char *const RAGreedy::StageName[] = {
165     "RS_New",
166     "RS_Assign",
167     "RS_Split",
168     "RS_Split2",
169     "RS_Spill",
170     "RS_Memory",
171     "RS_Done"
172 };
173 #endif
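// Rough intent of the stages named above (see the LiveRangeStage enum in
// RegAllocGreedy.h for the authoritative definitions): RS_New ranges have not
// been queued yet, RS_Assign ranges get a plain assignment attempt,
// RS_Split/RS_Split2 ranges are candidates for (increasingly restricted) live
// range splitting, RS_Spill ranges may only be spilled, RS_Memory ranges use
// deferred spilling, and RS_Done ranges are never changed again.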
174 
175 // Hysteresis to use when comparing floats.
176 // This helps stabilize decisions based on float comparisons.
177 const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
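// In practice it is used as a multiplier on one side of such comparisons, so
// a new candidate must beat the incumbent by roughly 2% before a decision
// flips, keeping near-ties stable across iterations.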
178 
179 FunctionPass* llvm::createGreedyRegisterAllocator() {
180   return new RAGreedy();
181 }
182 
183 namespace llvm {
184 FunctionPass* createGreedyRegisterAllocator(
185   std::function<bool(const TargetRegisterInfo &TRI,
186                      const TargetRegisterClass &RC)> Ftor);
187 
188 }
189 
190 FunctionPass* llvm::createGreedyRegisterAllocator(
191   std::function<bool(const TargetRegisterInfo &TRI,
192                      const TargetRegisterClass &RC)> Ftor) {
193   return new RAGreedy(Ftor);
194 }
195 
196 RAGreedy::RAGreedy(RegClassFilterFunc F):
197   MachineFunctionPass(ID),
198   RegAllocBase(F) {
199 }
200 
201 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
202   AU.setPreservesCFG();
203   AU.addRequired<MachineBlockFrequencyInfo>();
204   AU.addPreserved<MachineBlockFrequencyInfo>();
205   AU.addRequired<AAResultsWrapperPass>();
206   AU.addPreserved<AAResultsWrapperPass>();
207   AU.addRequired<LiveIntervals>();
208   AU.addPreserved<LiveIntervals>();
209   AU.addRequired<SlotIndexes>();
210   AU.addPreserved<SlotIndexes>();
211   AU.addRequired<LiveDebugVariables>();
212   AU.addPreserved<LiveDebugVariables>();
213   AU.addRequired<LiveStacks>();
214   AU.addPreserved<LiveStacks>();
215   AU.addRequired<MachineDominatorTree>();
216   AU.addPreserved<MachineDominatorTree>();
217   AU.addRequired<MachineLoopInfo>();
218   AU.addPreserved<MachineLoopInfo>();
219   AU.addRequired<VirtRegMap>();
220   AU.addPreserved<VirtRegMap>();
221   AU.addRequired<LiveRegMatrix>();
222   AU.addPreserved<LiveRegMatrix>();
223   AU.addRequired<EdgeBundles>();
224   AU.addRequired<SpillPlacement>();
225   AU.addRequired<MachineOptimizationRemarkEmitterPass>();
226   AU.addRequired<RegAllocEvictionAdvisorAnalysis>();
227   MachineFunctionPass::getAnalysisUsage(AU);
228 }
229 
230 //===----------------------------------------------------------------------===//
231 //                     LiveRangeEdit delegate methods
232 //===----------------------------------------------------------------------===//
233 
234 bool RAGreedy::LRE_CanEraseVirtReg(Register VirtReg) {
235   LiveInterval &LI = LIS->getInterval(VirtReg);
236   if (VRM->hasPhys(VirtReg)) {
237     Matrix->unassign(LI);
238     aboutToRemoveInterval(LI);
239     return true;
240   }
241   // Unassigned virtreg is probably in the priority queue.
242   // RegAllocBase will erase it after dequeueing.
243   // Nonetheless, clear the live-range so that the debug
244   // dump will show the right state for that VirtReg.
245   LI.clear();
246   return false;
247 }
248 
249 void RAGreedy::LRE_WillShrinkVirtReg(Register VirtReg) {
250   if (!VRM->hasPhys(VirtReg))
251     return;
252 
253   // Register is assigned, put it back on the queue for reassignment.
254   LiveInterval &LI = LIS->getInterval(VirtReg);
255   Matrix->unassign(LI);
256   RegAllocBase::enqueue(&LI);
257 }
258 
259 void RAGreedy::LRE_DidCloneVirtReg(Register New, Register Old) {
260   ExtraInfo->LRE_DidCloneVirtReg(New, Old);
261 }
262 
263 void RAGreedy::ExtraRegInfo::LRE_DidCloneVirtReg(Register New, Register Old) {
264   // Cloning a register we haven't even heard about yet?  Just ignore it.
265   if (!Info.inBounds(Old))
266     return;
267 
268   // LRE may clone a virtual register because dead code elimination causes it to
269   // be split into connected components. The new components are much smaller
270   // than the original, so they should get a new chance at being assigned:
271   // reset the parent to RS_Assign and copy that stage to the clone.
272   Info[Old].Stage = RS_Assign;
273   Info.grow(New.id());
274   Info[New] = Info[Old];
275 }
276 
277 void RAGreedy::releaseMemory() {
278   SpillerInstance.reset();
279   GlobalCand.clear();
280 }
281 
282 void RAGreedy::enqueueImpl(const LiveInterval *LI) { enqueue(Queue, LI); }
283 
284 void RAGreedy::enqueue(PQueue &CurQueue, const LiveInterval *LI) {
285   // Prioritize live ranges by size, assigning larger ranges first.
286   // The queue holds (size, reg) pairs.
287   const unsigned Size = LI->getSize();
288   const Register Reg = LI->reg();
289   assert(Reg.isVirtual() && "Can only enqueue virtual registers");
290   unsigned Prio;
291 
292   auto Stage = ExtraInfo->getOrInitStage(Reg);
293   if (Stage == RS_New) {
294     Stage = RS_Assign;
295     ExtraInfo->setStage(Reg, Stage);
296   }
297   if (Stage == RS_Split) {
298     // Unsplit ranges that couldn't be allocated immediately are deferred until
299     // everything else has been allocated.
300     Prio = Size;
301   } else if (Stage == RS_Memory) {
302     // Memory operands should be considered last.
303     // Change the priority such that memory operands are assigned in
304     // the reverse order that they came in.
305     // TODO: Make this a member variable and probably do something about hints.
306     static unsigned MemOp = 0;
307     Prio = MemOp++;
308   } else {
309     // Giant live ranges fall back to the global assignment heuristic, which
310     // prevents excessive spilling in pathological cases.
311     bool ReverseLocal = TRI->reverseLocalAssignment();
312     const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
313     bool ForceGlobal =
314         !ReverseLocal && (Size / SlotIndex::InstrDist) >
315                              (2 * RegClassInfo.getNumAllocatableRegs(&RC));
316     unsigned GlobalBit = 0;
317 
318     if (Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
319         LIS->intervalIsInOneMBB(*LI)) {
320       // Allocate original local ranges in linear instruction order. Since they
321       // are singly defined, this produces optimal coloring in the absence of
322       // global interference and other constraints.
323       if (!ReverseLocal)
324         Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
325       else {
326         // Allocating bottom up may allow many short live ranges to be assigned
327         // first to one of the cheap registers. This could be much faster for
328         // very large blocks on targets with many physical registers.
329         Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
330       }
331     } else {
332       // Allocate global and split ranges in long->short order. Long ranges that
333       // don't fit should be spilled (or split) ASAP so they don't create
334       // interference.  Mark a bit to prioritize global above local ranges.
335       Prio = Size;
336       GlobalBit = 1;
337     }
338     if (RegClassPriorityTrumpsGlobalness)
339       Prio |= RC.AllocationPriority << 25 | GlobalBit << 24;
340     else
341       Prio |= GlobalBit << 29 | RC.AllocationPriority << 24;
342 
343     // Mark a higher bit to prioritize global and local above RS_Split.
344     Prio |= (1u << 31);
345 
346     // Boost ranges that have a physical register hint.
347     if (VRM->hasKnownPreference(Reg))
348       Prio |= (1u << 30);
349   }
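  // To summarize the priority word assembled above: bit 31 distinguishes
  // these ranges from deferred RS_Split/RS_Memory ranges, bit 30 boosts
  // ranges with a known register preference, the next few bits encode the
  // globalness flag and the register class AllocationPriority (in an order
  // controlled by RegClassPriorityTrumpsGlobalness), and the low bits hold
  // the size or instruction-distance key computed above.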
350   // The virtual register number is a tie breaker for same-sized ranges.
351   // Give lower vreg numbers higher priority to assign them first.
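  // (Pushing ~Reg makes lower vreg numbers compare as larger values in the
  // max-priority queue; the complement is undone again in dequeue().)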
352   CurQueue.push(std::make_pair(Prio, ~Reg));
353 }
354 
355 const LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }
356 
357 const LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
358   if (CurQueue.empty())
359     return nullptr;
360   LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
361   CurQueue.pop();
362   return LI;
363 }
364 
365 //===----------------------------------------------------------------------===//
366 //                            Direct Assignment
367 //===----------------------------------------------------------------------===//
368 
369 /// tryAssign - Try to assign VirtReg to an available register.
370 MCRegister RAGreedy::tryAssign(const LiveInterval &VirtReg,
371                                AllocationOrder &Order,
372                                SmallVectorImpl<Register> &NewVRegs,
373                                const SmallVirtRegSet &FixedRegisters) {
374   MCRegister PhysReg;
375   for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
376     assert(*I);
377     if (!Matrix->checkInterference(VirtReg, *I)) {
378       if (I.isHint())
379         return *I;
380       else
381         PhysReg = *I;
382     }
383   }
384   if (!PhysReg.isValid())
385     return PhysReg;
386 
387   // PhysReg is available, but there may be a better choice.
388 
389   // If we missed a simple hint, try to cheaply evict interference from the
390   // preferred register.
391   if (Register Hint = MRI->getSimpleHint(VirtReg.reg()))
392     if (Order.isHint(Hint)) {
393       MCRegister PhysHint = Hint.asMCReg();
394       LLVM_DEBUG(dbgs() << "missed hint " << printReg(PhysHint, TRI) << '\n');
395 
396       if (EvictAdvisor->canEvictHintInterference(VirtReg, PhysHint,
397                                                  FixedRegisters)) {
398         evictInterference(VirtReg, PhysHint, NewVRegs);
399         return PhysHint;
400       }
401       // Record the missed hint, we may be able to recover
402       // at the end if the surrounding allocation changed.
403       SetOfBrokenHints.insert(&VirtReg);
404     }
405 
406   // Try to evict interference from a cheaper alternative.
407   uint8_t Cost = RegCosts[PhysReg];
408 
409   // Most registers have 0 additional cost.
410   if (!Cost)
411     return PhysReg;
412 
413   LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
414                     << (unsigned)Cost << '\n');
415   MCRegister CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost, FixedRegisters);
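  // Prefer the cheaper register if eviction succeeded; otherwise fall back to
  // PhysReg, which is available but carries the extra cost noted above.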
416   return CheapReg ? CheapReg : PhysReg;
417 }
418 
419 //===----------------------------------------------------------------------===//
420 //                         Interference eviction
421 //===----------------------------------------------------------------------===//
422 
423 Register RegAllocEvictionAdvisor::canReassign(const LiveInterval &VirtReg,
424                                               Register PrevReg) const {
425   auto Order =
426       AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
427   MCRegister PhysReg;
428   for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
429     if ((*I).id() == PrevReg.id())
430       continue;
431 
432     MCRegUnitIterator Units(*I, TRI);
433     for (; Units.isValid(); ++Units) {
434       // Instantiate a "subquery", not to be confused with the Queries array.
435       LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
436       if (subQ.checkInterference())
437         break;
438     }
439     // If no units have interference, break out with the current PhysReg.
440     if (!Units.isValid())
441       PhysReg = *I;
442   }
443   if (PhysReg)
444     LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
445                       << printReg(PrevReg, TRI) << " to "
446                       << printReg(PhysReg, TRI) << '\n');
447   return PhysReg;
448 }
449 
450 /// evictInterference - Evict any interfering registers that prevent VirtReg
451 /// from being assigned to PhysReg. This assumes that canEvictInterference
452 /// returned true.
453 void RAGreedy::evictInterference(const LiveInterval &VirtReg,
454                                  MCRegister PhysReg,
455                                  SmallVectorImpl<Register> &NewVRegs) {
456   // Make sure that VirtReg has a cascade number, and assign that cascade
457   // number to every evicted register. These live ranges can then only be
458   // evicted by a newer cascade, preventing infinite loops.
459   unsigned Cascade = ExtraInfo->getOrAssignNewCascade(VirtReg.reg());
460 
461   LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
462                     << " interference: Cascade " << Cascade << '\n');
463 
464   // Collect all interfering virtregs first.
465   SmallVector<const LiveInterval *, 8> Intfs;
466   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
467     LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
468     // We usually have the interfering VRegs cached so collectInterferingVRegs()
469     // should be fast; we may need to recalculate when different physregs
470     // overlap the same register unit, since different SubRanges may have
471     // been queried against it.
472     ArrayRef<const LiveInterval *> IVR = Q.interferingVRegs();
473     Intfs.append(IVR.begin(), IVR.end());
474   }
475 
476   // Evict them second. This will invalidate the queries.
477   for (const LiveInterval *Intf : Intfs) {
478     // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
479     if (!VRM->hasPhys(Intf->reg()))
480       continue;
481 
482     Matrix->unassign(*Intf);
483     assert((ExtraInfo->getCascade(Intf->reg()) < Cascade ||
484             VirtReg.isSpillable() < Intf->isSpillable()) &&
485            "Cannot decrease cascade number, illegal eviction");
486     ExtraInfo->setCascade(Intf->reg(), Cascade);
487     ++NumEvicted;
488     NewVRegs.push_back(Intf->reg());
489   }
490 }
491 
492 /// Returns true if the given \p PhysReg is a callee saved register and has not
493 /// been used for allocation yet.
494 bool RegAllocEvictionAdvisor::isUnusedCalleeSavedReg(MCRegister PhysReg) const {
495   MCRegister CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
496   if (!CSR)
497     return false;
498 
499   return !Matrix->isPhysRegUsed(PhysReg);
500 }
501 
502 Optional<unsigned>
503 RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg,
504                                        const AllocationOrder &Order,
505                                        unsigned CostPerUseLimit) const {
506   unsigned OrderLimit = Order.getOrder().size();
507 
508   if (CostPerUseLimit < uint8_t(~0u)) {
509     // Check if any registers in RC are below CostPerUseLimit.
510     const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg());
511     uint8_t MinCost = RegClassInfo.getMinCost(RC);
512     if (MinCost >= CostPerUseLimit) {
513       LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
514                         << MinCost << ", no cheaper registers to be found.\n");
515       return None;
516     }
517 
518     // It is normal for register classes to have a long tail of registers with
519     // the same cost. We don't need to look at them if they're too expensive.
520     if (RegCosts[Order.getOrder().back()] >= CostPerUseLimit) {
521       OrderLimit = RegClassInfo.getLastCostChange(RC);
522       LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
523                         << " regs.\n");
524     }
525   }
526   return OrderLimit;
527 }
528 
529 bool RegAllocEvictionAdvisor::canAllocatePhysReg(unsigned CostPerUseLimit,
530                                                  MCRegister PhysReg) const {
531   if (RegCosts[PhysReg] >= CostPerUseLimit)
532     return false;
533   // The first use of a callee-saved register in a function has cost 1.
534   // Don't start using a CSR when the CostPerUseLimit is low.
535   if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
536     LLVM_DEBUG(
537         dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
538                << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
539                << '\n');
540     return false;
541   }
542   return true;
543 }
544 
545 /// tryEvict - Try to evict all interferences for a physreg.
546 /// @param  VirtReg Currently unassigned virtual register.
547 /// @param  Order   Physregs to try.
548 /// @return         Physreg to assign VirtReg, or 0.
549 MCRegister RAGreedy::tryEvict(const LiveInterval &VirtReg,
550                               AllocationOrder &Order,
551                               SmallVectorImpl<Register> &NewVRegs,
552                               uint8_t CostPerUseLimit,
553                               const SmallVirtRegSet &FixedRegisters) {
554   NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
555                      TimePassesIsEnabled);
556 
557   MCRegister BestPhys = EvictAdvisor->tryFindEvictionCandidate(
558       VirtReg, Order, CostPerUseLimit, FixedRegisters);
559   if (BestPhys.isValid())
560     evictInterference(VirtReg, BestPhys, NewVRegs);
561   return BestPhys;
562 }
563 
564 //===----------------------------------------------------------------------===//
565 //                              Region Splitting
566 //===----------------------------------------------------------------------===//
567 
568 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
569 /// interference pattern in Physreg and its aliases. Add the constraints to
570 /// SpillPlacement and return the static cost of this split in Cost, assuming
571 /// that all preferences in SplitConstraints are met.
572 /// Return false if there are no bundles with positive bias.
573 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
574                                    BlockFrequency &Cost) {
575   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
576 
577   // Reset interference dependent info.
578   SplitConstraints.resize(UseBlocks.size());
579   BlockFrequency StaticCost = 0;
580   for (unsigned I = 0; I != UseBlocks.size(); ++I) {
581     const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
582     SpillPlacement::BlockConstraint &BC = SplitConstraints[I];
583 
584     BC.Number = BI.MBB->getNumber();
585     Intf.moveToBlock(BC.Number);
586     BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
587     BC.Exit = (BI.LiveOut &&
588                !LIS->getInstructionFromIndex(BI.LastInstr)->isImplicitDef())
589                   ? SpillPlacement::PrefReg
590                   : SpillPlacement::DontCare;
591     BC.ChangesValue = BI.FirstDef.isValid();
592 
593     if (!Intf.hasInterference())
594       continue;
595 
596     // Number of spill code instructions to insert.
597     unsigned Ins = 0;
598 
599     // Interference for the live-in value.
600     if (BI.LiveIn) {
601       if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
602         BC.Entry = SpillPlacement::MustSpill;
603         ++Ins;
604       } else if (Intf.first() < BI.FirstInstr) {
605         BC.Entry = SpillPlacement::PrefSpill;
606         ++Ins;
607       } else if (Intf.first() < BI.LastInstr) {
608         ++Ins;
609       }
610 
611       // Abort if the spill cannot be inserted at the MBB's start.
612       if (((BC.Entry == SpillPlacement::MustSpill) ||
613            (BC.Entry == SpillPlacement::PrefSpill)) &&
614           SlotIndex::isEarlierInstr(BI.FirstInstr,
615                                     SA->getFirstSplitPoint(BC.Number)))
616         return false;
617     }
618 
619     // Interference for the live-out value.
620     if (BI.LiveOut) {
621       if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
622         BC.Exit = SpillPlacement::MustSpill;
623         ++Ins;
624       } else if (Intf.last() > BI.LastInstr) {
625         BC.Exit = SpillPlacement::PrefSpill;
626         ++Ins;
627       } else if (Intf.last() > BI.FirstInstr) {
628         ++Ins;
629       }
630     }
631 
632     // Accumulate the total frequency of inserted spill code.
633     while (Ins--)
634       StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
635   }
636   Cost = StaticCost;
637 
638   // Add constraints for use-blocks. Note that these are the only constraints
639   // that may add a positive bias; it is downhill from here.
640   SpillPlacer->addConstraints(SplitConstraints);
641   return SpillPlacer->scanActiveBundles();
642 }
643 
644 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
645 /// live-through blocks in Blocks.
646 bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
647                                      ArrayRef<unsigned> Blocks) {
648   const unsigned GroupSize = 8;
649   SpillPlacement::BlockConstraint BCS[GroupSize];
650   unsigned TBS[GroupSize];
651   unsigned B = 0, T = 0;
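  // Constraints (BCS) and links (TBS) are buffered in these small stack
  // arrays and flushed to SpillPlacer in batches of GroupSize entries.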
652 
653   for (unsigned Number : Blocks) {
654     Intf.moveToBlock(Number);
655 
656     if (!Intf.hasInterference()) {
657       assert(T < GroupSize && "Array overflow");
658       TBS[T] = Number;
659       if (++T == GroupSize) {
660         SpillPlacer->addLinks(makeArrayRef(TBS, T));
661         T = 0;
662       }
663       continue;
664     }
665 
666     assert(B < GroupSize && "Array overflow");
667     BCS[B].Number = Number;
668 
669     // Abort if the spill cannot be inserted at the MBB's start.
670     MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
671     auto FirstNonDebugInstr = MBB->getFirstNonDebugInstr();
672     if (FirstNonDebugInstr != MBB->end() &&
673         SlotIndex::isEarlierInstr(LIS->getInstructionIndex(*FirstNonDebugInstr),
674                                   SA->getFirstSplitPoint(Number)))
675       return false;
676     // Interference for the live-in value.
677     if (Intf.first() <= Indexes->getMBBStartIdx(Number))
678       BCS[B].Entry = SpillPlacement::MustSpill;
679     else
680       BCS[B].Entry = SpillPlacement::PrefSpill;
681 
682     // Interference for the live-out value.
683     if (Intf.last() >= SA->getLastSplitPoint(Number))
684       BCS[B].Exit = SpillPlacement::MustSpill;
685     else
686       BCS[B].Exit = SpillPlacement::PrefSpill;
687 
688     if (++B == GroupSize) {
689       SpillPlacer->addConstraints(makeArrayRef(BCS, B));
690       B = 0;
691     }
692   }
693 
694   SpillPlacer->addConstraints(makeArrayRef(BCS, B));
695   SpillPlacer->addLinks(makeArrayRef(TBS, T));
696   return true;
697 }
698 
699 bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
700   // Keep track of through blocks that have not been added to SpillPlacer.
701   BitVector Todo = SA->getThroughBlocks();
702   SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
703   unsigned AddedTo = 0;
704 #ifndef NDEBUG
705   unsigned Visited = 0;
706 #endif
707 
708   unsigned long Budget = GrowRegionComplexityBudget;
709   while (true) {
710     ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
711     // Find new through blocks in the periphery of PrefRegBundles.
712     for (unsigned Bundle : NewBundles) {
713       // Look at all blocks connected to Bundle in the full graph.
714       ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
715       // Limit compilation time by bailing out after we use all our budget.
716       if (Blocks.size() >= Budget)
717         return false;
718       Budget -= Blocks.size();
719       for (unsigned Block : Blocks) {
720         if (!Todo.test(Block))
721           continue;
722         Todo.reset(Block);
723         // This is a new through block. Add it to SpillPlacer later.
724         ActiveBlocks.push_back(Block);
725 #ifndef NDEBUG
726         ++Visited;
727 #endif
728       }
729     }
730     // Any new blocks to add?
731     if (ActiveBlocks.size() == AddedTo)
732       break;
733 
734     // Compute through constraints from the interference, or assume that all
735     // through blocks prefer spilling when forming compact regions.
736     auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
737     if (Cand.PhysReg) {
738       if (!addThroughConstraints(Cand.Intf, NewBlocks))
739         return false;
740     } else
741       // Provide a strong negative bias on through blocks to prevent unwanted
742       // liveness on loop backedges.
743       SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
744     AddedTo = ActiveBlocks.size();
745 
746     // Perhaps iterating can enable more bundles?
747     SpillPlacer->iterate();
748   }
749   LLVM_DEBUG(dbgs() << ", v=" << Visited);
750   return true;
751 }
752 
753 /// calcCompactRegion - Compute the set of edge bundles that should be live
754 /// when splitting the current live range into compact regions.  Compact
755 /// regions can be computed without looking at interference.  They are the
756 /// regions formed by removing all the live-through blocks from the live range.
757 ///
758 /// Returns false if the current live range is already compact, or if the
759 /// compact regions would form single block regions anyway.
760 bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
761   // Without any through blocks, the live range is already compact.
762   if (!SA->getNumThroughBlocks())
763     return false;
764 
765   // Compact regions don't correspond to any physreg.
766   Cand.reset(IntfCache, MCRegister::NoRegister);
767 
768   LLVM_DEBUG(dbgs() << "Compact region bundles");
769 
770   // Use the spill placer to determine the live bundles. GrowRegion pretends
771   // that all the through blocks have interference when PhysReg is unset.
772   SpillPlacer->prepare(Cand.LiveBundles);
773 
774   // The static split cost will be zero since Cand.Intf reports no interference.
775   BlockFrequency Cost;
776   if (!addSplitConstraints(Cand.Intf, Cost)) {
777     LLVM_DEBUG(dbgs() << ", none.\n");
778     return false;
779   }
780 
781   if (!growRegion(Cand)) {
782     LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
783     return false;
784   }
785 
786   SpillPlacer->finish();
787 
788   if (!Cand.LiveBundles.any()) {
789     LLVM_DEBUG(dbgs() << ", none.\n");
790     return false;
791   }
792 
793   LLVM_DEBUG({
794     for (int I : Cand.LiveBundles.set_bits())
795       dbgs() << " EB#" << I;
796     dbgs() << ".\n";
797   });
798   return true;
799 }
800 
801 /// calcSpillCost - Compute how expensive it would be to split the live range in
802 /// SA around all use blocks instead of forming bundle regions.
803 BlockFrequency RAGreedy::calcSpillCost() {
804   BlockFrequency Cost = 0;
805   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
806   for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
807     unsigned Number = BI.MBB->getNumber();
808     // We normally only need one spill instruction - a load or a store.
809     Cost += SpillPlacer->getBlockFrequency(Number);
810 
811     // Unless the value is redefined in the block.
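    // (Then both a reload of the incoming value and a spill of the new
    // definition are needed, hence a second insertion.)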
812     if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
813       Cost += SpillPlacer->getBlockFrequency(Number);
814   }
815   return Cost;
816 }
817 
818 /// calcGlobalSplitCost - Return the global split cost of following the split
819 /// pattern in LiveBundles. This cost should be added to the local cost of the
820 /// interference pattern in SplitConstraints.
821 ///
822 BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
823                                              const AllocationOrder &Order) {
824   BlockFrequency GlobalCost = 0;
825   const BitVector &LiveBundles = Cand.LiveBundles;
826   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
827   for (unsigned I = 0; I != UseBlocks.size(); ++I) {
828     const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
829     SpillPlacement::BlockConstraint &BC = SplitConstraints[I];
830     bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, false)];
831     bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
832     unsigned Ins = 0;
833 
834     Cand.Intf.moveToBlock(BC.Number);
835 
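    // A copy between register and stack is needed on entry/exit whenever the
    // bundle solution (RegIn/RegOut) disagrees with the preference recorded
    // in SplitConstraints, so count one insertion per mismatch.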
836     if (BI.LiveIn)
837       Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
838     if (BI.LiveOut)
839       Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
840     while (Ins--)
841       GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
842   }
843 
844   for (unsigned Number : Cand.ActiveBlocks) {
845     bool RegIn  = LiveBundles[Bundles->getBundle(Number, false)];
846     bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
847     if (!RegIn && !RegOut)
848       continue;
849     if (RegIn && RegOut) {
850       // We need double spill code if this block has interference.
851       Cand.Intf.moveToBlock(Number);
852       if (Cand.Intf.hasInterference()) {
853         GlobalCost += SpillPlacer->getBlockFrequency(Number);
854         GlobalCost += SpillPlacer->getBlockFrequency(Number);
855       }
856       continue;
857     }
858     // live-in / stack-out or stack-in live-out.
859     GlobalCost += SpillPlacer->getBlockFrequency(Number);
860   }
861   return GlobalCost;
862 }
863 
864 /// splitAroundRegion - Split the current live range around the regions
865 /// determined by BundleCand and GlobalCand.
866 ///
867 /// Before calling this function, GlobalCand and BundleCand must be initialized
868 /// so each bundle is assigned to a valid candidate, or NoCand for the
869 /// stack-bound bundles.  The shared SA/SE SplitAnalysis and SplitEditor
870 /// objects must be initialized for the current live range, and intervals
871 /// created for the used candidates.
872 ///
873 /// @param LREdit    The LiveRangeEdit object handling the current split.
874 /// @param UsedCands List of used GlobalCand entries. Every BundleCand value
875 ///                  must appear in this list.
876 void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
877                                  ArrayRef<unsigned> UsedCands) {
878   // These are the intervals created for new global ranges. We may create more
879   // intervals for local ranges.
880   const unsigned NumGlobalIntvs = LREdit.size();
881   LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
882                     << " globals.\n");
883   assert(NumGlobalIntvs && "No global intervals configured");
884 
885   // Isolate even single instructions when dealing with a proper sub-class.
886   // That guarantees register class inflation for the stack interval because it
887   // is all copies.
888   Register Reg = SA->getParent().reg();
889   bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
890 
891   // First handle all the blocks with uses.
892   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
893   for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
894     unsigned Number = BI.MBB->getNumber();
895     unsigned IntvIn = 0, IntvOut = 0;
896     SlotIndex IntfIn, IntfOut;
897     if (BI.LiveIn) {
898       unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
899       if (CandIn != NoCand) {
900         GlobalSplitCandidate &Cand = GlobalCand[CandIn];
901         IntvIn = Cand.IntvIdx;
902         Cand.Intf.moveToBlock(Number);
903         IntfIn = Cand.Intf.first();
904       }
905     }
906     if (BI.LiveOut) {
907       unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
908       if (CandOut != NoCand) {
909         GlobalSplitCandidate &Cand = GlobalCand[CandOut];
910         IntvOut = Cand.IntvIdx;
911         Cand.Intf.moveToBlock(Number);
912         IntfOut = Cand.Intf.last();
913       }
914     }
915 
916     // Create separate intervals for isolated blocks with multiple uses.
917     if (!IntvIn && !IntvOut) {
918       LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
919       if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
920         SE->splitSingleBlock(BI);
921       continue;
922     }
923 
924     if (IntvIn && IntvOut)
925       SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
926     else if (IntvIn)
927       SE->splitRegInBlock(BI, IntvIn, IntfIn);
928     else
929       SE->splitRegOutBlock(BI, IntvOut, IntfOut);
930   }
931 
932   // Handle live-through blocks. The relevant live-through blocks are stored in
933   // the ActiveBlocks list with each candidate. We need to filter out
934   // duplicates.
935   BitVector Todo = SA->getThroughBlocks();
936   for (unsigned UsedCand : UsedCands) {
937     ArrayRef<unsigned> Blocks = GlobalCand[UsedCand].ActiveBlocks;
938     for (unsigned Number : Blocks) {
939       if (!Todo.test(Number))
940         continue;
941       Todo.reset(Number);
942 
943       unsigned IntvIn = 0, IntvOut = 0;
944       SlotIndex IntfIn, IntfOut;
945 
946       unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
947       if (CandIn != NoCand) {
948         GlobalSplitCandidate &Cand = GlobalCand[CandIn];
949         IntvIn = Cand.IntvIdx;
950         Cand.Intf.moveToBlock(Number);
951         IntfIn = Cand.Intf.first();
952       }
953 
954       unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
955       if (CandOut != NoCand) {
956         GlobalSplitCandidate &Cand = GlobalCand[CandOut];
957         IntvOut = Cand.IntvIdx;
958         Cand.Intf.moveToBlock(Number);
959         IntfOut = Cand.Intf.last();
960       }
961       if (!IntvIn && !IntvOut)
962         continue;
963       SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
964     }
965   }
966 
967   ++NumGlobalSplits;
968 
969   SmallVector<unsigned, 8> IntvMap;
970   SE->finish(&IntvMap);
971   DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
972 
973   unsigned OrigBlocks = SA->getNumLiveBlocks();
974 
975   // Sort out the new intervals created by splitting. We get four kinds:
976   // - Remainder intervals should not be split again.
977   // - Candidate intervals can be assigned to Cand.PhysReg.
978   // - Block-local splits are candidates for local splitting.
979   // - DCE leftovers should go back on the queue.
980   for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
981     const LiveInterval &Reg = LIS->getInterval(LREdit.get(I));
982 
983     // Ignore old intervals from DCE.
984     if (ExtraInfo->getOrInitStage(Reg.reg()) != RS_New)
985       continue;
986 
987     // Remainder interval. Don't try splitting again, spill if it doesn't
988     // allocate.
989     if (IntvMap[I] == 0) {
990       ExtraInfo->setStage(Reg, RS_Spill);
991       continue;
992     }
993 
994     // Global intervals. Allow repeated splitting as long as the number of live
995     // blocks is strictly decreasing.
996     if (IntvMap[I] < NumGlobalIntvs) {
997       if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
998         LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
999                           << " blocks as original.\n");
1000         // Don't allow repeated splitting as a safeguard against looping.
1001         ExtraInfo->setStage(Reg, RS_Split2);
1002       }
1003       continue;
1004     }
1005 
1006     // Other intervals are treated as new. This includes local intervals created
1007     // for blocks with multiple uses, and anything created by DCE.
1008   }
1009 
1010   if (VerifyEnabled)
1011     MF->verify(this, "After splitting live range around region");
1012 }
1013 
1014 MCRegister RAGreedy::tryRegionSplit(const LiveInterval &VirtReg,
1015                                     AllocationOrder &Order,
1016                                     SmallVectorImpl<Register> &NewVRegs) {
1017   if (!TRI->shouldRegionSplitForVirtReg(*MF, VirtReg))
1018     return MCRegister::NoRegister;
1019   unsigned NumCands = 0;
1020   BlockFrequency SpillCost = calcSpillCost();
1021   BlockFrequency BestCost;
1022 
1023   // Check if we can split this live range around a compact region.
1024   bool HasCompact = calcCompactRegion(GlobalCand.front());
1025   if (HasCompact) {
1026     // Yes, keep GlobalCand[0] as the compact region candidate.
1027     NumCands = 1;
1028     BestCost = BlockFrequency::getMaxFrequency();
1029   } else {
1030     // No benefit from the compact region, our fallback will be per-block
1031     // splitting. Make sure we find a solution that is cheaper than spilling.
1032     BestCost = SpillCost;
1033     LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = ";
1034                MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
1035   }
1036 
1037   unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
1038                                                NumCands, false /*IgnoreCSR*/);
1039 
1040   // No solutions found, fall back to single block splitting.
1041   if (!HasCompact && BestCand == NoCand)
1042     return MCRegister::NoRegister;
1043 
1044   return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
1045 }
1046 
1047 unsigned RAGreedy::calculateRegionSplitCost(const LiveInterval &VirtReg,
1048                                             AllocationOrder &Order,
1049                                             BlockFrequency &BestCost,
1050                                             unsigned &NumCands,
1051                                             bool IgnoreCSR) {
1052   unsigned BestCand = NoCand;
1053   for (MCPhysReg PhysReg : Order) {
1054     assert(PhysReg);
1055     if (IgnoreCSR && EvictAdvisor->isUnusedCalleeSavedReg(PhysReg))
1056       continue;
1057 
1058     // Discard bad candidates before we run out of interference cache cursors.
1059     // This will only affect register classes with a lot of registers (>32).
1060     if (NumCands == IntfCache.getMaxCursors()) {
1061       unsigned WorstCount = ~0u;
1062       unsigned Worst = 0;
1063       for (unsigned CandIndex = 0; CandIndex != NumCands; ++CandIndex) {
1064         if (CandIndex == BestCand || !GlobalCand[CandIndex].PhysReg)
1065           continue;
1066         unsigned Count = GlobalCand[CandIndex].LiveBundles.count();
1067         if (Count < WorstCount) {
1068           Worst = CandIndex;
1069           WorstCount = Count;
1070         }
1071       }
1072       --NumCands;
1073       GlobalCand[Worst] = GlobalCand[NumCands];
1074       if (BestCand == NumCands)
1075         BestCand = Worst;
1076     }
1077 
1078     if (GlobalCand.size() <= NumCands)
1079       GlobalCand.resize(NumCands+1);
1080     GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1081     Cand.reset(IntfCache, PhysReg);
1082 
1083     SpillPlacer->prepare(Cand.LiveBundles);
1084     BlockFrequency Cost;
1085     if (!addSplitConstraints(Cand.Intf, Cost)) {
1086       LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
1087       continue;
1088     }
1089     LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
1090                MBFI->printBlockFreq(dbgs(), Cost));
1091     if (Cost >= BestCost) {
1092       LLVM_DEBUG({
1093         if (BestCand == NoCand)
1094           dbgs() << " worse than no bundles\n";
1095         else
1096           dbgs() << " worse than "
1097                  << printReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
1098       });
1099       continue;
1100     }
1101     if (!growRegion(Cand)) {
1102       LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
1103       continue;
1104     }
1105 
1106     SpillPlacer->finish();
1107 
1108     // No live bundles, defer to splitSingleBlocks().
1109     if (!Cand.LiveBundles.any()) {
1110       LLVM_DEBUG(dbgs() << " no bundles.\n");
1111       continue;
1112     }
1113 
1114     Cost += calcGlobalSplitCost(Cand, Order);
1115     LLVM_DEBUG({
1116       dbgs() << ", total = ";
1117       MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
1118       for (int I : Cand.LiveBundles.set_bits())
1119         dbgs() << " EB#" << I;
1120       dbgs() << ".\n";
1121     });
1122     if (Cost < BestCost) {
1123       BestCand = NumCands;
1124       BestCost = Cost;
1125     }
1126     ++NumCands;
1127   }
1128 
1129   return BestCand;
1130 }
1131 
1132 unsigned RAGreedy::doRegionSplit(const LiveInterval &VirtReg, unsigned BestCand,
1133                                  bool HasCompact,
1134                                  SmallVectorImpl<Register> &NewVRegs) {
1135   SmallVector<unsigned, 8> UsedCands;
1136   // Prepare split editor.
1137   LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1138   SE->reset(LREdit, SplitSpillMode);
1139 
1140   // Assign all edge bundles to the preferred candidate, or NoCand.
1141   BundleCand.assign(Bundles->getNumBundles(), NoCand);
1142 
1143   // Assign bundles for the best candidate region.
1144   if (BestCand != NoCand) {
1145     GlobalSplitCandidate &Cand = GlobalCand[BestCand];
1146     if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
1147       UsedCands.push_back(BestCand);
1148       Cand.IntvIdx = SE->openIntv();
1149       LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
1150                         << B << " bundles, intv " << Cand.IntvIdx << ".\n");
1151       (void)B;
1152     }
1153   }
1154 
1155   // Assign bundles for the compact region.
1156   if (HasCompact) {
1157     GlobalSplitCandidate &Cand = GlobalCand.front();
1158     assert(!Cand.PhysReg && "Compact region has no physreg");
1159     if (unsigned B = Cand.getBundles(BundleCand, 0)) {
1160       UsedCands.push_back(0);
1161       Cand.IntvIdx = SE->openIntv();
1162       LLVM_DEBUG(dbgs() << "Split for compact region in " << B
1163                         << " bundles, intv " << Cand.IntvIdx << ".\n");
1164       (void)B;
1165     }
1166   }
1167 
1168   splitAroundRegion(LREdit, UsedCands);
1169   return 0;
1170 }
1171 
1172 //===----------------------------------------------------------------------===//
1173 //                            Per-Block Splitting
1174 //===----------------------------------------------------------------------===//
1175 
1176 /// tryBlockSplit - Split a global live range around every block with uses. This
1177 /// creates a lot of local live ranges that will be split by tryLocalSplit if
1178 /// they don't allocate.
1179 unsigned RAGreedy::tryBlockSplit(const LiveInterval &VirtReg,
1180                                  AllocationOrder &Order,
1181                                  SmallVectorImpl<Register> &NewVRegs) {
1182   assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
1183   Register Reg = VirtReg.reg();
1184   bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
1185   LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1186   SE->reset(LREdit, SplitSpillMode);
1187   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1188   for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1189     if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1190       SE->splitSingleBlock(BI);
1191   }
1192   // No blocks were split.
1193   if (LREdit.empty())
1194     return 0;
1195 
1196   // We did split for some blocks.
1197   SmallVector<unsigned, 8> IntvMap;
1198   SE->finish(&IntvMap);
1199 
1200   // Tell LiveDebugVariables about the new ranges.
1201   DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
1202 
1203   // Sort out the new intervals created by splitting. The remainder interval
1204   // goes straight to spilling, the new local ranges get to stay RS_New.
1205   for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
1206     const LiveInterval &LI = LIS->getInterval(LREdit.get(I));
1207     if (ExtraInfo->getOrInitStage(LI.reg()) == RS_New && IntvMap[I] == 0)
1208       ExtraInfo->setStage(LI, RS_Spill);
1209   }
1210 
1211   if (VerifyEnabled)
1212     MF->verify(this, "After splitting live range around basic blocks");
1213   return 0;
1214 }
1215 
1216 //===----------------------------------------------------------------------===//
1217 //                         Per-Instruction Splitting
1218 //===----------------------------------------------------------------------===//
1219 
1220 /// Get the number of allocatable registers that match the constraints of \p Reg
1221 /// on \p MI and that are also in \p SuperRC.
1222 static unsigned getNumAllocatableRegsForConstraints(
1223     const MachineInstr *MI, Register Reg, const TargetRegisterClass *SuperRC,
1224     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1225     const RegisterClassInfo &RCI) {
1226   assert(SuperRC && "Invalid register class");
1227 
1228   const TargetRegisterClass *ConstrainedRC =
1229       MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
1230                                              /* ExploreBundle */ true);
1231   if (!ConstrainedRC)
1232     return 0;
1233   return RCI.getNumAllocatableRegs(ConstrainedRC);
1234 }
1235 
1236 /// tryInstructionSplit - Split a live range around individual instructions.
1237 /// This is normally not worthwhile since the spiller is doing essentially the
1238 /// same thing. However, when the live range is in a constrained register
1239 /// class, it may help to insert copies such that parts of the live range can
1240 /// be moved to a larger register class.
1241 ///
1242 /// This is similar to spilling to a larger register class.
1243 unsigned RAGreedy::tryInstructionSplit(const LiveInterval &VirtReg,
1244                                        AllocationOrder &Order,
1245                                        SmallVectorImpl<Register> &NewVRegs) {
1246   const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());
1247   // There is no point to this if there are no larger sub-classes.
1248   if (!RegClassInfo.isProperSubClass(CurRC))
1249     return 0;
1250 
1251   // Always enable split spill mode, since we're effectively spilling to a
1252   // register.
1253   LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1254   SE->reset(LREdit, SplitEditor::SM_Size);
1255 
1256   ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1257   if (Uses.size() <= 1)
1258     return 0;
1259 
1260   LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
1261                     << " individual instrs.\n");
1262 
1263   const TargetRegisterClass *SuperRC =
1264       TRI->getLargestLegalSuperClass(CurRC, *MF);
1265   unsigned SuperRCNumAllocatableRegs =
1266       RegClassInfo.getNumAllocatableRegs(SuperRC);
1267   // Split around every non-copy instruction if this split will relax
1268   // the constraints on the virtual register.
1269   // Otherwise, splitting just inserts uncoalescable copies that do not help
1270   // the allocation.
1271   for (const SlotIndex Use : Uses) {
1272     if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Use))
1273       if (MI->isFullCopy() ||
1274           SuperRCNumAllocatableRegs ==
1275               getNumAllocatableRegsForConstraints(MI, VirtReg.reg(), SuperRC,
1276                                                   TII, TRI, RegClassInfo)) {
1277         LLVM_DEBUG(dbgs() << "    skip:\t" << Use << '\t' << *MI);
1278         continue;
1279       }
1280     SE->openIntv();
1281     SlotIndex SegStart = SE->enterIntvBefore(Use);
1282     SlotIndex SegStop = SE->leaveIntvAfter(Use);
1283     SE->useIntv(SegStart, SegStop);
1284   }
1285 
1286   if (LREdit.empty()) {
1287     LLVM_DEBUG(dbgs() << "All uses were copies.\n");
1288     return 0;
1289   }
1290 
1291   SmallVector<unsigned, 8> IntvMap;
1292   SE->finish(&IntvMap);
1293   DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
1294   // Assign all new registers to RS_Spill. This was the last chance.
1295   ExtraInfo->setStage(LREdit.begin(), LREdit.end(), RS_Spill);
1296   return 0;
1297 }
1298 
1299 //===----------------------------------------------------------------------===//
1300 //                             Local Splitting
1301 //===----------------------------------------------------------------------===//
1302 
1303 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
1304 /// in order to use PhysReg between two entries in SA->UseSlots.
1305 ///
1306 /// GapWeight[I] represents the gap between UseSlots[I] and UseSlots[I + 1].
1307 ///
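/// For example, with three uses U0 < U1 < U2 there are two gaps, (U0,U1) and
/// (U1,U2); interference that overlaps U1 is counted in both of them.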
1308 void RAGreedy::calcGapWeights(MCRegister PhysReg,
1309                               SmallVectorImpl<float> &GapWeight) {
1310   assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1311   const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1312   ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1313   const unsigned NumGaps = Uses.size()-1;
1314 
1315   // Start and end points for the interference check.
1316   SlotIndex StartIdx =
1317     BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
1318   SlotIndex StopIdx =
1319     BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;
1320 
1321   GapWeight.assign(NumGaps, 0.0f);
1322 
1323   // Add interference from each overlapping register.
1324   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1325     if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
1326           .checkInterference())
1327       continue;
1328 
1329     // We know that VirtReg is a continuous interval from FirstInstr to
1330     // LastInstr, so we don't need InterferenceQuery.
1331     //
1332     // Interference that overlaps an instruction is counted in both gaps
1333     // surrounding the instruction. The exception is interference before
1334     // StartIdx and after StopIdx.
1335     //
1336     LiveIntervalUnion::SegmentIter IntI =
1337       Matrix->getLiveUnions()[*Units].find(StartIdx);
1338     for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
1339       // Skip the gaps before IntI.
1340       while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
1341         if (++Gap == NumGaps)
1342           break;
1343       if (Gap == NumGaps)
1344         break;
1345 
1346       // Update the gaps covered by IntI.
1347       const float weight = IntI.value()->weight();
1348       for (; Gap != NumGaps; ++Gap) {
1349         GapWeight[Gap] = std::max(GapWeight[Gap], weight);
1350         if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
1351           break;
1352       }
1353       if (Gap == NumGaps)
1354         break;
1355     }
1356   }
1357 
1358   // Add fixed interference.
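       // Unlike the evictable interference handled above, fixed interference
       // cannot be evicted, so overlapped gaps get an infinite weight.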
1359   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1360     const LiveRange &LR = LIS->getRegUnit(*Units);
1361     LiveRange::const_iterator I = LR.find(StartIdx);
1362     LiveRange::const_iterator E = LR.end();
1363 
1364     // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
1365     for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
1366       while (Uses[Gap+1].getBoundaryIndex() < I->start)
1367         if (++Gap == NumGaps)
1368           break;
1369       if (Gap == NumGaps)
1370         break;
1371 
1372       for (; Gap != NumGaps; ++Gap) {
1373         GapWeight[Gap] = huge_valf;
1374         if (Uses[Gap+1].getBaseIndex() >= I->end)
1375           break;
1376       }
1377       if (Gap == NumGaps)
1378         break;
1379     }
1380   }
1381 }
1382 
1383 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
1384 /// basic block.
1385 ///
1386 unsigned RAGreedy::tryLocalSplit(const LiveInterval &VirtReg,
1387                                  AllocationOrder &Order,
1388                                  SmallVectorImpl<Register> &NewVRegs) {
1389   // TODO: the function currently only handles a single UseBlock; it should be
1390   // possible to generalize.
1391   if (SA->getUseBlocks().size() != 1)
1392     return 0;
1393 
1394   const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1395 
1396   // Note that it is possible to have an interval that is live-in or live-out
1397   // while only covering a single block - a phi-def can use undef values from
1398   // predecessors, and the block could be a single-block loop.
1399   // We don't bother doing anything clever about such a case, we simply assume
1400   // that the interval is continuous from FirstInstr to LastInstr. We should
1401   // make sure that we don't do anything illegal to such an interval, though.
1402 
1403   ArrayRef<SlotIndex> Uses = SA->getUseSlots();
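       // With fewer than three uses there is at most one gap between uses, so
       // a local split could not isolate anything smaller than the whole range.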
1404   if (Uses.size() <= 2)
1405     return 0;
1406   const unsigned NumGaps = Uses.size()-1;
1407 
1408   LLVM_DEBUG({
1409     dbgs() << "tryLocalSplit: ";
1410     for (const auto &Use : Uses)
1411       dbgs() << ' ' << Use;
1412     dbgs() << '\n';
1413   });
1414 
1415   // If VirtReg is live across any register mask operands, compute a list of
1416   // gaps with register masks.
1417   SmallVector<unsigned, 8> RegMaskGaps;
1418   if (Matrix->checkRegMaskInterference(VirtReg)) {
1419     // Get regmask slots for the whole block.
1420     ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
1421     LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
1422     // Constrain to VirtReg's live range.
1423     unsigned RI =
1424         llvm::lower_bound(RMS, Uses.front().getRegSlot()) - RMS.begin();
1425     unsigned RE = RMS.size();
1426     for (unsigned I = 0; I != NumGaps && RI != RE; ++I) {
1427       // Look for Uses[I] <= RMS <= Uses[I + 1].
1428       assert(!SlotIndex::isEarlierInstr(RMS[RI], Uses[I]));
1429       if (SlotIndex::isEarlierInstr(Uses[I + 1], RMS[RI]))
1430         continue;
1431       // Skip a regmask on the same instruction as the last use. It doesn't
1432       // overlap the live range.
1433       if (SlotIndex::isSameInstr(Uses[I + 1], RMS[RI]) && I + 1 == NumGaps)
1434         break;
1435       LLVM_DEBUG(dbgs() << ' ' << RMS[RI] << ':' << Uses[I] << '-'
1436                         << Uses[I + 1]);
1437       RegMaskGaps.push_back(I);
1438       // Advance RI to the next gap. A regmask on one of the uses counts in
1439       // both gaps.
1440       while (RI != RE && SlotIndex::isEarlierInstr(RMS[RI], Uses[I + 1]))
1441         ++RI;
1442     }
1443     LLVM_DEBUG(dbgs() << '\n');
1444   }
1445 
1446   // Since we allow local split results to be split again, there is a risk of
1447   // creating infinite loops. It is tempting to require that the new live
1448   // ranges have less instructions than the original. That would guarantee
1449   // convergence, but it is too strict. A live range with 3 instructions can be
1450   // split 2+3 (including the COPY), and we want to allow that.
1451   //
1452   // Instead we use these rules:
1453   //
1454   // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
1455   //    noop split, of course).
1456   // 2. Require progress be made for ranges with getStage() == RS_Split2. All
1457   //    the new ranges must have fewer instructions than before the split.
1458   // 3. New ranges with the same number of instructions are marked RS_Split2;
1459   //    smaller ranges are marked RS_New.
1460   //
1461   // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
1462   // excessive splitting and infinite loops.
1463   //
1464   bool ProgressRequired = ExtraInfo->getStage(VirtReg) >= RS_Split2;
1465 
1466   // Best split candidate.
1467   unsigned BestBefore = NumGaps;
1468   unsigned BestAfter = 0;
1469   float BestDiff = 0;
1470 
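       // Frequency of this block relative to the function entry. The estimated
       // spill weight of a split candidate below scales with it.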
1471   const float blockFreq =
1472     SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
1473     (1.0f / MBFI->getEntryFreq());
1474   SmallVector<float, 8> GapWeight;
1475 
1476   for (MCPhysReg PhysReg : Order) {
1477     assert(PhysReg);
1478     // Keep track of the largest spill weight that would need to be evicted in
1479     // order to make use of PhysReg between UseSlots[I] and UseSlots[I + 1].
1480     calcGapWeights(PhysReg, GapWeight);
1481 
1482     // Remove any gaps with regmask clobbers.
1483     if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
1484       for (unsigned I = 0, E = RegMaskGaps.size(); I != E; ++I)
1485         GapWeight[RegMaskGaps[I]] = huge_valf;
1486 
1487     // Try to find the best sequence of gaps to close.
1488     // The new spill weight must be larger than any gap interference.
1489 
1490     // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1491     unsigned SplitBefore = 0, SplitAfter = 1;
1492 
1493     // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1494     // It is the spill weight that needs to be evicted.
1495     float MaxGap = GapWeight[0];
1496 
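         // Search for the best window of uses [SplitBefore, SplitAfter]: shrink
         // it from the left while the contained interference is too expensive to
         // evict, and extend it to the right otherwise.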
1497     while (true) {
1498       // Live before/after split?
1499       const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1500       const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1501 
1502       LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
1503                         << '-' << Uses[SplitAfter] << " I=" << MaxGap);
1504 
1505       // Stop before the interval gets so big we wouldn't be making progress.
1506       if (!LiveBefore && !LiveAfter) {
1507         LLVM_DEBUG(dbgs() << " all\n");
1508         break;
1509       }
1510       // Should the interval be extended or shrunk?
1511       bool Shrink = true;
1512 
1513       // How many gaps would the new range have?
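           // The window spans SplitAfter - SplitBefore gaps between uses, plus
           // an extra gap at each end where the value stays live and a copy
           // will be inserted.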
1514       unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
1515 
1516       // Legally, without causing looping?
1517       bool Legal = !ProgressRequired || NewGaps < NumGaps;
1518 
1519       if (Legal && MaxGap < huge_valf) {
1520         // Estimate the new spill weight. Each instruction reads or writes the
1521         // register. Conservatively assume there are no read-modify-write
1522         // instructions.
1523         //
1524         // Try to guess the size of the new interval.
1525         const float EstWeight = normalizeSpillWeight(
1526             blockFreq * (NewGaps + 1),
1527             Uses[SplitBefore].distance(Uses[SplitAfter]) +
1528                 (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
1529             1);
1530         // Would this split be possible to allocate?
1531     // Never allocate all gaps; we wouldn't be making progress.
1532         LLVM_DEBUG(dbgs() << " w=" << EstWeight);
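             // Hysteresis (a factor slightly below 1.0) keeps near-ties from
             // flipping the decision between nearly equivalent candidates.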
1533         if (EstWeight * Hysteresis >= MaxGap) {
1534           Shrink = false;
1535           float Diff = EstWeight - MaxGap;
1536           if (Diff > BestDiff) {
1537             LLVM_DEBUG(dbgs() << " (best)");
1538             BestDiff = Hysteresis * Diff;
1539             BestBefore = SplitBefore;
1540             BestAfter = SplitAfter;
1541           }
1542         }
1543       }
1544 
1545       // Try to shrink.
1546       if (Shrink) {
1547         if (++SplitBefore < SplitAfter) {
1548           LLVM_DEBUG(dbgs() << " shrink\n");
1549           // Recompute the max when necessary.
1550           if (GapWeight[SplitBefore - 1] >= MaxGap) {
1551             MaxGap = GapWeight[SplitBefore];
1552             for (unsigned I = SplitBefore + 1; I != SplitAfter; ++I)
1553               MaxGap = std::max(MaxGap, GapWeight[I]);
1554           }
1555           continue;
1556         }
1557         MaxGap = 0;
1558       }
1559 
1560       // Try to extend the interval.
1561       if (SplitAfter >= NumGaps) {
1562         LLVM_DEBUG(dbgs() << " end\n");
1563         break;
1564       }
1565 
1566       LLVM_DEBUG(dbgs() << " extend\n");
1567       MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
1568     }
1569   }
1570 
1571   // Didn't find any candidates?
1572   if (BestBefore == NumGaps)
1573     return 0;
1574 
1575   LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
1576                     << Uses[BestAfter] << ", " << BestDiff << ", "
1577                     << (BestAfter - BestBefore + 1) << " instrs\n");
1578 
1579   LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1580   SE->reset(LREdit);
1581 
1582   SE->openIntv();
1583   SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1584   SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
1585   SE->useIntv(SegStart, SegStop);
1586   SmallVector<unsigned, 8> IntvMap;
1587   SE->finish(&IntvMap);
1588   DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
1589   // If the new range has the same number of instructions as before, mark it as
1590   // RS_Split2 so the next split will be forced to make progress. Otherwise,
1591   // leave the new intervals as RS_New so they can compete.
1592   bool LiveBefore = BestBefore != 0 || BI.LiveIn;
1593   bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
1594   unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
1595   if (NewGaps >= NumGaps) {
1596     LLVM_DEBUG(dbgs() << "Tagging non-progress ranges:");
1597     assert(!ProgressRequired && "Didn't make progress when it was required.");
1598     for (unsigned I = 0, E = IntvMap.size(); I != E; ++I)
1599       if (IntvMap[I] == 1) {
1600         ExtraInfo->setStage(LIS->getInterval(LREdit.get(I)), RS_Split2);
1601         LLVM_DEBUG(dbgs() << ' ' << printReg(LREdit.get(I)));
1602       }
1603     LLVM_DEBUG(dbgs() << '\n');
1604   }
1605   ++NumLocalSplits;
1606 
1607   return 0;
1608 }
1609 
1610 //===----------------------------------------------------------------------===//
1611 //                          Live Range Splitting
1612 //===----------------------------------------------------------------------===//
1613 
1614 /// trySplit - Try to split VirtReg or one of its interferences, making it
1615 /// assignable.
1616 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
1617 unsigned RAGreedy::trySplit(const LiveInterval &VirtReg, AllocationOrder &Order,
1618                             SmallVectorImpl<Register> &NewVRegs,
1619                             const SmallVirtRegSet &FixedRegisters) {
1620   // Ranges must be Split2 or less.
1621   if (ExtraInfo->getStage(VirtReg) >= RS_Spill)
1622     return 0;
1623 
1624   // Local intervals are handled separately.
1625   if (LIS->intervalIsInOneMBB(VirtReg)) {
1626     NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
1627                        TimerGroupDescription, TimePassesIsEnabled);
1628     SA->analyze(&VirtReg);
1629     Register PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
1630     if (PhysReg || !NewVRegs.empty())
1631       return PhysReg;
1632     return tryInstructionSplit(VirtReg, Order, NewVRegs);
1633   }
1634 
1635   NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
1636                      TimerGroupDescription, TimePassesIsEnabled);
1637 
1638   SA->analyze(&VirtReg);
1639 
1640   // First try to split around a region spanning multiple blocks. RS_Split2
1641   // ranges already made dubious progress with region splitting, so they go
1642   // straight to single block splitting.
1643   if (ExtraInfo->getStage(VirtReg) < RS_Split2) {
1644     MCRegister PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1645     if (PhysReg || !NewVRegs.empty())
1646       return PhysReg;
1647   }
1648 
1649   // Then isolate blocks.
1650   return tryBlockSplit(VirtReg, Order, NewVRegs);
1651 }
1652 
1653 //===----------------------------------------------------------------------===//
1654 //                          Last Chance Recoloring
1655 //===----------------------------------------------------------------------===//
1656 
1657 /// Return true if \p reg has any tied def operand.
1658 static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg) {
1659   for (const MachineOperand &MO : MRI->def_operands(reg))
1660     if (MO.isTied())
1661       return true;
1662 
1663   return false;
1664 }
1665 
1666 /// Return true if the existing assignment of \p Intf overlaps with, but is
1667 /// not the same as, \p PhysReg.
1668 static bool assignedRegPartiallyOverlaps(const TargetRegisterInfo &TRI,
1669                                          const VirtRegMap &VRM,
1670                                          MCRegister PhysReg,
1671                                          const LiveInterval &Intf) {
1672   MCRegister AssignedReg = VRM.getPhys(Intf.reg());
1673   if (PhysReg == AssignedReg)
1674     return false;
1675   return TRI.regsOverlap(PhysReg, AssignedReg);
1676 }
1677 
1678 /// mayRecolorAllInterferences - Check if the virtual registers that
1679 /// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
1680 /// recolored to free \p PhysReg.
1681 /// When true is returned, \p RecoloringCandidates has been augmented with all
1682 /// the live intervals that need to be recolored in order to free \p PhysReg
1683 /// for \p VirtReg.
1684 /// \p FixedRegisters contains all the virtual registers that cannot be
1685 /// recolored.
1686 bool RAGreedy::mayRecolorAllInterferences(
1687     MCRegister PhysReg, const LiveInterval &VirtReg,
1688     SmallLISet &RecoloringCandidates, const SmallVirtRegSet &FixedRegisters) {
1689   const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());
1690 
1691   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1692     LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
1693     // If there is LastChanceRecoloringMaxInterference or more interferences,
1694     // chances are one would not be recolorable.
1695     if (Q.interferingVRegs(LastChanceRecoloringMaxInterference).size() >=
1696             LastChanceRecoloringMaxInterference &&
1697         !ExhaustiveSearch) {
1698       LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
1699       CutOffInfo |= CO_Interf;
1700       return false;
1701     }
1702     for (const LiveInterval *Intf : reverse(Q.interferingVRegs())) {
1703       // If Intf is done and sits on the same register class as VirtReg, it
1704       // would not be recolorable as it is in the same state as
1705       // VirtReg. However there are at least two exceptions.
1706       //
1707       // If VirtReg has tied defs and Intf doesn't, then
1708       // there is still a point in examining if it can be recolorable.
1709       //
1710       // Additionally, if the register class has overlapping tuple members, it
1711       // may still be recolorable using a different tuple. This is more likely
1712       // if the existing assignment aliases with the candidate.
1713       //
1714       if (((ExtraInfo->getStage(*Intf) == RS_Done &&
1715             MRI->getRegClass(Intf->reg()) == CurRC &&
1716             !assignedRegPartiallyOverlaps(*TRI, *VRM, PhysReg, *Intf)) &&
1717            !(hasTiedDef(MRI, VirtReg.reg()) &&
1718              !hasTiedDef(MRI, Intf->reg()))) ||
1719           FixedRegisters.count(Intf->reg())) {
1720         LLVM_DEBUG(
1721             dbgs() << "Early abort: the interference is not recolorable.\n");
1722         return false;
1723       }
1724       RecoloringCandidates.insert(Intf);
1725     }
1726   }
1727   return true;
1728 }
1729 
1730 /// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
1731 /// its interferences.
1732 /// Last chance recoloring chooses a color for \p VirtReg and recolors every
1733 /// virtual register that was using it. The recoloring process may recursively
1734 /// use the last chance recoloring. Therefore, when a virtual register has been
1735 /// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
1736 /// be last-chance-recolored again during this recoloring "session".
1737 /// E.g.,
1738 /// Let
1739 /// vA can use {R1, R2    }
1740 /// vB can use {    R2, R3}
1741 /// vC can use {R1        }
1742 /// Where vA, vB, and vC cannot be split anymore (they are reloads for
1743 /// instance) and they all interfere.
1744 ///
1745 /// vA is assigned R1
1746 /// vB is assigned R2
1747 /// vC tries to evict vA but vA is already done.
1748 /// Regular register allocation fails.
1749 ///
1750 /// Last chance recoloring kicks in:
1751 /// vC acts as if vA had been evicted => vC uses R1.
1752 /// vC is marked as fixed.
1753 /// vA needs to find a color.
1754 /// None are available.
1755 /// vA cannot evict vC: vC is a fixed virtual register now.
1756 /// vA acts as if vB had been evicted => vA uses R2.
1757 /// vB needs to find a color.
1758 /// R3 is available.
1759 /// Recoloring => vC = R1, vA = R2, vB = R3
1760 ///
1761 /// \p Order defines the preferred allocation order for \p VirtReg.
1762 /// \p NewRegs will contain any new virtual registers that have been created
1763 /// (split, spill) during the process and that must be assigned.
1764 /// \p FixedRegisters contains all the virtual registers that cannot be
1765 /// recolored.
1766 ///
1767 /// \p RecolorStack tracks the original assignments of successfully recolored
1768 /// registers.
1769 ///
1770 /// \p Depth gives the current depth of the last chance recoloring.
1771 /// \return a physical register that can be used for VirtReg or ~0u if none
1772 /// exists.
1773 unsigned RAGreedy::tryLastChanceRecoloring(const LiveInterval &VirtReg,
1774                                            AllocationOrder &Order,
1775                                            SmallVectorImpl<Register> &NewVRegs,
1776                                            SmallVirtRegSet &FixedRegisters,
1777                                            RecoloringStack &RecolorStack,
1778                                            unsigned Depth) {
1779   if (!TRI->shouldUseLastChanceRecoloringForVirtReg(*MF, VirtReg))
1780     return ~0u;
1781 
1782   LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
1783 
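       // Remember the stack depth on entry so that a failed attempt can roll
       // back exactly the assignments recorded during this call, including any
       // made by recursive recoloring.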
1784   const ssize_t EntryStackSize = RecolorStack.size();
1785 
1786   // Ranges must be Done.
1787   assert((ExtraInfo->getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
1788          "Last chance recoloring should really be last chance");
1789   // Set the max depth to LastChanceRecoloringMaxDepth.
1790   // We may want to reconsider that if we end up with too large a search space
1791   // for targets with hundreds of registers.
1792   // Indeed, in that case we may want to cut the search space earlier.
1793   if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
1794     LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
1795     CutOffInfo |= CO_Depth;
1796     return ~0u;
1797   }
1798 
1799   // Set of Live intervals that will need to be recolored.
1800   SmallLISet RecoloringCandidates;
1801 
1802   // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
1803   // this recoloring "session".
1804   assert(!FixedRegisters.count(VirtReg.reg()));
1805   FixedRegisters.insert(VirtReg.reg());
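       // Virtual registers created while trying the current PhysReg. They are
       // merged into NewVRegs on success, or selectively on failure (see below).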
1806   SmallVector<Register, 4> CurrentNewVRegs;
1807 
1808   for (MCRegister PhysReg : Order) {
1809     assert(PhysReg.isValid());
1810     LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
1811                       << printReg(PhysReg, TRI) << '\n');
1812     RecoloringCandidates.clear();
1813     CurrentNewVRegs.clear();
1814 
1815     // It is only possible to recolor virtual register interference.
1816     if (Matrix->checkInterference(VirtReg, PhysReg) >
1817         LiveRegMatrix::IK_VirtReg) {
1818       LLVM_DEBUG(
1819           dbgs() << "Some interferences are not with virtual registers.\n");
1820 
1821       continue;
1822     }
1823 
1824     // Early give up on this PhysReg if it is obvious we cannot recolor all
1825     // the interferences.
1826     if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
1827                                     FixedRegisters)) {
1828       LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
1829       continue;
1830     }
1831 
1832     // RecoloringCandidates contains all the virtual registers that interfere
1833     // with VirtReg on PhysReg (or one of its aliases). Enqueue them for
1834     // recoloring and perform the actual recoloring.
1835     PQueue RecoloringQueue;
1836     for (const LiveInterval *RC : RecoloringCandidates) {
1837       Register ItVirtReg = RC->reg();
1838       enqueue(RecoloringQueue, RC);
1839       assert(VRM->hasPhys(ItVirtReg) &&
1840              "Interferences are supposed to be with allocated variables");
1841 
1842       // Record the current allocation.
1843       RecolorStack.push_back(std::make_pair(RC, VRM->getPhys(ItVirtReg)));
1844 
1845       // Remove the current assignment from the live register matrix.
1846       Matrix->unassign(*RC);
1847     }
1848 
1849     // Act as if VirtReg were assigned to PhysReg so that the underlying
1850     // recoloring has the right information about the interferences and
1851     // available colors.
1852     Matrix->assign(VirtReg, PhysReg);
1853 
1854     // Save the current recoloring state.
1855     // If we cannot recolor all the interferences, we will have to start again
1856     // at this point for the next physical register.
1857     SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
1858     if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
1859                                 FixedRegisters, RecolorStack, Depth)) {
1860       // Push the queued vregs into the main queue.
1861       for (Register NewVReg : CurrentNewVRegs)
1862         NewVRegs.push_back(NewVReg);
1863       // Do not mess up with the global assignment process.
1864       // I.e., VirtReg must be unassigned.
1865       Matrix->unassign(VirtReg);
1866       return PhysReg;
1867     }
1868 
1869     LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
1870                       << printReg(PhysReg, TRI) << '\n');
1871 
1872     // The recoloring attempt failed, undo the changes.
1873     FixedRegisters = SaveFixedRegisters;
1874     Matrix->unassign(VirtReg);
1875 
1876     // For a newly created vreg which is also in RecoloringCandidates,
1877     // don't add it to NewVRegs because its physical register will be restored
1878     // below. Other vregs in CurrentNewVRegs are created by calling
1879     // selectOrSplit and should be added into NewVRegs.
1880     for (Register &R : CurrentNewVRegs) {
1881       if (RecoloringCandidates.count(&LIS->getInterval(R)))
1882         continue;
1883       NewVRegs.push_back(R);
1884     }
1885 
1886     // Roll back our unsuccessful recoloring. Also roll back any successful
1887     // recolorings in any recursive recoloring attempts, since it's possible
1888     // they would have introduced conflicts with assignments we will be
1889     // restoring further up the stack. Perform all unassignments prior to
1890     // reassigning, since sub-recolorings may have conflicted with the registers
1891     // we are going to restore to their original assignments.
1892     for (ssize_t I = RecolorStack.size() - 1; I >= EntryStackSize; --I) {
1893       const LiveInterval *LI;
1894       MCRegister PhysReg;
1895       std::tie(LI, PhysReg) = RecolorStack[I];
1896 
1897       if (VRM->hasPhys(LI->reg()))
1898         Matrix->unassign(*LI);
1899     }
1900 
1901     for (size_t I = EntryStackSize; I != RecolorStack.size(); ++I) {
1902       const LiveInterval *LI;
1903       MCRegister PhysReg;
1904       std::tie(LI, PhysReg) = RecolorStack[I];
1905       if (!LI->empty() && !MRI->reg_nodbg_empty(LI->reg()))
1906         Matrix->assign(*LI, PhysReg);
1907     }
1908 
1909     // Pop the stack of recoloring attempts.
1910     RecolorStack.resize(EntryStackSize);
1911   }
1912 
1913   // Last chance recoloring did not work either; give up.
1914   return ~0u;
1915 }
1916 
1917 /// tryRecoloringCandidates - Try to assign a new color to every register
1918 /// in \p RecoloringQueue.
1919 /// \p NewRegs will contain any new virtual register created during the
1920 /// recoloring process.
1921 /// \p FixedRegisters[in/out] contains all the registers that have been
1922 /// recolored.
1923 /// \return true if all virtual registers in RecoloringQueue were successfully
1924 /// recolored, false otherwise.
1925 bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
1926                                        SmallVectorImpl<Register> &NewVRegs,
1927                                        SmallVirtRegSet &FixedRegisters,
1928                                        RecoloringStack &RecolorStack,
1929                                        unsigned Depth) {
1930   while (!RecoloringQueue.empty()) {
1931     const LiveInterval *LI = dequeue(RecoloringQueue);
1932     LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
1933     MCRegister PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters,
1934                                            RecolorStack, Depth + 1);
1935     // When splitting happens, the live-range may actually be empty.
1936     // In that case, it is okay to continue the recoloring even
1937     // if we did not find an alternative color for it. Indeed,
1938     // there will not be anything to color for LI in the end.
1939     if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
1940       return false;
1941 
1942     if (!PhysReg) {
1943       assert(LI->empty() && "Only empty live-ranges do not require a register");
1944       LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
1945                         << " succeeded. Empty LI.\n");
1946       continue;
1947     }
1948     LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
1949                       << " succeeded with: " << printReg(PhysReg, TRI) << '\n');
1950 
1951     Matrix->assign(*LI, PhysReg);
1952     FixedRegisters.insert(LI->reg());
1953   }
1954   return true;
1955 }
1956 
1957 //===----------------------------------------------------------------------===//
1958 //                            Main Entry Point
1959 //===----------------------------------------------------------------------===//
1960 
1961 MCRegister RAGreedy::selectOrSplit(const LiveInterval &VirtReg,
1962                                    SmallVectorImpl<Register> &NewVRegs) {
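       // CutOffInfo accumulates which last-chance-recoloring cutoffs were hit so
       // that a targeted error can be emitted below if allocation fails.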
1963   CutOffInfo = CO_None;
1964   LLVMContext &Ctx = MF->getFunction().getContext();
1965   SmallVirtRegSet FixedRegisters;
1966   RecoloringStack RecolorStack;
1967   MCRegister Reg =
1968       selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters, RecolorStack);
1969   if (Reg == ~0U && (CutOffInfo != CO_None)) {
1970     uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
1971     if (CutOffEncountered == CO_Depth)
1972       Ctx.emitError("register allocation failed: maximum depth for recoloring "
1973                     "reached. Use -fexhaustive-register-search to skip "
1974                     "cutoffs");
1975     else if (CutOffEncountered == CO_Interf)
1976       Ctx.emitError("register allocation failed: maximum interference for "
1977                     "recoloring reached. Use -fexhaustive-register-search "
1978                     "to skip cutoffs");
1979     else if (CutOffEncountered == (CO_Depth | CO_Interf))
1980       Ctx.emitError("register allocation failed: maximum interference and "
1981                     "depth for recoloring reached. Use "
1982                     "-fexhaustive-register-search to skip cutoffs");
1983   }
1984   return Reg;
1985 }
1986 
1987 /// Using a CSR for the first time has a cost because it causes push|pop
1988 /// to be added to prologue|epilogue. Splitting a cold section of the live
1989 /// range can have lower cost than using the CSR for the first time, and
1990 /// spilling a live range in the cold path can have lower cost than using
1991 /// the CSR for the first time. Returns the physical register if we decide
1992 /// to use the CSR; otherwise returns 0.
1993 MCRegister RAGreedy::tryAssignCSRFirstTime(
1994     const LiveInterval &VirtReg, AllocationOrder &Order, MCRegister PhysReg,
1995     uint8_t &CostPerUseLimit, SmallVectorImpl<Register> &NewVRegs) {
1996   if (ExtraInfo->getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
1997     // We choose spill over using the CSR for the first time if the spill cost
1998     // is lower than CSRCost.
1999     SA->analyze(&VirtReg);
2000     if (calcSpillCost() >= CSRCost)
2001       return PhysReg;
2002 
2003     // We are going to spill; set CostPerUseLimit to 1 to make sure that
2004     // we will not use a callee-saved register in tryEvict.
2005     CostPerUseLimit = 1;
2006     return 0;
2007   }
2008   if (ExtraInfo->getStage(VirtReg) < RS_Split) {
2009     // We choose pre-splitting over using the CSR for the first time if
2010     // the cost of splitting is lower than CSRCost.
2011     SA->analyze(&VirtReg);
2012     unsigned NumCands = 0;
2013     BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
2014     unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
2015                                                  NumCands, true /*IgnoreCSR*/);
2016     if (BestCand == NoCand)
2017       // Use the CSR if we can't find a region split below CSRCost.
2018       return PhysReg;
2019 
2020     // Perform the actual pre-splitting.
2021     doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
2022     return 0;
2023   }
2024   return PhysReg;
2025 }
2026 
2027 void RAGreedy::aboutToRemoveInterval(const LiveInterval &LI) {
2028   // Do not keep invalid information around.
2029   SetOfBrokenHints.remove(&LI);
2030 }
2031 
2032 void RAGreedy::initializeCSRCost() {
2033   // We use the larger of the command-line option and the value reported
2034   // by TRI.
2035   CSRCost = BlockFrequency(
2036       std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
2037   if (!CSRCost.getFrequency())
2038     return;
2039 
2040   // Raw cost is relative to Entry == 2^14; scale it appropriately.
2041   uint64_t ActualEntry = MBFI->getEntryFreq();
2042   if (!ActualEntry) {
2043     CSRCost = 0;
2044     return;
2045   }
2046   uint64_t FixedEntry = 1 << 14;
2047   if (ActualEntry < FixedEntry)
2048     CSRCost *= BranchProbability(ActualEntry, FixedEntry);
2049   else if (ActualEntry <= UINT32_MAX)
2050     // Invert the fraction and divide.
2051     CSRCost /= BranchProbability(FixedEntry, ActualEntry);
2052   else
2053     // Can't use BranchProbability in general, since it takes 32-bit numbers.
2054     CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
2055 }
2056 
2057 /// Collect the hint info for \p Reg.
2058 /// The results are stored into \p Out.
2059 /// \p Out is not cleared before being populated.
2060 void RAGreedy::collectHintInfo(Register Reg, HintsInfo &Out) {
2061   for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
2062     if (!Instr.isFullCopy())
2063       continue;
2064     // Look for the other end of the copy.
2065     Register OtherReg = Instr.getOperand(0).getReg();
2066     if (OtherReg == Reg) {
2067       OtherReg = Instr.getOperand(1).getReg();
2068       if (OtherReg == Reg)
2069         continue;
2070     }
2071     // Get the current assignment.
2072     MCRegister OtherPhysReg =
2073         OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg);
2074     // Push the collected information.
2075     Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
2076                            OtherPhysReg));
2077   }
2078 }
2079 
2080 /// Using the given \p List, compute the cost of the broken hints if
2081 /// \p PhysReg was used.
2082 /// \return The cost of \p List for \p PhysReg.
2083 BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
2084                                            MCRegister PhysReg) {
2085   BlockFrequency Cost = 0;
2086   for (const HintInfo &Info : List) {
2087     if (Info.PhysReg != PhysReg)
2088       Cost += Info.Freq;
2089   }
2090   return Cost;
2091 }
2092 
2093 /// Using the register assigned to \p VirtReg, try to recolor
2094 /// all the live ranges that are copy-related with \p VirtReg.
2095 /// The recoloring is then propagated to all the live-ranges that have
2096 /// been recolored and so on, until no more copies can be coalesced or
2097 /// it is not profitable.
2098 /// For a given live range, profitability is determined by the sum of the
2099 /// frequencies of the non-identity copies it would introduce with the old
2100 /// and new register.
2101 void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) {
2102   // We have a broken hint; check if it is possible to fix it by
2103   // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
2104   // some register and PhysReg may be available for the other live-ranges.
2105   SmallSet<Register, 4> Visited;
2106   SmallVector<unsigned, 2> RecoloringCandidates;
2107   HintsInfo Info;
2108   Register Reg = VirtReg.reg();
2109   MCRegister PhysReg = VRM->getPhys(Reg);
2110   // Start the recoloring algorithm from the input live-interval, then
2111   // it will propagate to the ones that are copy-related with it.
2112   Visited.insert(Reg);
2113   RecoloringCandidates.push_back(Reg);
2114 
2115   LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
2116                     << '(' << printReg(PhysReg, TRI) << ")\n");
2117 
2118   do {
2119     Reg = RecoloringCandidates.pop_back_val();
2120 
2121     // We cannot recolor physical registers.
2122     if (Register::isPhysicalRegister(Reg))
2123       continue;
2124 
2125     // This may be a skipped register class.
2126     if (!VRM->hasPhys(Reg)) {
2127       assert(!ShouldAllocateClass(*TRI, *MRI->getRegClass(Reg)) &&
2128              "We have an unallocated variable which should have been handled");
2129       continue;
2130     }
2131 
2132     // Get the live interval mapped with this virtual register to be able
2133     // to check for the interference with the new color.
2134     LiveInterval &LI = LIS->getInterval(Reg);
2135     MCRegister CurrPhys = VRM->getPhys(Reg);
2136     // Check that the new color matches the register class constraints and
2137     // that it is free for this live range.
2138     if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
2139                                 Matrix->checkInterference(LI, PhysReg)))
2140       continue;
2141 
2142     LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
2143                       << ") is recolorable.\n");
2144 
2145     // Gather the hint info.
2146     Info.clear();
2147     collectHintInfo(Reg, Info);
2148     // Check if recoloring the live-range will increase the cost of the
2149     // non-identity copies.
2150     if (CurrPhys != PhysReg) {
2151       LLVM_DEBUG(dbgs() << "Checking profitability:\n");
2152       BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
2153       BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
2154       LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
2155                         << "\nNew Cost: " << NewCopiesCost.getFrequency()
2156                         << '\n');
2157       if (OldCopiesCost < NewCopiesCost) {
2158         LLVM_DEBUG(dbgs() << "=> Not profitable.\n");
2159         continue;
2160       }
2161       // At this point, the new cost is lower than or equal to the old one.
2162       // If it is equal, we still consider it profitable because it may expose
2163       // more recoloring opportunities.
2164       LLVM_DEBUG(dbgs() << "=> Profitable.\n");
2165       // Recolor the live-range.
2166       Matrix->unassign(LI);
2167       Matrix->assign(LI, PhysReg);
2168     }
2169     // Push all copy-related live-ranges to keep reconciling the broken
2170     // hints.
2171     for (const HintInfo &HI : Info) {
2172       if (Visited.insert(HI.Reg).second)
2173         RecoloringCandidates.push_back(HI.Reg);
2174     }
2175   } while (!RecoloringCandidates.empty());
2176 }
2177 
2178 /// Try to recolor broken hints.
2179 /// Broken hints may be repaired by recoloring when an evicted variable
2180 /// freed up a register for a larger live-range.
2181 /// Consider the following example:
2182 /// BB1:
2183 ///   a =
2184 ///   b =
2185 /// BB2:
2186 ///   ...
2187 ///   = b
2188 ///   = a
2189 /// Let us assume b gets split:
2190 /// BB1:
2191 ///   a =
2192 ///   b =
2193 /// BB2:
2194 ///   c = b
2195 ///   ...
2196 ///   d = c
2197 ///   = d
2198 ///   = a
2199 /// Because of how the allocation works, b, c, and d may be assigned different
2200 /// colors. Now, if a gets evicted later:
2201 /// BB1:
2202 ///   a =
2203 ///   st a, SpillSlot
2204 ///   b =
2205 /// BB2:
2206 ///   c = b
2207 ///   ...
2208 ///   d = c
2209 ///   = d
2210 ///   e = ld SpillSlot
2211 ///   = e
2212 /// It is likely that we can assign the same register to b, c, and d,
2213 /// getting rid of 2 copies.
2214 void RAGreedy::tryHintsRecoloring() {
2215   for (const LiveInterval *LI : SetOfBrokenHints) {
2216     assert(Register::isVirtualRegister(LI->reg()) &&
2217            "Recoloring is possible only for virtual registers");
2218     // Some dead defs may be around (e.g., because of debug uses).
2219     // Ignore those.
2220     if (!VRM->hasPhys(LI->reg()))
2221       continue;
2222     tryHintRecoloring(*LI);
2223   }
2224 }
2225 
2226 MCRegister RAGreedy::selectOrSplitImpl(const LiveInterval &VirtReg,
2227                                        SmallVectorImpl<Register> &NewVRegs,
2228                                        SmallVirtRegSet &FixedRegisters,
2229                                        RecoloringStack &RecolorStack,
2230                                        unsigned Depth) {
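       // No limit on the per-use register cost yet; tryAssignCSRFirstTime may
       // lower it to keep tryEvict away from unused callee-saved registers.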
2231   uint8_t CostPerUseLimit = uint8_t(~0u);
2232   // First try assigning a free register.
2233   auto Order =
2234       AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
2235   if (MCRegister PhysReg =
2236           tryAssign(VirtReg, Order, NewVRegs, FixedRegisters)) {
2237     // When NewVRegs is not empty, we may have made decisions such as evicting
2238     // a virtual register; go with the earlier decisions and use the physical
2239     // register.
2240     if (CSRCost.getFrequency() &&
2241         EvictAdvisor->isUnusedCalleeSavedReg(PhysReg) && NewVRegs.empty()) {
2242       MCRegister CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
2243                                                 CostPerUseLimit, NewVRegs);
2244       if (CSRReg || !NewVRegs.empty())
2245         // Return now if we decide to use a CSR or create new vregs due to
2246         // pre-splitting.
2247         return CSRReg;
2248     } else
2249       return PhysReg;
2250   }
2251 
2252   LiveRangeStage Stage = ExtraInfo->getStage(VirtReg);
2253   LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade "
2254                     << ExtraInfo->getCascade(VirtReg.reg()) << '\n');
2255 
2256   // Try to evict a less worthy live range, but only for ranges from the primary
2257   // queue. The RS_Split ranges already failed to do this, and they should not
2258   // get a second chance until they have been split.
2259   if (Stage != RS_Split)
2260     if (Register PhysReg =
2261             tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit,
2262                      FixedRegisters)) {
2263       Register Hint = MRI->getSimpleHint(VirtReg.reg());
2264       // If VirtReg has a hint and that hint is broken record this
2265       // virtual register as a recoloring candidate for broken hint.
2266       // Indeed, since we evicted a variable in its neighborhood it is
2267       // likely we can at least partially recolor some of the
2268       // copy-related live-ranges.
2269       if (Hint && Hint != PhysReg)
2270         SetOfBrokenHints.insert(&VirtReg);
2271       return PhysReg;
2272     }
2273 
2274   assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs");
2275 
2276   // The first time we see a live range, don't try to split or spill.
2277   // Wait until the second time, when all smaller ranges have been allocated.
2278   // This gives a better picture of the interference to split around.
2279   if (Stage < RS_Split) {
2280     ExtraInfo->setStage(VirtReg, RS_Split);
2281     LLVM_DEBUG(dbgs() << "wait for second round\n");
2282     NewVRegs.push_back(VirtReg.reg());
2283     return 0;
2284   }
2285 
2286   if (Stage < RS_Spill) {
2287     // Try splitting VirtReg or interferences.
2288     unsigned NewVRegSizeBefore = NewVRegs.size();
2289     Register PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
2290     if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore))
2291       return PhysReg;
2292   }
2293 
2294   // If we couldn't allocate a register from spilling, there is probably some
2295   // invalid inline assembly. The base class will report it.
2296   if (Stage >= RS_Done || !VirtReg.isSpillable()) {
2297     return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
2298                                    RecolorStack, Depth);
2299   }
2300 
2301   // Finally spill VirtReg itself.
2302   if ((EnableDeferredSpilling ||
2303        TRI->shouldUseDeferredSpillingForVirtReg(*MF, VirtReg)) &&
2304       ExtraInfo->getStage(VirtReg) < RS_Memory) {
2305     // TODO: This is experimental and in particular, we do not model
2306     // the live range splitting done by spilling correctly.
2307     // We would need a deep integration with the spiller to do the
2308     // right thing here. Anyway, that is still good for early testing.
2309     ExtraInfo->setStage(VirtReg, RS_Memory);
2310     LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n");
2311     NewVRegs.push_back(VirtReg.reg());
2312   } else {
2313     NamedRegionTimer T("spill", "Spiller", TimerGroupName,
2314                        TimerGroupDescription, TimePassesIsEnabled);
2315     LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2316     spiller().spill(LRE);
2317     ExtraInfo->setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
2318 
2319     // Tell LiveDebugVariables about the new ranges. Ranges not being covered by
2320     // the new regs are kept in LDV (still mapping to the old register), until
2321     // we rewrite spilled locations in LDV at a later stage.
2322     DebugVars->splitRegister(VirtReg.reg(), LRE.regs(), *LIS);
2323 
2324     if (VerifyEnabled)
2325       MF->verify(this, "After spilling");
2326   }
2327 
2328   // The live virtual register requesting allocation was spilled, so tell
2329   // the caller not to allocate anything during this round.
2330   return 0;
2331 }
2332 
2333 void RAGreedy::RAGreedyStats::report(MachineOptimizationRemarkMissed &R) {
2334   using namespace ore;
2335   if (Spills) {
2336     R << NV("NumSpills", Spills) << " spills ";
2337     R << NV("TotalSpillsCost", SpillsCost) << " total spills cost ";
2338   }
2339   if (FoldedSpills) {
2340     R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
2341     R << NV("TotalFoldedSpillsCost", FoldedSpillsCost)
2342       << " total folded spills cost ";
2343   }
2344   if (Reloads) {
2345     R << NV("NumReloads", Reloads) << " reloads ";
2346     R << NV("TotalReloadsCost", ReloadsCost) << " total reloads cost ";
2347   }
2348   if (FoldedReloads) {
2349     R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
2350     R << NV("TotalFoldedReloadsCost", FoldedReloadsCost)
2351       << " total folded reloads cost ";
2352   }
2353   if (ZeroCostFoldedReloads)
2354     R << NV("NumZeroCostFoldedReloads", ZeroCostFoldedReloads)
2355       << " zero cost folded reloads ";
2356   if (Copies) {
2357     R << NV("NumVRCopies", Copies) << " virtual register copies ";
2358     R << NV("TotalCopiesCost", CopiesCost) << " total copies cost ";
2359   }
2360 }
2361 
2362 RAGreedy::RAGreedyStats RAGreedy::computeStats(MachineBasicBlock &MBB) {
2363   RAGreedyStats Stats;
2364   const MachineFrameInfo &MFI = MF->getFrameInfo();
2365   int FI;
2366 
2367   auto isSpillSlotAccess = [&MFI](const MachineMemOperand *A) {
2368     return MFI.isSpillSlotObjectIndex(cast<FixedStackPseudoSourceValue>(
2369         A->getPseudoValue())->getFrameIndex());
2370   };
2371   auto isPatchpointInstr = [](const MachineInstr &MI) {
2372     return MI.getOpcode() == TargetOpcode::PATCHPOINT ||
2373            MI.getOpcode() == TargetOpcode::STACKMAP ||
2374            MI.getOpcode() == TargetOpcode::STATEPOINT;
2375   };
2376   for (MachineInstr &MI : MBB) {
2377     if (MI.isCopy()) {
2378       MachineOperand &Dest = MI.getOperand(0);
2379       MachineOperand &Src = MI.getOperand(1);
2380       if (Dest.isReg() && Src.isReg() && Dest.getReg().isVirtual() &&
2381           Src.getReg().isVirtual())
2382         ++Stats.Copies;
2383       continue;
2384     }
2385 
2386     SmallVector<const MachineMemOperand *, 2> Accesses;
2387     if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) {
2388       ++Stats.Reloads;
2389       continue;
2390     }
2391     if (TII->isStoreToStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) {
2392       ++Stats.Spills;
2393       continue;
2394     }
2395     if (TII->hasLoadFromStackSlot(MI, Accesses) &&
2396         llvm::any_of(Accesses, isSpillSlotAccess)) {
2397       if (!isPatchpointInstr(MI)) {
2398         Stats.FoldedReloads += Accesses.size();
2399         continue;
2400       }
2401       // For statepoints there may be folded and zero-cost folded stack reloads.
2402       std::pair<unsigned, unsigned> NonZeroCostRange =
2403           TII->getPatchpointUnfoldableRange(MI);
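           // Frame-index operands outside this range are assumed not to require
           // an actual reload (they are only recorded in the stackmap), so they
           // are counted as zero cost.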
2404       SmallSet<unsigned, 16> FoldedReloads;
2405       SmallSet<unsigned, 16> ZeroCostFoldedReloads;
2406       for (unsigned Idx = 0, E = MI.getNumOperands(); Idx < E; ++Idx) {
2407         MachineOperand &MO = MI.getOperand(Idx);
2408         if (!MO.isFI() || !MFI.isSpillSlotObjectIndex(MO.getIndex()))
2409           continue;
2410         if (Idx >= NonZeroCostRange.first && Idx < NonZeroCostRange.second)
2411           FoldedReloads.insert(MO.getIndex());
2412         else
2413           ZeroCostFoldedReloads.insert(MO.getIndex());
2414       }
2415       // If a stack slot is used by a folded reload, it is not zero cost.
2416       for (unsigned Slot : FoldedReloads)
2417         ZeroCostFoldedReloads.erase(Slot);
2418       Stats.FoldedReloads += FoldedReloads.size();
2419       Stats.ZeroCostFoldedReloads += ZeroCostFoldedReloads.size();
2420       continue;
2421     }
2422     Accesses.clear();
2423     if (TII->hasStoreToStackSlot(MI, Accesses) &&
2424         llvm::any_of(Accesses, isSpillSlotAccess)) {
2425       Stats.FoldedSpills += Accesses.size();
2426     }
2427   }
2428   // Weight the collected statistics by the relative frequency of this basic
2429   // block.
2430   float RelFreq = MBFI->getBlockFreqRelativeToEntryBlock(&MBB);
2431   Stats.ReloadsCost = RelFreq * Stats.Reloads;
2432   Stats.FoldedReloadsCost = RelFreq * Stats.FoldedReloads;
2433   Stats.SpillsCost = RelFreq * Stats.Spills;
2434   Stats.FoldedSpillsCost = RelFreq * Stats.FoldedSpills;
2435   Stats.CopiesCost = RelFreq * Stats.Copies;
2436   return Stats;
2437 }
2438 
2439 RAGreedy::RAGreedyStats RAGreedy::reportStats(MachineLoop *L) {
2440   RAGreedyStats Stats;
2441 
2442   // Sum up the spills and reloads in subloops.
2443   for (MachineLoop *SubLoop : *L)
2444     Stats.add(reportStats(SubLoop));
2445 
2446   for (MachineBasicBlock *MBB : L->getBlocks())
2447     // Handle blocks that were not included in subloops.
2448     if (Loops->getLoopFor(MBB) == L)
2449       Stats.add(computeStats(*MBB));
2450 
2451   if (!Stats.isEmpty()) {
2452     using namespace ore;
2453 
2454     ORE->emit([&]() {
2455       MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReloadCopies",
2456                                         L->getStartLoc(), L->getHeader());
2457       Stats.report(R);
2458       R << "generated in loop";
2459       return R;
2460     });
2461   }
2462   return Stats;
2463 }
2464 
2465 void RAGreedy::reportStats() {
2466   if (!ORE->allowExtraAnalysis(DEBUG_TYPE))
2467     return;
2468   RAGreedyStats Stats;
2469   for (MachineLoop *L : *Loops)
2470     Stats.add(reportStats(L));
2471   // Process non-loop blocks.
2472   for (MachineBasicBlock &MBB : *MF)
2473     if (!Loops->getLoopFor(&MBB))
2474       Stats.add(computeStats(MBB));
2475   if (!Stats.isEmpty()) {
2476     using namespace ore;
2477 
2478     ORE->emit([&]() {
2479       DebugLoc Loc;
2480       if (auto *SP = MF->getFunction().getSubprogram())
2481         Loc = DILocation::get(SP->getContext(), SP->getLine(), 1, SP);
2482       MachineOptimizationRemarkMissed R(DEBUG_TYPE, "SpillReloadCopies", Loc,
2483                                         &MF->front());
2484       Stats.report(R);
2485       R << "generated in function";
2486       return R;
2487     });
2488   }
2489 }
2490 
2491 bool RAGreedy::hasVirtRegAlloc() {
2492   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2493     Register Reg = Register::index2VirtReg(I);
2494     if (MRI->reg_nodbg_empty(Reg))
2495       continue;
2496     const TargetRegisterClass *RC = MRI->getRegClass(Reg);
2497     if (!RC)
2498       continue;
2499     if (ShouldAllocateClass(*TRI, *RC))
2500       return true;
2501   }
2502 
2503   return false;
2504 }
2505 
2506 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
2507   LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
2508                     << "********** Function: " << mf.getName() << '\n');
2509 
2510   MF = &mf;
2511   TII = MF->getSubtarget().getInstrInfo();
2512 
2513   if (VerifyEnabled)
2514     MF->verify(this, "Before greedy register allocator");
2515 
2516   RegAllocBase::init(getAnalysis<VirtRegMap>(),
2517                      getAnalysis<LiveIntervals>(),
2518                      getAnalysis<LiveRegMatrix>());
2519 
2520   // Early return if there is no virtual register to be allocated to a
2521   // physical register.
2522   if (!hasVirtRegAlloc())
2523     return false;
2524 
2525   Indexes = &getAnalysis<SlotIndexes>();
2526   MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
2527   DomTree = &getAnalysis<MachineDominatorTree>();
2528   ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
2529   Loops = &getAnalysis<MachineLoopInfo>();
2530   Bundles = &getAnalysis<EdgeBundles>();
2531   SpillPlacer = &getAnalysis<SpillPlacement>();
2532   DebugVars = &getAnalysis<LiveDebugVariables>();
2533   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2534 
2535   initializeCSRCost();
2536 
2537   RegCosts = TRI->getRegisterCosts(*MF);
2538   RegClassPriorityTrumpsGlobalness =
2539       GreedyRegClassPriorityTrumpsGlobalness.getNumOccurrences()
2540           ? GreedyRegClassPriorityTrumpsGlobalness
2541           : TRI->regClassPriorityTrumpsGlobalness(*MF);
2542 
2543   ExtraInfo.emplace();
2544   EvictAdvisor =
2545       getAnalysis<RegAllocEvictionAdvisorAnalysis>().getAdvisor(*MF, *this);
2546 
2547   VRAI = std::make_unique<VirtRegAuxInfo>(*MF, *LIS, *VRM, *Loops, *MBFI);
2548   SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM, *VRAI));
2549 
2550   VRAI->calculateSpillWeightsAndHints();
2551 
2552   LLVM_DEBUG(LIS->dump());
2553 
2554   SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
2555   SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI, *VRAI));
2556 
2557   IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
2558   GlobalCand.resize(32);  // This will grow as needed.
2559   SetOfBrokenHints.clear();
2560 
2561   allocatePhysRegs();
2562   tryHintsRecoloring();
2563 
2564   if (VerifyEnabled)
2565     MF->verify(this, "Before post optimization");
2566   postOptimization();
2567   reportStats();
2568 
2569   releaseMemory();
2570   return true;
2571 }
2572