//===- CalcSpillWeights.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "calcspillweights"

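// Compute a spill weight and collect allocation hints for every virtual
// register that has at least one non-debug use in the function.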
void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
  LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
                    << "********** Function: " << MF.getName() << '\n');

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    if (MRI.reg_nodbg_empty(Reg))
      continue;
    calculateSpillWeightAndHint(LIS.getInterval(Reg));
  }
}

// Return the preferred allocation register for reg, given a COPY instruction.
Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, unsigned Reg,
                                  const TargetRegisterInfo &TRI,
                                  const MachineRegisterInfo &MRI) {
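  // Identify which side of the copy is Reg; the other operand provides the
  // hint candidate register and its subregister index.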
  unsigned Sub, HSub;
  Register HReg;
  if (MI->getOperand(0).getReg() == Reg) {
    Sub = MI->getOperand(0).getSubReg();
    HReg = MI->getOperand(1).getReg();
    HSub = MI->getOperand(1).getSubReg();
  } else {
    Sub = MI->getOperand(1).getSubReg();
    HReg = MI->getOperand(0).getReg();
    HSub = MI->getOperand(0).getSubReg();
  }

  if (!HReg)
    return 0;

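  // A virtual register is only a useful hint if both sides of the copy use the
  // same subregister index.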
  if (HReg.isVirtual())
    return Sub == HSub ? HReg : Register();

  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  MCRegister CopiedPReg = HSub ? TRI.getSubReg(HReg, HSub) : HReg.asMCReg();
  if (RC->contains(CopiedPReg))
    return CopiedPReg;

  // Check if reg:sub matches so that a super register could be hinted.
  if (Sub)
    return TRI.getMatchingSuperReg(CopiedPReg, Sub, RC);

  return 0;
}

// Check if all values in LI are rematerializable
bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
                                        const LiveIntervals &LIS,
                                        const VirtRegMap &VRM,
                                        const TargetInstrInfo &TII) {
  Register Reg = LI.reg();
  Register Original = VRM.getOriginal(Reg);
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting.  The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    while (TII.isFullCopyInstr(*MI)) {
      // The copy destination must match the interval register.
      if (MI->getOperand(0).getReg() != Reg)
        return false;

      // Get the source register.
      Reg = MI->getOperand(1).getReg();

      // If the original (pre-splitting) registers match, this copy came from a
      // split; otherwise it cannot be traced any further.
      if (!Reg.isVirtual() || VRM.getOriginal(Reg) != Original)
        return false;

      // Follow the copy live-in value.
      const LiveInterval &SrcLI = LIS.getInterval(Reg);
      LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
      VNI = SrcQ.valueIn();
      assert(VNI && "Copy from non-existing value");
      if (VNI->isPHIDef())
        return false;
      MI = LIS.getInstructionFromIndex(VNI->def);
      assert(MI && "Dead valno in interval");
    }

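    // Every value must ultimately be defined by a trivially rematerializable
    // instruction.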
    if (!TII.isTriviallyReMaterializable(*MI))
      return false;
  }
  return true;
}

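// Return true if LI has a use as a variadic operand of a STATEPOINT. Such an
// operand may legally be kept on the stack and folded into the statepoint.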
bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval &LI) {
  return any_of(VRM.getRegInfo().reg_operands(LI.reg()),
                [](MachineOperand &MO) {
    MachineInstr *MI = MO.getParent();
    if (MI->getOpcode() != TargetOpcode::STATEPOINT)
      return false;
    return StatepointOpers(MI).getVarIdx() <= MO.getOperandNo();
  });
}

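// Compute the spill weight for LI and store it on the interval. A negative
// weight from the helper means LI is unspillable and its weight is left as is.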
void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
  float Weight = weightCalcHelper(LI);
  // Check if unspillable.
  if (Weight < 0)
    return;
  LI.setWeight(Weight);
}

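// Return true if LI is used by an inline asm operand that may be folded into a
// memory operand.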
static bool canMemFoldInlineAsm(LiveInterval &LI,
                                const MachineRegisterInfo &MRI) {
  for (const MachineOperand &MO : MRI.reg_operands(LI.reg())) {
    const MachineInstr *MI = MO.getParent();
    if (MI->isInlineAsm() && MI->mayFoldInlineAsmRegOp(MI->getOperandNo(&MO)))
      return true;
  }

  return false;
}

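// Compute the spill weight for LI. When Start and End are provided, the weight
// is computed for a prospective local split artifact covering that range, and
// LI's allocation hints are not updated.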
float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
                                       SlotIndex *End) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineBasicBlock *MBB = nullptr;
  float TotalWeight = 0;
  unsigned NumInstr = 0; // Number of instructions using LI
  SmallPtrSet<MachineInstr *, 8> Visited;

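  // Remember any hint already recorded by the target; a hint type of zero is a
  // plain register preference.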
  std::pair<unsigned, Register> TargetHint = MRI.getRegAllocationHint(LI.reg());

  if (LI.isSpillable()) {
    Register Reg = LI.reg();
    Register Original = VRM.getOriginal(Reg);
    const LiveInterval &OrigInt = LIS.getInterval(Original);
    // li comes from a split of OrigInt. If OrigInt was marked
    // as not spillable, make sure the new interval is marked
    // as not spillable as well.
    if (!OrigInt.isSpillable())
      LI.markNotSpillable();
  }

  // Don't recompute spill weight for an unspillable register.
  bool IsSpillable = LI.isSpillable();

  bool IsLocalSplitArtifact = Start && End;

  // Do not update future local split artifacts.
  bool ShouldUpdateLI = !IsLocalSplitArtifact;

  if (IsLocalSplitArtifact) {
    MachineBasicBlock *LocalMBB = LIS.getMBBFromIndex(*End);
    assert(LocalMBB == LIS.getMBBFromIndex(*Start) &&
           "start and end are expected to be in the same basic block");

    // Local split artifact will have 2 additional copy instructions and they
    // will be in the same BB.
    // localLI = COPY other
    // ...
    // other   = COPY localLI
    TotalWeight += LiveIntervals::getSpillWeight(true, false, &MBFI, LocalMBB);
    TotalWeight += LiveIntervals::getSpillWeight(false, true, &MBFI, LocalMBB);

    NumInstr += 2;
  }

  // CopyHint is a sortable hint derived from a COPY instruction.
  struct CopyHint {
    const Register Reg;
    const float Weight;
    CopyHint(Register R, float W) : Reg(R), Weight(W) {}
    bool operator<(const CopyHint &Rhs) const {
      // Always prefer any physreg hint.
      if (Reg.isPhysical() != Rhs.Reg.isPhysical())
        return Reg.isPhysical();
      if (Weight != Rhs.Weight)
        return (Weight > Rhs.Weight);
      return Reg.id() < Rhs.Reg.id(); // Tie-breaker.
    }
  };

  bool IsExiting = false;
  std::set<CopyHint> CopyHints;
  DenseMap<unsigned, float> Hint;
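  // Walk all non-debug instructions that use LI's register, accumulating the
  // spill weight and collecting copy hints.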
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           I = MRI.reg_instr_nodbg_begin(LI.reg()),
           E = MRI.reg_instr_nodbg_end();
       I != E;) {
    MachineInstr *MI = &*(I++);

    // For local split artifacts, we are interested only in instructions between
    // the expected start and end of the range.
    SlotIndex SI = LIS.getInstructionIndex(*MI);
    if (IsLocalSplitArtifact && ((SI < *Start) || (SI > *End)))
      continue;

    NumInstr++;
    bool identityCopy = false;
    auto DestSrc = TII.isCopyInstr(*MI);
    if (DestSrc) {
      const MachineOperand *DestRegOp = DestSrc->Destination;
      const MachineOperand *SrcRegOp = DestSrc->Source;
      identityCopy = DestRegOp->getReg() == SrcRegOp->getReg() &&
                     DestRegOp->getSubReg() == SrcRegOp->getSubReg();
    }

    if (identityCopy || MI->isImplicitDef())
      continue;
    if (!Visited.insert(MI).second)
      continue;

    // For terminators that produce values, ask the backend if the register is
    // not spillable.
    if (TII.isUnspillableTerminator(MI) &&
        MI->definesRegister(LI.reg(), /*TRI=*/nullptr)) {
      LI.markNotSpillable();
      return -1.0f;
    }

    // Force Weight onto the stack so that x86 doesn't add hidden precision,
    // similar to HWeight below.
    stack_float_t Weight = 1.0f;
    if (IsSpillable) {
      // Get loop info for mi.
      if (MI->getParent() != MBB) {
        MBB = MI->getParent();
        const MachineLoop *Loop = Loops.getLoopFor(MBB);
        IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
      }

      // Calculate instr weight.
      bool Reads, Writes;
      std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
      Weight = LiveIntervals::getSpillWeight(Writes, Reads, &MBFI, *MI);

      // Give extra weight to what looks like a loop induction variable update.
      if (Writes && IsExiting && LIS.isLiveOutOfMBB(LI, MBB))
        Weight *= 3;

      TotalWeight += Weight;
    }

    // Get allocation hints from copies.
    if (!TII.isCopyInstr(*MI))
      continue;
    Register HintReg = copyHint(MI, LI.reg(), TRI, MRI);
    if (!HintReg)
      continue;
    // Force HWeight onto the stack so that x86 doesn't add hidden precision,
    // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
    stack_float_t HWeight = Hint[HintReg] += Weight;
    if (HintReg.isVirtual() || MRI.isAllocatable(HintReg))
      CopyHints.insert(CopyHint(HintReg, HWeight));
  }

  // Pass all the sorted copy hints to mri.
  if (ShouldUpdateLI && CopyHints.size()) {
    // Remove a generic hint if previously added by target.
    if (TargetHint.first == 0 && TargetHint.second)
      MRI.clearSimpleHint(LI.reg());

    SmallSet<Register, 4> HintedRegs;
    for (const auto &Hint : CopyHints) {
      if (!HintedRegs.insert(Hint.Reg).second ||
          (TargetHint.first != 0 && Hint.Reg == TargetHint.second))
        // Don't add the same reg twice or the target-type hint again.
        continue;
      MRI.addRegAllocationHint(LI.reg(), Hint.Reg);
    }

    // Weakly boost the spill weight of hinted registers.
    TotalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!IsSpillable)
    return -1.0;

  // Mark LI as unspillable if all live ranges are tiny and the interval is not
  // live at any reg mask. If the interval is live at a reg mask, spilling may
  // be required. The same applies when LI is used as a variadic operand of a
  // STATEPOINT: marking such an interval unspillable risks running out of
  // registers, whereas a STATEPOINT is perfectly fine with the operand on the
  // stack, so spilling the interval and folding the reload from the stack into
  // the instruction itself makes sense.
  if (ShouldUpdateLI && LI.isZeroLength(LIS.getSlotIndexes()) &&
      !LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
      !isLiveAtStatepointVarArg(LI) && !canMemFoldInlineAsm(LI, MRI)) {
    LI.markNotSpillable();
    return -1.0;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(LI, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
    TotalWeight *= 0.5F;

  if (IsLocalSplitArtifact)
    return normalize(TotalWeight, Start->distance(*End), NumInstr);
  return normalize(TotalWeight, LI.getSize(), NumInstr);
}
346