//===- CalcSpillWeights.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <set>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "calcspillweights"

void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
  LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
                    << "********** Function: " << MF.getName() << '\n');

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    unsigned Reg = Register::index2VirtReg(I);
    if (MRI.reg_nodbg_empty(Reg))
      continue;
    calculateSpillWeightAndHint(LIS.getInterval(Reg));
  }
}

// Return the preferred allocation register for reg, given a COPY instruction.
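// For example (illustrative only, using AArch64-style operands): given
// "%0.sub_32 = COPY $w1" with Reg == %0, the copy source is a physical
// register that %0's 64-bit register class does not contain, but because the
// def uses the sub_32 index, the returned hint is the matching super-register
// $x1.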
static Register copyHint(const MachineInstr *MI, unsigned Reg,
                         const TargetRegisterInfo &TRI,
                         const MachineRegisterInfo &MRI) {
  unsigned Sub, HSub;
  Register HReg;
  if (MI->getOperand(0).getReg() == Reg) {
    Sub = MI->getOperand(0).getSubReg();
    HReg = MI->getOperand(1).getReg();
    HSub = MI->getOperand(1).getSubReg();
  } else {
    Sub = MI->getOperand(1).getSubReg();
    HReg = MI->getOperand(0).getReg();
    HSub = MI->getOperand(0).getSubReg();
  }

  if (!HReg)
    return Register();

  if (Register::isVirtualRegister(HReg))
    return Sub == HSub ? HReg : Register();

  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  MCRegister CopiedPReg = HSub ? TRI.getSubReg(HReg, HSub) : HReg.asMCReg();
  if (RC->contains(CopiedPReg))
    return CopiedPReg;

  // Check if reg:sub matches so that a super register could be hinted.
  if (Sub)
    return TRI.getMatchingSuperReg(CopiedPReg, Sub, RC);

  return Register();
}

// Check if all values in LI are rematerializable.
static bool isRematerializable(const LiveInterval &LI, const LiveIntervals &LIS,
                               const VirtRegMap &VRM,
                               const TargetInstrInfo &TII) {
  unsigned Reg = LI.reg();
  unsigned Original = VRM.getOriginal(Reg);
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    while (MI->isFullCopy()) {
      // The copy destination must match the interval register.
      if (MI->getOperand(0).getReg() != Reg)
        return false;

      // Get the source register.
      Reg = MI->getOperand(1).getReg();

      // If the original (pre-splitting) registers match, this copy came from a
      // split.
      if (!Register::isVirtualRegister(Reg) || VRM.getOriginal(Reg) != Original)
        return false;

      // Follow the copy live-in value.
      const LiveInterval &SrcLI = LIS.getInterval(Reg);
      LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
      VNI = SrcQ.valueIn();
      assert(VNI && "Copy from non-existing value");
      if (VNI->isPHIDef())
        return false;
      MI = LIS.getInstructionFromIndex(VNI->def);
      assert(MI && "Dead valno in interval");
    }

    if (!TII.isTriviallyReMaterializable(*MI, LIS.getAliasAnalysis()))
      return false;
  }
  return true;
}

bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval &LI) {
  return any_of(VRM.getRegInfo().reg_operands(LI.reg()),
                [](MachineOperand &MO) {
                  MachineInstr *MI = MO.getParent();
                  if (MI->getOpcode() != TargetOpcode::STATEPOINT)
                    return false;
                  return StatepointOpers(MI).getVarIdx() <=
                         MI->getOperandNo(&MO);
                });
}

void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
  float Weight = weightCalcHelper(LI);
  // Check if unspillable.
  if (Weight < 0)
    return;
  LI.setWeight(Weight);
}

float VirtRegAuxInfo::futureWeight(LiveInterval &LI, SlotIndex Start,
                                   SlotIndex End) {
  return weightCalcHelper(LI, &Start, &End);
}

float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
                                       SlotIndex *End) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineBasicBlock *MBB = nullptr;
  MachineLoop *Loop = nullptr;
  bool IsExiting = false;
  float TotalWeight = 0;
  unsigned NumInstr = 0; // Number of instructions using LI
  SmallPtrSet<MachineInstr *, 8> Visited;

  std::pair<Register, Register> TargetHint = MRI.getRegAllocationHint(LI.reg());

  if (LI.isSpillable()) {
    Register Reg = LI.reg();
    Register Original = VRM.getOriginal(Reg);
    const LiveInterval &OrigInt = LIS.getInterval(Original);
    // LI comes from a split of OrigInt. If OrigInt was marked
    // as not spillable, make sure the new interval is marked
    // as not spillable as well.
    if (!OrigInt.isSpillable())
      LI.markNotSpillable();
  }

  // Don't recompute spill weight for an unspillable register.
  bool IsSpillable = LI.isSpillable();

  bool IsLocalSplitArtifact = Start && End;

  // Do not update future local split artifacts.
  bool ShouldUpdateLI = !IsLocalSplitArtifact;

  if (IsLocalSplitArtifact) {
    MachineBasicBlock *LocalMBB = LIS.getMBBFromIndex(*End);
    assert(LocalMBB == LIS.getMBBFromIndex(*Start) &&
           "start and end are expected to be in the same basic block");

    // A local split artifact will have 2 additional copy instructions, and
    // they will be in the same BB:
    // localLI = COPY other
    // ...
    // other = COPY localLI
    TotalWeight += LiveIntervals::getSpillWeight(true, false, &MBFI, LocalMBB);
    TotalWeight += LiveIntervals::getSpillWeight(false, true, &MBFI, LocalMBB);

    NumInstr += 2;
  }

  // CopyHint is a sortable hint derived from a COPY instruction.
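  // Hints sort so that the most attractive candidate comes first: any physreg
  // hint beats any virtreg hint, a hint copied more often (higher weight)
  // beats a lighter one, and the register number serves as a deterministic
  // tie-breaker.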
  struct CopyHint {
    const Register Reg;
    const float Weight;
    CopyHint(Register R, float W) : Reg(R), Weight(W) {}
    bool operator<(const CopyHint &Rhs) const {
      // Always prefer any physreg hint.
      if (Reg.isPhysical() != Rhs.Reg.isPhysical())
        return Reg.isPhysical();
      if (Weight != Rhs.Weight)
        return (Weight > Rhs.Weight);
      return Reg.id() < Rhs.Reg.id(); // Tie-breaker.
    }
  };

  std::set<CopyHint> CopyHints;
  DenseMap<unsigned, float> Hint;
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           I = MRI.reg_instr_nodbg_begin(LI.reg()),
           E = MRI.reg_instr_nodbg_end();
       I != E;) {
    MachineInstr *MI = &*(I++);

    // For local split artifacts, we are interested only in instructions
    // between the expected start and end of the range.
    SlotIndex SI = LIS.getInstructionIndex(*MI);
    if (IsLocalSplitArtifact && ((SI < *Start) || (SI > *End)))
      continue;

    NumInstr++;
    if (MI->isIdentityCopy() || MI->isImplicitDef())
      continue;
    if (!Visited.insert(MI).second)
      continue;

    // For terminators that produce values, ask the backend if the register is
    // not spillable.
    if (TII.isUnspillableTerminator(MI) && MI->definesRegister(LI.reg())) {
      LI.markNotSpillable();
      return -1.0f;
    }

    float Weight = 1.0f;
    if (IsSpillable) {
      // Get loop info for MI.
      if (MI->getParent() != MBB) {
        MBB = MI->getParent();
        Loop = Loops.getLoopFor(MBB);
        IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
      }

      // Calculate the instruction weight.
      bool Reads, Writes;
      std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
      Weight = LiveIntervals::getSpillWeight(Writes, Reads, &MBFI, *MI);

      // Give extra weight to what looks like a loop induction variable update.
      if (Writes && IsExiting && LIS.isLiveOutOfMBB(LI, MBB))
        Weight *= 3;

      TotalWeight += Weight;
    }

    // Get allocation hints from copies.
    if (!MI->isCopy())
      continue;
    Register HintReg = copyHint(MI, LI.reg(), TRI, MRI);
    if (!HintReg)
      continue;
    // Force HWeight onto the stack so that x86 doesn't add hidden precision,
    // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
    //
    // FIXME: we probably shouldn't use floats at all.
    volatile float HWeight = Hint[HintReg] += Weight;
    if (HintReg.isVirtual() || MRI.isAllocatable(HintReg))
      CopyHints.insert(CopyHint(HintReg, HWeight));
  }

  // Pass all the sorted copy hints to MRI.
  if (ShouldUpdateLI && !CopyHints.empty()) {
    // Remove a generic hint if previously added by the target.
    if (TargetHint.first == 0 && TargetHint.second)
      MRI.clearSimpleHint(LI.reg());

    std::set<Register> HintedRegs;
    for (const auto &CH : CopyHints) {
      if (!HintedRegs.insert(CH.Reg).second ||
          (TargetHint.first != 0 && CH.Reg == TargetHint.second))
        // Don't add the same reg twice or the target-type hint again.
        continue;
      MRI.addRegAllocationHint(LI.reg(), CH.Reg);
    }

    // Weakly boost the spill weight of hinted registers.
    TotalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!IsSpillable)
    return -1.0f;

  // Mark LI as unspillable if all its live ranges are tiny and the interval is
  // not live at any reg mask. If the interval is live at a reg mask, spilling
  // may be required. The same is true if LI is used as a variadic operand of a
  // STATEPOINT: marking such an interval as not spillable risks leaving no
  // register to allocate it to, while the STATEPOINT instruction is perfectly
  // fine with having this operand on the stack, so spilling the interval and
  // folding its load from the stack into the instruction itself makes perfect
  // sense.
  if (ShouldUpdateLI && LI.isZeroLength(LIS.getSlotIndexes()) &&
      !LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
      !isLiveAtStatepointVarArg(LI)) {
    LI.markNotSpillable();
    return -1.0f;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(LI, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
    TotalWeight *= 0.5F;

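  // Normalize the accumulated, block-frequency-weighted use/def count against
  // the size of the live range. normalize() is a virtual hook; by default it
  // forwards to normalizeSpillWeight() in CalcSpillWeights.h, which computes
  // roughly UseDefFreq / (Size + K) for a constant K of a few instruction
  // distances, so tiny intervals are weighted mostly by their use count while
  // large intervals approach a use density.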
  if (IsLocalSplitArtifact)
    return normalize(TotalWeight, Start->distance(*End), NumInstr);
  return normalize(TotalWeight, LI.getSize(), NumInstr);
}