//===- CalcSpillWeights.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "calcspillweights"

void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
  LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
                    << "********** Function: " << MF.getName() << '\n');

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    if (MRI.reg_nodbg_empty(Reg))
      continue;
    calculateSpillWeightAndHint(LIS.getInterval(Reg));
  }
}

// Return the preferred allocation register for reg, given a COPY instruction.
Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, unsigned Reg,
                                  const TargetRegisterInfo &TRI,
                                  const MachineRegisterInfo &MRI) {
  unsigned Sub, HSub;
  Register HReg;
  if (MI->getOperand(0).getReg() == Reg) {
    Sub = MI->getOperand(0).getSubReg();
    HReg = MI->getOperand(1).getReg();
    HSub = MI->getOperand(1).getSubReg();
  } else {
    Sub = MI->getOperand(1).getSubReg();
    HReg = MI->getOperand(0).getReg();
    HSub = MI->getOperand(0).getSubReg();
  }

  if (!HReg)
    return 0;

  if (HReg.isVirtual())
    return Sub == HSub ? HReg : Register();

  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  MCRegister CopiedPReg = HSub ? TRI.getSubReg(HReg, HSub) : HReg.asMCReg();
  if (RC->contains(CopiedPReg))
    return CopiedPReg;

  // Check if reg:sub matches so that a super register could be hinted.
  if (Sub)
    return TRI.getMatchingSuperReg(CopiedPReg, Sub, RC);

  return 0;
}

// Check if all values in LI are rematerializable.
bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
                                        const LiveIntervals &LIS,
                                        const VirtRegMap &VRM,
                                        const TargetInstrInfo &TII) {
  Register Reg = LI.reg();
  Register Original = VRM.getOriginal(Reg);
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    while (MI->isFullCopy()) {
      // The copy destination must match the interval register.
      if (MI->getOperand(0).getReg() != Reg)
        return false;

      // Get the source register.
      Reg = MI->getOperand(1).getReg();

      // If the original (pre-splitting) registers match, this
      // copy came from a split.
      if (!Reg.isVirtual() || VRM.getOriginal(Reg) != Original)
        return false;

      // Follow the copy live-in value.
      const LiveInterval &SrcLI = LIS.getInterval(Reg);
      LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
      VNI = SrcQ.valueIn();
      assert(VNI && "Copy from non-existing value");
      if (VNI->isPHIDef())
        return false;
      MI = LIS.getInstructionFromIndex(VNI->def);
      assert(MI && "Dead valno in interval");
    }

    if (!TII.isTriviallyReMaterializable(*MI))
      return false;
  }
  return true;
}

bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval &LI) {
  return any_of(VRM.getRegInfo().reg_operands(LI.reg()),
                [](MachineOperand &MO) {
                  MachineInstr *MI = MO.getParent();
                  if (MI->getOpcode() != TargetOpcode::STATEPOINT)
                    return false;
                  return StatepointOpers(MI).getVarIdx() <=
                         MI->getOperandNo(&MO);
                });
}

void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
  float Weight = weightCalcHelper(LI);
  // Check if unspillable.
  if (Weight < 0)
    return;
  LI.setWeight(Weight);
}

float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
                                       SlotIndex *End) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineBasicBlock *MBB = nullptr;
  MachineLoop *Loop = nullptr;
  bool IsExiting = false;
  float TotalWeight = 0;
  unsigned NumInstr = 0; // Number of instructions using LI
  SmallPtrSet<MachineInstr *, 8> Visited;

  std::pair<Register, Register> TargetHint = MRI.getRegAllocationHint(LI.reg());

  if (LI.isSpillable()) {
    Register Reg = LI.reg();
    Register Original = VRM.getOriginal(Reg);
    const LiveInterval &OrigInt = LIS.getInterval(Original);
    // LI comes from a split of OrigInt. If OrigInt was marked
    // as not spillable, make sure the new interval is marked
    // as not spillable as well.
    if (!OrigInt.isSpillable())
      LI.markNotSpillable();
  }

  // Don't recompute spill weight for an unspillable register.
  bool IsSpillable = LI.isSpillable();

  bool IsLocalSplitArtifact = Start && End;

  // Do not update future local split artifacts.
  bool ShouldUpdateLI = !IsLocalSplitArtifact;

  if (IsLocalSplitArtifact) {
    MachineBasicBlock *LocalMBB = LIS.getMBBFromIndex(*End);
    assert(LocalMBB == LIS.getMBBFromIndex(*Start) &&
           "start and end are expected to be in the same basic block");

    // Local split artifact will have 2 additional copy instructions and they
    // will be in the same BB.
    // localLI = COPY other
    // ...
    // other   = COPY localLI
    TotalWeight += LiveIntervals::getSpillWeight(true, false, &MBFI, LocalMBB);
    TotalWeight += LiveIntervals::getSpillWeight(false, true, &MBFI, LocalMBB);

    NumInstr += 2;
  }

  // CopyHint is a sortable hint derived from a COPY instruction.
  struct CopyHint {
    const Register Reg;
    const float Weight;
    CopyHint(Register R, float W) : Reg(R), Weight(W) {}
    bool operator<(const CopyHint &Rhs) const {
      // Always prefer any physreg hint.
      if (Reg.isPhysical() != Rhs.Reg.isPhysical())
        return Reg.isPhysical();
      if (Weight != Rhs.Weight)
        return (Weight > Rhs.Weight);
      return Reg.id() < Rhs.Reg.id(); // Tie-breaker.
    }
  };

  std::set<CopyHint> CopyHints;
  DenseMap<unsigned, float> Hint;
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           I = MRI.reg_instr_nodbg_begin(LI.reg()),
           E = MRI.reg_instr_nodbg_end();
       I != E;) {
    MachineInstr *MI = &*(I++);

    // For local split artifacts, we are interested only in instructions
    // between the expected start and end of the range.
    SlotIndex SI = LIS.getInstructionIndex(*MI);
    if (IsLocalSplitArtifact && ((SI < *Start) || (SI > *End)))
      continue;

    NumInstr++;
    if (MI->isIdentityCopy() || MI->isImplicitDef())
      continue;
    if (!Visited.insert(MI).second)
      continue;

    // For terminators that produce values, ask the backend if the register is
    // not spillable.
    if (TII.isUnspillableTerminator(MI) && MI->definesRegister(LI.reg())) {
      LI.markNotSpillable();
      return -1.0f;
    }

    float Weight = 1.0f;
    if (IsSpillable) {
      // Get loop info for MI.
      if (MI->getParent() != MBB) {
        MBB = MI->getParent();
        Loop = Loops.getLoopFor(MBB);
        IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
      }

      // Calculate instr weight.
      bool Reads, Writes;
      std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
      Weight = LiveIntervals::getSpillWeight(Writes, Reads, &MBFI, *MI);

      // Give extra weight to what looks like a loop induction variable update.
      if (Writes && IsExiting && LIS.isLiveOutOfMBB(LI, MBB))
        Weight *= 3;

      TotalWeight += Weight;
    }

    // Get allocation hints from copies.
    if (!MI->isCopy())
      continue;
    Register HintReg = copyHint(MI, LI.reg(), TRI, MRI);
    if (!HintReg)
      continue;
    // Force HWeight onto the stack so that x86 doesn't add hidden precision,
    // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
    //
    // FIXME: we probably shouldn't use floats at all.
    volatile float HWeight = Hint[HintReg] += Weight;
    if (HintReg.isVirtual() || MRI.isAllocatable(HintReg))
      CopyHints.insert(CopyHint(HintReg, HWeight));
  }

  // Pass all the sorted copy hints to MRI.
  if (ShouldUpdateLI && CopyHints.size()) {
    // Remove a generic hint if previously added by target.
    if (TargetHint.first == 0 && TargetHint.second)
      MRI.clearSimpleHint(LI.reg());

    SmallSet<Register, 4> HintedRegs;
    for (const auto &Hint : CopyHints) {
      if (!HintedRegs.insert(Hint.Reg).second ||
          (TargetHint.first != 0 && Hint.Reg == TargetHint.second))
        // Don't add the same reg twice or the target-type hint again.
        continue;
      MRI.addRegAllocationHint(LI.reg(), Hint.Reg);
    }

    // Weakly boost the spill weight of hinted registers.
    TotalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!IsSpillable)
    return -1.0;

  // Mark LI as unspillable if all live ranges are tiny and the interval is not
  // live at any reg mask. If the interval is live at a reg mask, spilling may
  // be required. An interval that is used as a statepoint vararg must also
  // stay spillable: marking it as not spillable risks leaving no register to
  // allocate, while a STATEPOINT instruction is perfectly fine with having
  // this operand on the stack, so spilling such an interval and folding its
  // load from the stack into the instruction itself makes perfect sense.
  if (ShouldUpdateLI && LI.isZeroLength(LIS.getSlotIndexes()) &&
      !LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
      !isLiveAtStatepointVarArg(LI)) {
    LI.markNotSpillable();
    return -1.0;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(LI, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
    TotalWeight *= 0.5F;

  if (IsLocalSplitArtifact)
    return normalize(TotalWeight, Start->distance(*End), NumInstr);
  return normalize(TotalWeight, LI.getSize(), NumInstr);
}