//===- llvm/Target/TargetSchedule.cpp - Sched Machine Model ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>

using namespace llvm;

static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
  cl::desc("Use TargetSchedModel for latency lookup"));

static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
  cl::desc("Use InstrItineraryData for latency lookup"));

bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}

bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}

static unsigned gcd(unsigned Dividend, unsigned Divisor) {
  // Dividend and Divisor will be naturally swapped as needed.
  while (Divisor) {
    unsigned Rem = Dividend % Divisor;
    Dividend = Divisor;
    Divisor = Rem;
  }
  return Dividend;
}

static unsigned lcm(unsigned A, unsigned B) {
  unsigned LCM = (uint64_t(A) * B) / gcd(A, B);
  assert((LCM >= A && LCM >= B) && "LCM overflow");
  return LCM;
}

void TargetSchedModel::init(const TargetSubtargetInfo *TSInfo) {
  STI = TSInfo;
  SchedModel = TSInfo->getSchedModel();
  TII = TSInfo->getInstrInfo();
  STI->initInstrItins(InstrItins);

  unsigned NumRes = SchedModel.getNumProcResourceKinds();
  ResourceFactors.resize(NumRes);
  ResourceLCM = SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    if (NumUnits > 0)
      ResourceLCM = lcm(ResourceLCM, NumUnits);
  }
  MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
  }
}
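
// Worked example of the normalization above (illustrative numbers, not taken
// from any real subtarget): with IssueWidth = 4 and two resource kinds with
// NumUnits = 2 and NumUnits = 3, ResourceLCM = lcm(lcm(4, 2), 3) = 12. Then
// MicroOpFactor = 12 / 4 = 3 and the per-resource factors are 12 / 2 = 6 and
// 12 / 3 = 4, so one cycle of any resource (or one issue slot) converts to a
// common integer unit and clients can compare micro-op pressure against
// resource pressure using integer arithmetic only.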
/// Returns true only if the instruction is marked such that it must begin a
/// new dispatch group.
bool TargetSchedModel::mustBeginGroup(const MachineInstr *MI,
                                      const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->BeginGroup;
  }
  return false;
}

bool TargetSchedModel::mustEndGroup(const MachineInstr *MI,
                                    const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->EndGroup;
  }
  return false;
}

unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
                                          const MCSchedClassDesc *SC) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
  }
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->NumMicroOps;
  }
  return MI->isTransient() ? 0 : 1;
}

// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned capLatency(int Cycles) {
  return Cycles >= 0 ? Cycles : 1000;
}

/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return SCDesc;

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");
    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}

/// Find the def index of this operand. This index maps to the machine model
/// and is independent of use operands. Def operands may be reordered with
/// uses or merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}
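
// Worked example (hypothetical operand list): for an instruction with
// operands (def %d0, use %u0, def %d1), findDefIdx(MI, 2) returns 1 because
// exactly one def precedes operand index 2; the interleaved use is skipped.
// findUseIdx below applies the same counting to readsReg() operands instead.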
/// Find the use index of this operand. This is independent of the
/// instruction's def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg() && !MO.isDef())
      ++UseIdx;
  }
  return UseIdx;
}

// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
    const MachineInstr *DefMI, unsigned DefOperIdx,
    const MachineInstr *UseMI, unsigned UseOperIdx) const {
  if (!hasInstrSchedModel() && !hasInstrItineraries())
    return TII->defaultDefLatency(SchedModel, *DefMI);

  if (hasInstrItineraries()) {
    int OperLatency = 0;
    if (UseMI) {
      OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
                                           *UseMI, UseOperIdx);
    } else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }
    if (OperLatency >= 0)
      return OperLatency;

    // No operand latency was found.
    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, *DefMI);

    // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
    InstrLatency =
        std::max(InstrLatency, TII->defaultDefLatency(SchedModel, *DefMI));
    return InstrLatency;
  }

  // hasInstrSchedModel()
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Look up the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = capLatency(WLEntry->Cycles);
    if (!UseMI)
      return Latency;

    // Look up the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
    int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
    if (Advance > 0 && (unsigned)Advance > Latency) // unsigned wrap
      return 0;
    return Latency - Advance;
  }

  // If DefIdx does not exist in the model (e.g. implicit defs), then return
  // unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() &&
      !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef() &&
      SchedModel.isComplete()) {
    errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
           << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
    llvm_unreachable("incomplete machine model");
  }
#endif
  // FIXME: Automatically giving all implicit defs defaultDefLatency is
  // undesirable. We should only do it for defs that are known to the MC
  // desc like flags. Truly implicit defs should get 1 cycle latency.
  return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, *DefMI);
}
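
// Usage sketch (illustrative only; the operand indices are hypothetical): a
// scheduling DAG builder resolves the latency of a data-dependence edge
// roughly like
//   unsigned Lat = SchedModel.computeOperandLatency(DefMI, /*DefOperIdx=*/0,
//                                                   UseMI, /*UseOperIdx=*/1);
// where passing a null UseMI yields the raw write latency with no ReadAdvance
// adjustment applied.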

unsigned
TargetSchedModel::computeInstrLatency(const MCSchedClassDesc &SCDesc) const {
  return capLatency(MCSchedModel::computeInstrLatency(*STI, SCDesc));
}

unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
  assert(hasInstrSchedModel() && "Only call this function with a SchedModel");
  unsigned SCIdx = TII->get(Opcode).getSchedClass();
  return capLatency(SchedModel.computeInstrLatency(*STI, SCIdx));
}

unsigned TargetSchedModel::computeInstrLatency(const MCInst &Inst) const {
  if (hasInstrSchedModel())
    return capLatency(SchedModel.computeInstrLatency(*STI, *TII, Inst));
  return computeInstrLatency(Inst.getOpcode());
}

unsigned
TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
                                      bool UseDefaultDefLatency) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle() ||
      (!hasInstrSchedModel() && !UseDefaultDefLatency))
    return TII->getInstrLatency(&InstrItins, *MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid())
      return computeInstrLatency(*SCDesc);
  }
  return TII->defaultDefLatency(SchedModel, *MI);
}
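
// Summary of the MachineInstr overload above, added for clarity: itineraries
// and bundles go through the TargetInstrInfo::getInstrLatency() hook; a
// per-operand sched model uses the resolved sched class's write-latency
// entries (capped by capLatency); otherwise defaultDefLatency() provides a
// conservative per-target fallback (typically 1 cycle for simple defs, with
// larger values for loads and opcodes the target marks as high latency).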

unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  if (!SchedModel.isOutOfOrder())
    return 1;

  // Out-of-order processors can dispatch WAW dependencies in the same cycle.

  // Treat predication as a data dependency for out-of-order cpus. In-order
  // cpus do not need to treat predicated writes specially.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  Register Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getMF();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(*DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per-operand scheduling model, check if this def is writing
  // an unbuffered resource. If so, it is treated like an in-order cpu.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
                                     *PRE = STI->getWriteProcResEnd(SCDesc);
           PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
          return 1;
      }
    }
  }
  return 0;
}

double
TargetSchedModel::computeReciprocalThroughput(const MachineInstr *MI) const {
  if (hasInstrItineraries()) {
    unsigned SchedClass = MI->getDesc().getSchedClass();
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  }

  if (hasInstrSchedModel())
    return MCSchedModel::getReciprocalThroughput(*STI, *resolveSchedClass(MI));

  return 0.0;
}

double
TargetSchedModel::computeReciprocalThroughput(unsigned Opcode) const {
  unsigned SchedClass = TII->get(Opcode).getSchedClass();
  if (hasInstrItineraries())
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc &SCDesc = *SchedModel.getSchedClassDesc(SchedClass);
    if (SCDesc.isValid() && !SCDesc.isVariant())
      return MCSchedModel::getReciprocalThroughput(*STI, SCDesc);
  }

  return 0.0;
}

double
TargetSchedModel::computeReciprocalThroughput(const MCInst &MI) const {
  if (hasInstrSchedModel())
    return SchedModel.getReciprocalThroughput(*STI, *TII, MI);
  return computeReciprocalThroughput(MI.getOpcode());
}
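
// Usage sketch (illustrative only; `MF` and `MI` are hypothetical values a
// pass would already have in scope):
//
//   TargetSchedModel Model;
//   Model.init(&MF.getSubtarget());
//   unsigned Cycles = Model.computeInstrLatency(&MI);
//   double RThroughput = Model.computeReciprocalThroughput(&MI);
//
// Clients should prefer computeOperandLatency() for dependence edges and fall
// back to computeInstrLatency() only when no use instruction is known.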