//===- MCSchedule.cpp - Scheduling ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the default scheduling model.
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <optional>
#include <type_traits>

using namespace llvm;

static_assert(std::is_pod<MCSchedModel>::value,
              "We shouldn't have a static constructor here");
const MCSchedModel MCSchedModel::Default = {DefaultIssueWidth,
                                            DefaultMicroOpBufferSize,
                                            DefaultLoopMicroOpBufferSize,
                                            DefaultLoadLatency,
                                            DefaultHighLatency,
                                            DefaultMispredictPenalty,
                                            false,
                                            true,
                                            false /*EnableIntervals*/,
                                            0,
                                            nullptr,
                                            nullptr,
                                            0,
                                            0,
                                            nullptr,
                                            nullptr};

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  int Latency = 0;
  for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
       DefIdx != DefEnd; ++DefIdx) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI.getWriteLatencyEntry(&SCDesc, DefIdx);
    // Early exit if we found an invalid latency.
    if (WLEntry->Cycles < 0)
      return WLEntry->Cycles;
    Latency = std::max(Latency, static_cast<int>(WLEntry->Cycles));
  }
  return Latency;
}

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      unsigned SchedClass) const {
  const MCSchedClassDesc &SCDesc = *getSchedClassDesc(SchedClass);
  if (!SCDesc.isValid())
    return 0;
  if (!SCDesc.isVariant())
    return MCSchedModel::computeInstrLatency(STI, SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return 0;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::computeInstrLatency(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  std::optional<double> Throughput;
  const MCSchedModel &SM = STI.getSchedModel();
  const MCWriteProcResEntry *I = STI.getWriteProcResBegin(&SCDesc);
  const MCWriteProcResEntry *E = STI.getWriteProcResEnd(&SCDesc);
  for (; I != E; ++I) {
    if (!I->Cycles)
      continue;
    unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits;
    double Temp = NumUnits * 1.0 / I->Cycles;
    Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / *Throughput;

  // If no throughput value was calculated, assume that we can execute at the
  // maximum issue width scaled by number of micro-ops for the schedule class.
  return ((double)SCDesc.NumMicroOps) / SM.IssueWidth;
}

double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);

  // If there's no valid class, assume that the instruction executes/completes
  // at the maximum issue width.
  if (!SCDesc->isValid())
    return 1.0 / IssueWidth;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::getReciprocalThroughput(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

double
MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
                                      const InstrItineraryData &IID) {
  std::optional<double> Throughput;
  const InstrStage *I = IID.beginStage(SchedClass);
  const InstrStage *E = IID.endStage(SchedClass);
  for (; I != E; ++I) {
    if (!I->getCycles())
      continue;
    double Temp = llvm::popcount(I->getUnits()) * 1.0 / I->getCycles();
    Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / *Throughput;

  // If there are no execution resources specified for this class, then assume
  // that it can execute at the maximum default issue width.
  return 1.0 / DefaultIssueWidth;
}

unsigned
MCSchedModel::getForwardingDelayCycles(ArrayRef<MCReadAdvanceEntry> Entries,
                                       unsigned WriteResourceID) {
  if (Entries.empty())
    return 0;

  int DelayCycles = 0;
  for (const MCReadAdvanceEntry &E : Entries) {
    if (E.WriteResourceID != WriteResourceID)
      continue;
    DelayCycles = std::min(DelayCycles, E.Cycles);
  }

  return std::abs(DelayCycles);
}
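
// Illustrative usage sketch (not part of the upstream file): the block below
// shows how a client such as llvm-mca might combine the helpers defined above
// to query per-instruction timings. The function name `exampleQuery` is
// hypothetical and is kept inside `#if 0` so it does not affect this
// translation unit; the MCSubtargetInfo, MCInstrInfo, and MCInst arguments are
// assumed to come from the client's own target setup.
#if 0
static void exampleQuery(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
                         const MCInst &Inst) {
  const MCSchedModel &SM = STI.getSchedModel();
  // Maximum write latency across all definitions of the instruction; variant
  // scheduling classes are resolved internally via resolveVariantSchedClass.
  int Latency = SM.computeInstrLatency(STI, MCII, Inst);
  // Inverse of the best-case sustainable issue rate, in cycles per
  // instruction (smaller is better).
  double RThroughput = SM.getReciprocalThroughput(STI, MCII, Inst);
  (void)Latency;
  (void)RThroughput;
}
#endif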