//===- MCSchedule.cpp - Scheduling ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the default scheduling model.
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <type_traits>

using namespace llvm;

static_assert(std::is_pod<MCSchedModel>::value,
              "We shouldn't have a static constructor here");
const MCSchedModel MCSchedModel::Default = {DefaultIssueWidth,
                                            DefaultMicroOpBufferSize,
                                            DefaultLoopMicroOpBufferSize,
                                            DefaultLoadLatency,
                                            DefaultHighLatency,
                                            DefaultMispredictPenalty,
                                            false,
                                            true,
                                            0,
                                            nullptr,
                                            nullptr,
                                            0,
                                            0,
                                            nullptr,
                                            nullptr};

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  int Latency = 0;
  for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
       DefIdx != DefEnd; ++DefIdx) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI.getWriteLatencyEntry(&SCDesc, DefIdx);
    // Early exit if we found an invalid latency.
    if (WLEntry->Cycles < 0)
      return WLEntry->Cycles;
    Latency = std::max(Latency, static_cast<int>(WLEntry->Cycles));
  }
  return Latency;
}

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      unsigned SchedClass) const {
  const MCSchedClassDesc &SCDesc = *getSchedClassDesc(SchedClass);
  if (!SCDesc.isValid())
    return 0;
  if (!SCDesc.isVariant())
    return MCSchedModel::computeInstrLatency(STI, SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return 0;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::computeInstrLatency(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  Optional<double> Throughput;
  const MCSchedModel &SM = STI.getSchedModel();
  const MCWriteProcResEntry *I = STI.getWriteProcResBegin(&SCDesc);
  const MCWriteProcResEntry *E = STI.getWriteProcResEnd(&SCDesc);
  for (; I != E; ++I) {
    if (!I->Cycles)
      continue;
    unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits;
    double Temp = NumUnits * 1.0 / I->Cycles;
    Throughput = Throughput ? std::min(Throughput.value(), Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / Throughput.value();

  // If no throughput value was calculated, assume that we can execute at the
  // maximum issue width scaled by number of micro-ops for the schedule class.
  return ((double)SCDesc.NumMicroOps) / SM.IssueWidth;
}

double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);

  // If there's no valid class, assume that the instruction executes/completes
  // at the maximum issue width.
  if (!SCDesc->isValid())
    return 1.0 / IssueWidth;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::getReciprocalThroughput(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

double
MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
                                      const InstrItineraryData &IID) {
  Optional<double> Throughput;
  const InstrStage *I = IID.beginStage(SchedClass);
  const InstrStage *E = IID.endStage(SchedClass);
  for (; I != E; ++I) {
    if (!I->getCycles())
      continue;
    double Temp = countPopulation(I->getUnits()) * 1.0 / I->getCycles();
    Throughput = Throughput ? std::min(Throughput.value(), Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / Throughput.value();

  // If there are no execution resources specified for this class, then assume
  // that it can execute at the maximum default issue width.
  return 1.0 / DefaultIssueWidth;
}

unsigned
MCSchedModel::getForwardingDelayCycles(ArrayRef<MCReadAdvanceEntry> Entries,
                                       unsigned WriteResourceID) {
  if (Entries.empty())
    return 0;

  int DelayCycles = 0;
  for (const MCReadAdvanceEntry &E : Entries) {
    if (E.WriteResourceID != WriteResourceID)
      continue;
    DelayCycles = std::min(DelayCycles, E.Cycles);
  }

  return std::abs(DelayCycles);
}