//===--------------------- SchedulerStatistics.cpp --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the SchedulerStatistics interface.
///
//===----------------------------------------------------------------------===//

#include "Views/SchedulerStatistics.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"

namespace llvm {
namespace mca {

SchedulerStatistics::SchedulerStatistics(const llvm::MCSubtargetInfo &STI)
    : SM(STI.getSchedModel()), LQResourceID(0), SQResourceID(0), NumIssued(0),
      NumCycles(0), MostRecentLoadDispatched(~0U),
      MostRecentStoreDispatched(~0U),
      Usage(STI.getSchedModel().NumProcResourceKinds, {0, 0, 0}) {
  if (SM.hasExtraProcessorInfo()) {
    const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
    LQResourceID = EPI.LoadQueueID;
    SQResourceID = EPI.StoreQueueID;
  }
}

// FIXME: This implementation works under the assumption that load/store queue
// entries are reserved at the 'instruction dispatched' stage and released at
// the 'instruction executed' stage. This currently matches the behavior of
// LSUnit.
//
// The current design minimizes the number of events generated by the
// Dispatch/Execute stages, at the cost of doing extra bookkeeping in method
// `onEvent`. However, it introduces a subtle dependency between this view and
// how the LSUnit works.
//
// In the future, we should add a new "memory queue" event type so that we stop
// making assumptions about how the LSUnit works internally (see PR39828).
void SchedulerStatistics::onEvent(const HWInstructionEvent &Event) {
  if (Event.Type == HWInstructionEvent::Issued) {
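    // Accumulate the number of micro opcodes issued during this cycle. The
    // running total is sampled and reset by updateHistograms().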
    const Instruction &Inst = *Event.IR.getInstruction();
    NumIssued += Inst.getDesc().NumMicroOps;
  } else if (Event.Type == HWInstructionEvent::Dispatched) {
    const Instruction &Inst = *Event.IR.getInstruction();
    const unsigned Index = Event.IR.getSourceIndex();
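    // Reserve one load/store queue slot at most once per dispatched
    // instruction; the source index guards below protect against counting the
    // same instruction twice.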
    if (LQResourceID && Inst.getDesc().MayLoad &&
        MostRecentLoadDispatched != Index) {
      Usage[LQResourceID].SlotsInUse++;
      MostRecentLoadDispatched = Index;
    }
    if (SQResourceID && Inst.getDesc().MayStore &&
        MostRecentStoreDispatched != Index) {
      Usage[SQResourceID].SlotsInUse++;
      MostRecentStoreDispatched = Index;
    }
  } else if (Event.Type == HWInstructionEvent::Executed) {
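    // Queue entries are released when the instruction executes; this mirrors
    // the LSUnit behavior described in the FIXME above.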
    const Instruction &Inst = *Event.IR.getInstruction();
    if (LQResourceID && Inst.getDesc().MayLoad) {
      assert(Usage[LQResourceID].SlotsInUse);
      Usage[LQResourceID].SlotsInUse--;
    }
    if (SQResourceID && Inst.getDesc().MayStore) {
      assert(Usage[SQResourceID].SlotsInUse);
      Usage[SQResourceID].SlotsInUse--;
    }
  }
}

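// The load queue and the store queue are tracked separately through the
// dispatch/execute events handled in onEvent(), so their resource IDs are
// skipped when updating generic buffer usage here and in onReleasedBuffers().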
void SchedulerStatistics::onReservedBuffers(const InstRef & /* unused */,
                                            ArrayRef<unsigned> Buffers) {
  for (const unsigned Buffer : Buffers) {
    if (Buffer == LQResourceID || Buffer == SQResourceID)
      continue;
    Usage[Buffer].SlotsInUse++;
  }
}

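// Counterpart of onReservedBuffers(): release one slot for every reported
// buffer except the load/store queues.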
void SchedulerStatistics::onReleasedBuffers(const InstRef & /* unused */,
                                            ArrayRef<unsigned> Buffers) {
  for (const unsigned Buffer : Buffers) {
    if (Buffer == LQResourceID || Buffer == SQResourceID)
      continue;
    Usage[Buffer].SlotsInUse--;
  }
}

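// Per-cycle bookkeeping: sample the current occupancy of every buffered
// resource, record the number of micro opcodes issued this cycle, and reset
// the per-cycle issue counter.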
void SchedulerStatistics::updateHistograms() {
  for (BufferUsage &BU : Usage) {
    BU.CumulativeNumUsedSlots += BU.SlotsInUse;
    BU.MaxUsedSlots = std::max(BU.MaxUsedSlots, BU.SlotsInUse);
  }

  IssueWidthPerCycle[NumIssued]++;
  NumIssued = 0;
}

void SchedulerStatistics::printSchedulerStats(raw_ostream &OS) const {
  OS << "\n\nSchedulers - "
     << "number of cycles where we saw N micro opcodes issued:\n";
  OS << "[# issued], [# cycles]\n";

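  // Highlight the row for the largest observed issue width; since
  // IssueWidthPerCycle is ordered by key, std::max_element returns the entry
  // with the greatest number of issued micro opcodes.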
  bool HasColors = OS.has_colors();
  const auto It =
      std::max_element(IssueWidthPerCycle.begin(), IssueWidthPerCycle.end());
  for (const std::pair<const unsigned, unsigned> &Entry : IssueWidthPerCycle) {
    unsigned NumIssued = Entry.first;
    if (NumIssued == It->first && HasColors)
      OS.changeColor(raw_ostream::SAVEDCOLOR, true, false);

    unsigned IPC = Entry.second;
    OS << " " << NumIssued << ",          " << IPC << "  ("
       << format("%.1f", ((double)IPC / NumCycles) * 100) << "%)\n";
    if (HasColors)
      OS.resetColor();
  }
}

void SchedulerStatistics::printSchedulerUsage(raw_ostream &OS) const {
  assert(NumCycles && "Unexpected number of cycles!");

  OS << "\nScheduler's queue usage:\n";
  if (all_of(Usage, [](const BufferUsage &BU) { return !BU.MaxUsedSlots; })) {
    OS << "No scheduler resources used.\n";
    return;
  }

  OS << "[1] Resource name.\n"
     << "[2] Average number of used buffer entries.\n"
     << "[3] Maximum number of used buffer entries.\n"
     << "[4] Total number of buffer entries.\n\n"
     << " [1]            [2]        [3]        [4]\n";

  formatted_raw_ostream FOS(OS);
  bool HasColors = FOS.has_colors();
  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
    const MCProcResourceDesc &ProcResource = *SM.getProcResource(I);
    if (ProcResource.BufferSize <= 0)
      continue;

    const BufferUsage &BU = Usage[I];
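    // A buffer is considered "almost full" when its average occupancy reaches
    // 80% of its size (BufferSize * 4 / 5); both values are normalized before
    // the comparison that decides whether to highlight the average.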
    double AvgUsage = (double)BU.CumulativeNumUsedSlots / NumCycles;
    double AlmostFullThreshold = (double)(ProcResource.BufferSize * 4) / 5;
    unsigned NormalizedAvg = floor((AvgUsage * 10) + 0.5) / 10;
    unsigned NormalizedThreshold = floor((AlmostFullThreshold * 10) + 0.5) / 10;

    FOS << ProcResource.Name;
    FOS.PadToColumn(17);
    if (HasColors && NormalizedAvg >= NormalizedThreshold)
      FOS.changeColor(raw_ostream::YELLOW, true, false);
    FOS << NormalizedAvg;
    if (HasColors)
      FOS.resetColor();
    FOS.PadToColumn(28);
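    // Print the maximum occupancy in red if the buffer was completely full at
    // least once during the simulation.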
    if (HasColors &&
        BU.MaxUsedSlots == static_cast<unsigned>(ProcResource.BufferSize))
      FOS.changeColor(raw_ostream::RED, true, false);
    FOS << BU.MaxUsedSlots;
    if (HasColors)
      FOS.resetColor();
    FOS.PadToColumn(39);
    FOS << ProcResource.BufferSize << '\n';
  }

  FOS.flush();
}

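// Print the issue width histogram followed by the per-buffer usage table.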
void SchedulerStatistics::printView(raw_ostream &OS) const {
  printSchedulerStats(OS);
  printSchedulerUsage(OS);
}

} // namespace mca
} // namespace llvm