//===-- llvm/MC/MCSchedule.h - Scheduling -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the classes used to describe a subtarget's machine model
// for scheduling and other instruction cost heuristics.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSCHEDULE_H
#define LLVM_MC_MCSCHEDULE_H

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>

namespace llvm {

template <typename T> class ArrayRef;
struct InstrItinerary;
class MCSubtargetInfo;
class MCInstrInfo;
class MCInst;
class InstrItineraryData;

/// Define a kind of processor resource that will be modeled by the scheduler.
struct MCProcResourceDesc {
  const char *Name;
  unsigned NumUnits; // Number of resources of this kind.
  unsigned SuperIdx; // Index of the resource kind that contains this kind.

  // Number of resources that may be buffered.
  //
  // Buffered resources (BufferSize != 0) may be consumed at some indeterminate
  // cycle after dispatch. This should be used for out-of-order cpus when
  // instructions that use this resource can be buffered in a reservation
  // station.
  //
  // Unbuffered resources (BufferSize == 0) always consume their resource some
  // fixed number of cycles after dispatch. If a resource is unbuffered, then
  // the scheduler will avoid scheduling instructions with conflicting resources
  // in the same cycle. This is for in-order cpus, or the in-order portion of
  // an out-of-order cpu.
  int BufferSize;

  // If the resource has sub-units, a pointer to the first element of an array
  // of `NumUnits` elements containing the ProcResourceIdx of the sub units.
  // nullptr if the resource does not have sub-units.
  const unsigned *SubUnitsIdxBegin;

  bool operator==(const MCProcResourceDesc &Other) const {
    return NumUnits == Other.NumUnits && SuperIdx == Other.SuperIdx
      && BufferSize == Other.BufferSize;
  }
};
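
// For illustration only: a hypothetical resource table fragment (these entries
// are not taken from any in-tree target). Aggregate order follows the fields
// above: {Name, NumUnits, SuperIdx, BufferSize, SubUnitsIdxBegin}.
//
//   static const MCProcResourceDesc MyProcResources[] = {
//     {"MyALU",     2, 0, -1, nullptr}, // buffered: consumed out-of-order.
//     {"MyDivider", 1, 0,  0, nullptr}, // unbuffered: scheduled in-order.
//   };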

/// Identify one of the processor resource kinds consumed by a
/// particular scheduling class for the specified number of cycles.
/// TODO: consider renaming the fields `StartAtCycle` and `Cycles` to
/// `AcquireAtCycle` and `ReleaseAtCycle` respectively, to stress the
/// fact that resource allocation is now represented as an interval,
/// relative to the issue cycle of the instruction.
struct MCWriteProcResEntry {
  uint16_t ProcResourceIdx;
  /// Cycle at which the resource will be released by an instruction,
  /// relative to the cycle in which the instruction is issued
  /// (assuming no stalls in between).
  uint16_t Cycles;
  /// Cycle at which the resource will be grabbed by an instruction,
  /// relative to the cycle in which the instruction is issued
  /// (assuming no stalls in between).
  uint16_t StartAtCycle;

  bool operator==(const MCWriteProcResEntry &Other) const {
    return ProcResourceIdx == Other.ProcResourceIdx && Cycles == Other.Cycles &&
           StartAtCycle == Other.StartAtCycle;
  }
};
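
// Illustrative sketch (hypothetical values): an entry such as
//
//   MCWriteProcResEntry WPR = {/*ProcResourceIdx=*/3, /*Cycles=*/4,
//                              /*StartAtCycle=*/1};
//
// would mean that resource kind 3 is held over the cycle interval [1, 4)
// relative to the issue cycle, i.e. acquired one cycle after issue and
// released at cycle 4.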

/// Specify the latency in cpu cycles for a particular scheduling class and def
/// index. -1 indicates an invalid latency. Heuristics would typically consider
/// an instruction with invalid latency to have infinite latency. Also identify
/// the WriteResources of this def. When the operand expands to a sequence of
/// writes, this ID is the last write in the sequence.
struct MCWriteLatencyEntry {
  int16_t Cycles;
  uint16_t WriteResourceID;

  bool operator==(const MCWriteLatencyEntry &Other) const {
    return Cycles == Other.Cycles && WriteResourceID == Other.WriteResourceID;
  }
};

/// Specify the number of cycles allowed after instruction issue before a
/// particular use operand reads its registers. This effectively reduces the
/// write's latency. Here we allow negative cycles for corner cases where
/// latency increases. This rule only applies when the entry's WriteResource
/// matches the write's WriteResource.
///
/// MCReadAdvanceEntries are sorted first by operand index (UseIdx), then by
/// WriteResourceIdx.
struct MCReadAdvanceEntry {
  unsigned UseIdx;
  unsigned WriteResourceID;
  int Cycles;

  bool operator==(const MCReadAdvanceEntry &Other) const {
    return UseIdx == Other.UseIdx && WriteResourceID == Other.WriteResourceID
      && Cycles == Other.Cycles;
  }
};
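
// Worked example (hypothetical numbers): if a producing write has a latency of
// 5 cycles and the consuming operand has a matching MCReadAdvanceEntry with
// Cycles == 2, the dependent instruction observes an effective latency of
// 5 - 2 = 3 cycles, modeling an operand-forwarding path.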

/// Summarize the scheduling resources required for an instruction of a
/// particular scheduling class.
///
/// Defined as an aggregate struct for creating tables with initializer lists.
struct MCSchedClassDesc {
  static const unsigned short InvalidNumMicroOps = (1U << 13) - 1;
  static const unsigned short VariantNumMicroOps = InvalidNumMicroOps - 1;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  const char* Name;
#endif
  uint16_t NumMicroOps : 13;
  uint16_t BeginGroup : 1;
  uint16_t EndGroup : 1;
  uint16_t RetireOOO : 1;
  uint16_t WriteProcResIdx; // First index into WriteProcResTable.
  uint16_t NumWriteProcResEntries;
  uint16_t WriteLatencyIdx; // First index into WriteLatencyTable.
  uint16_t NumWriteLatencyEntries;
  uint16_t ReadAdvanceIdx; // First index into ReadAdvanceTable.
  uint16_t NumReadAdvanceEntries;

  bool isValid() const {
    return NumMicroOps != InvalidNumMicroOps;
  }
  bool isVariant() const {
    return NumMicroOps == VariantNumMicroOps;
  }
};
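
// Sketch of how the index/count pairs above are typically consumed through
// MCSubtargetInfo (local names are illustrative):
//
//   const MCSchedModel &SM = STI.getSchedModel();
//   const MCSchedClassDesc *SC = SM.getSchedClassDesc(SchedClassID);
//   if (SC->isValid() && !SC->isVariant())
//     for (auto I = STI.getWriteProcResBegin(SC),
//               E = STI.getWriteProcResEnd(SC); I != E; ++I)
//       ; // I->ProcResourceIdx is consumed for I->Cycles cycles.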

/// Specify the cost of a register definition in terms of the number of
/// physical registers allocated at register renaming stage. For example, AMD
/// Jaguar natively supports 128-bit data types, and operations on 256-bit
/// registers (i.e. YMM registers) are internally split into two COPs (complex
/// operations) and each COP updates a physical register. Basically, on Jaguar,
/// a YMM register write effectively consumes two physical registers. That
/// means, the cost of a YMM write in the BtVer2 model is 2.
struct MCRegisterCostEntry {
  unsigned RegisterClassID;
  unsigned Cost;
  bool AllowMoveElimination;
};
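
// Illustrative entry only (MyYMMRegClassID is a placeholder, not a real
// register class enumerator): a Jaguar-like cost table row saying that a
// write to this class consumes two physical registers and is not a candidate
// for move elimination.
//
//   MCRegisterCostEntry Entry = {/*RegisterClassID=*/MyYMMRegClassID,
//                                /*Cost=*/2, /*AllowMoveElimination=*/false};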

/// A register file descriptor.
///
/// This struct describes a processor register file. In particular, it helps
/// describe the size of the register file, as well as the cost of allocating
/// physical registers at the register renaming stage.
/// FIXME: this struct can be extended to provide information about the number
/// of read/write ports to the register file. A value of zero for field
/// 'NumPhysRegs' means: this register file has an unbounded number of physical
/// registers.
struct MCRegisterFileDesc {
  const char *Name;
  uint16_t NumPhysRegs;
  uint16_t NumRegisterCostEntries;
  // Index of the first cost entry in MCExtraProcessorInfo::RegisterCostTable.
  uint16_t RegisterCostEntryIdx;
  // A value of zero means: there is no limit on the number of moves that can
  // be eliminated every cycle.
  uint16_t MaxMovesEliminatedPerCycle;
  // True if this register file only knows how to optimize register moves from
  // known zero registers.
  bool AllowZeroMoveEliminationOnly;
};

/// Provide extra details about the machine processor.
///
/// This is a collection of "optional" processor information that is not
/// normally used by the LLVM machine schedulers, but that can be consumed by
/// external tools like llvm-mca to improve the quality of the performance
/// analysis.
struct MCExtraProcessorInfo {
  // Actual size of the reorder buffer in hardware.
  unsigned ReorderBufferSize;
  // Number of instructions retired per cycle.
  unsigned MaxRetirePerCycle;
  const MCRegisterFileDesc *RegisterFiles;
  unsigned NumRegisterFiles;
  const MCRegisterCostEntry *RegisterCostTable;
  unsigned NumRegisterCostEntries;
  unsigned LoadQueueID;
  unsigned StoreQueueID;
};

/// Machine model for scheduling, bundling, and heuristics.
///
/// The machine model directly provides basic information about the
/// microarchitecture to the scheduler in the form of properties. It also
/// optionally refers to scheduler resource tables and itinerary
/// tables. Scheduler resource tables model the latency and cost for each
/// instruction type. Itinerary tables are an independent mechanism that
/// provides a detailed reservation table describing each cycle of instruction
/// execution. Subtargets may define any or all of the above categories of data
/// depending on the type of CPU and selected scheduler.
///
/// The machine independent properties defined here are used by the scheduler as
/// an abstract machine model. A real micro-architecture has a number of
/// buffers, queues, and stages. Declaring that a given machine-independent
/// abstract property corresponds to a specific physical property across all
/// subtargets can't be done. Nonetheless, the abstract model is
/// useful. Furthermore, subtargets typically extend this model with processor
/// specific resources to model any hardware features that can be exploited by
/// scheduling heuristics and aren't sufficiently represented in the abstract.
///
/// The abstract pipeline is built around the notion of an "issue point". This
/// is merely a reference point for counting machine cycles. The physical
/// machine will have pipeline stages that delay execution. The scheduler does
/// not model those delays because they are irrelevant as long as they are
/// consistent. Inaccuracies arise when instructions have different execution
/// delays relative to each other, in addition to their intrinsic latency. Those
/// special cases can be handled by TableGen constructs such as ReadAdvance,
/// which reduces latency when reading data, and ResourceCycles, which consumes
/// a processor resource when writing data for a number of abstract
/// cycles.
///
/// TODO: One tool currently missing is the ability to add a delay to
/// ResourceCycles. That would be easy to add and would likely cover all cases
/// currently handled by the legacy itinerary tables.
///
/// A note on out-of-order execution and, more generally, instruction
/// buffers. Part of the CPU pipeline is always in-order. The issue point, which
/// is the point of reference for counting cycles, only makes sense as an
/// in-order part of the pipeline. Other parts of the pipeline are sometimes
/// falling behind and sometimes catching up. It's only interesting to model
/// those other, decoupled parts of the pipeline if they may be predictably
/// resource constrained in a way that the scheduler can exploit.
///
/// The LLVM machine model distinguishes between in-order constraints and
/// out-of-order constraints so that the target's scheduling strategy can apply
/// appropriate heuristics. For a well-balanced CPU pipeline, out-of-order
/// resources would not typically be treated as a hard scheduling
/// constraint. For example, in the GenericScheduler, a delay caused by limited
/// out-of-order resources is not directly reflected in the number of cycles
/// that the scheduler sees between issuing an instruction and its dependent
/// instructions. In other words, out-of-order resources don't directly increase
/// the latency between pairs of instructions. However, they can still be used
/// to detect potential bottlenecks across a sequence of instructions and bias
/// the scheduling heuristics appropriately.
struct MCSchedModel {
  // IssueWidth is the maximum number of instructions that may be scheduled in
  // the same per-cycle group. This is meant to be a hard in-order constraint
  // (a.k.a. "hazard"). In the GenericScheduler strategy, no more than
  // IssueWidth micro-ops can ever be scheduled in a particular cycle.
  //
  // In practice, IssueWidth is useful to model any bottleneck between the
  // decoder (after micro-op expansion) and the out-of-order reservation
  // stations or the decoder bandwidth itself. If the total number of
  // reservation stations is also a bottleneck, or if any other pipeline stage
  // has a bandwidth limitation, then that can be naturally modeled by adding an
  // out-of-order processor resource.
  unsigned IssueWidth;
  static const unsigned DefaultIssueWidth = 1;

  // MicroOpBufferSize is the number of micro-ops that the processor may buffer
  // for out-of-order execution.
  //
  // "0" means operations that are not ready in this cycle are not considered
  // for scheduling (they go in the pending queue). Latency is paramount. This
  // may be more efficient if many instructions are pending in a schedule.
  //
  // "1" means all instructions are considered for scheduling regardless of
  // whether they are ready in this cycle. Latency still causes issue stalls,
  // but we balance those stalls against other heuristics.
  //
  // "> 1" means the processor is out-of-order. This is a machine independent
  // estimate of highly machine specific characteristics such as the register
  // renaming pool and reorder buffer.
  unsigned MicroOpBufferSize;
  static const unsigned DefaultMicroOpBufferSize = 0;

  // LoopMicroOpBufferSize is the number of micro-ops that the processor may
  // buffer for optimized loop execution. More generally, this represents the
  // optimal number of micro-ops in a loop body. A loop may be partially
  // unrolled to bring the count of micro-ops in the loop body closer to this
  // number.
  unsigned LoopMicroOpBufferSize;
  static const unsigned DefaultLoopMicroOpBufferSize = 0;

  // LoadLatency is the expected latency of load instructions.
  unsigned LoadLatency;
  static const unsigned DefaultLoadLatency = 4;

  // HighLatency is the expected latency of "very high latency" operations.
  // See TargetInstrInfo::isHighLatencyDef().
  // By default, this is set to an arbitrarily high number of cycles
  // likely to have some impact on scheduling heuristics.
  unsigned HighLatency;
  static const unsigned DefaultHighLatency = 10;

  // MispredictPenalty is the typical number of extra cycles the processor
  // takes to recover from a branch misprediction.
  unsigned MispredictPenalty;
  static const unsigned DefaultMispredictPenalty = 10;

  bool PostRAScheduler; // default value is false

  bool CompleteModel;

  // Tells the MachineScheduler whether or not to track resource usage
  // using intervals via ResourceSegments (see
  // llvm/include/llvm/CodeGen/MachineScheduler.h).
  bool EnableIntervals;

  unsigned ProcID;
  const MCProcResourceDesc *ProcResourceTable;
  const MCSchedClassDesc *SchedClassTable;
  unsigned NumProcResourceKinds;
  unsigned NumSchedClasses;
  // Instruction itinerary tables used by InstrItineraryData.
  friend class InstrItineraryData;
  const InstrItinerary *InstrItineraries;

  const MCExtraProcessorInfo *ExtraProcessorInfo;

  bool hasExtraProcessorInfo() const { return ExtraProcessorInfo; }

  unsigned getProcessorID() const { return ProcID; }

  /// Does this machine model include instruction-level scheduling?
  bool hasInstrSchedModel() const { return SchedClassTable; }

  const MCExtraProcessorInfo &getExtraProcessorInfo() const {
    assert(hasExtraProcessorInfo() &&
           "No extra information available for this model");
    return *ExtraProcessorInfo;
  }

  /// Return true if this machine model includes data for all instructions
  /// with a scheduling class (itinerary class or SchedRW list).
  bool isComplete() const { return CompleteModel; }

  /// Return true if the machine supports out-of-order execution.
  bool isOutOfOrder() const { return MicroOpBufferSize > 1; }

  unsigned getNumProcResourceKinds() const {
    return NumProcResourceKinds;
  }

  const MCProcResourceDesc *getProcResource(unsigned ProcResourceIdx) const {
    assert(hasInstrSchedModel() && "No scheduling machine model");

    assert(ProcResourceIdx < NumProcResourceKinds && "bad proc resource idx");
    return &ProcResourceTable[ProcResourceIdx];
  }

  const MCSchedClassDesc *getSchedClassDesc(unsigned SchedClassIdx) const {
    assert(hasInstrSchedModel() && "No scheduling machine model");

    assert(SchedClassIdx < NumSchedClasses && "bad scheduling class idx");
    return &SchedClassTable[SchedClassIdx];
  }

  /// Returns the latency value for the scheduling class.
  static int computeInstrLatency(const MCSubtargetInfo &STI,
                                 const MCSchedClassDesc &SCDesc);

  int computeInstrLatency(const MCSubtargetInfo &STI, unsigned SClass) const;
  int computeInstrLatency(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
                          const MCInst &Inst) const;

  // Returns the reciprocal throughput information from an MCSchedClassDesc.
  static double
  getReciprocalThroughput(const MCSubtargetInfo &STI,
                          const MCSchedClassDesc &SCDesc);

  static double
  getReciprocalThroughput(unsigned SchedClass, const InstrItineraryData &IID);

  double
  getReciprocalThroughput(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
                          const MCInst &Inst) const;

  /// Returns the maximum forwarding delay for register reads dependent on
  /// writes of scheduling class WriteResourceIdx.
  static unsigned getForwardingDelayCycles(ArrayRef<MCReadAdvanceEntry> Entries,
                                           unsigned WriteResourceIdx = 0);

  /// Returns the default initialized model.
  static const MCSchedModel &GetDefaultSchedModel() { return Default; }
  static const MCSchedModel Default;
};
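
// A minimal usage sketch (variable names are illustrative; STI, MCII and Inst
// are assumed to be a valid MCSubtargetInfo, MCInstrInfo and MCInst): query
// per-instruction latency and reciprocal throughput from the model.
//
//   const MCSchedModel &SM = STI.getSchedModel();
//   if (SM.hasInstrSchedModel()) {
//     int Latency = SM.computeInstrLatency(STI, MCII, Inst);
//     double RThroughput = SM.getReciprocalThroughput(STI, MCII, Inst);
//     (void)Latency;
//     (void)RThroughput;
//   }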

} // namespace llvm

#endif