/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "uc/intel_uc.h"
#include "intel_gsc.h"

#include "i915_vma.h"
#include "i915_perf_types.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_hwconfig.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice that is valid for the 'subslice' class
 * of multicast registers.  If another type of steering does not have any
 * overlap in valid steering targets with 'subslice' style registers, we will
 * need to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here.
 */
enum intel_steering_type {
	L3BANK,
	MSLICE,
	LNCF,
	GAM,
	DSS,
	OADDRM,

	/*
	 * On some platforms there are multiple types of MCR registers that
	 * will always return a non-terminated value at instance (0, 0).  We'll
	 * lump those all into a single category to keep things simple.
	 */
	INSTANCE0,

	NUM_STEERING_TYPES
};
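
/*
 * Illustrative sketch (not part of the original header): each steering type
 * above is associated with a table of intel_mmio_range entries (see
 * steering_table[] in struct intel_gt below), conventionally terminated by
 * an empty range.  A register offset needs explicit re-steering when it
 * falls inside any entry.  The helper name below is hypothetical; the real
 * lookup lives in intel_gt_mcr.c.
 */
static inline bool example_reg_needs_steering(const struct intel_mmio_range *table,
					      u32 offset)
{
	/* Walk the range table until the empty terminator entry. */
	for (; table && table->end; table++) {
		if (offset >= table->start && offset <= table->end)
			return true;
	}

	return false;
}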

enum intel_submission_method {
	INTEL_SUBMISSION_RING,
	INTEL_SUBMISSION_ELSP,
	INTEL_SUBMISSION_GUC,
};

struct gt_defaults {
	u32 min_freq;
	u32 max_freq;

	u8 rps_up_threshold;
	u8 rps_down_threshold;
};

enum intel_gt_type {
	GT_PRIMARY,
	GT_TILE,
	GT_MEDIA,
};

struct intel_gt {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_gt_type type;

	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;
	struct intel_gsc gsc;
	struct intel_wopcm wopcm;

	struct {
		/* Serialize global tlb invalidations */
		struct mutex invalidate_lock;

		/*
		 * Batch TLB invalidations
		 *
		 * After unbinding the PTE, we need to ensure the TLBs
		 * are invalidated prior to releasing the physical pages.
		 * But we only need one such invalidation for all unbinds,
		 * so we track how many TLB invalidations have been
		 * performed since unbinding the PTE and only emit an
		 * extra invalidate if no full barrier has been passed
		 * since then (see the illustrative sketch after this
		 * struct).
		 */
		seqcount_mutex_t seqno;
	} tlb;

	struct i915_wa_list wa_list;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t *irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct {
		bool active;

		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time this GT was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where the GT is currently busy (active is true).
		 * A read-side sketch follows this struct.
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active being false, busy as active
		 * being true.
		 */
		ktime_t start;
	} stats;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

	struct {
		/*
		 * Mask of the non-fused CCS slices
		 * to be used for load balancing.
		 */
		intel_engine_mask_t cslices;
	} ccs;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_migrate migrate;

	const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];

	struct {
		u8 groupid;
		u8 instanceid;
	} default_steering;

	/**
	 * @mcr_lock: Protects the MCR steering register
	 *
	 * Protects the MCR steering register (e.g., GEN8_MCR_SELECTOR).
	 * Should be taken before uncore->lock in cases where both are desired.
	 */
	spinlock_t mcr_lock;

	/*
	 * Base of per-tile GTTMMADR where we can derive the MMIO and the GGTT.
	 */
	phys_addr_t phys_addr;

	struct intel_gt_info {
		unsigned int id;

		intel_engine_mask_t engine_mask;

		u32 l3bank_mask;

		u8 num_engines;

		/* General presence of SFC units */
		u8 sfc_mask;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;

		unsigned long mslice_mask;

		/** @hwconfig: hardware configuration data */
		struct intel_hwconfig hwconfig;
	} info;

	struct {
		u8 uc_index;
		u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
	} mocs;

	/* gt/gtN sysfs */
	struct kobject sysfs_gt;

	/* sysfs defaults per gt */
	struct gt_defaults defaults;
	struct kobject *sysfs_defaults;

	struct work_struct wedge;

	struct i915_perf_gt perf;

	/** link: &ggtt.gt_list */
	struct list_head ggtt_link;
};
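
/*
 * Illustrative sketches (not part of the original header) of how the two
 * seqcounts in struct intel_gt above are meant to be used; the function
 * names are hypothetical and the real logic lives in intel_gt.c and
 * intel_gt_pm.c.  The first two outline the batched TLB invalidation
 * scheme described next to tlb.seqno: an unbind samples the current
 * invalidation generation and a fresh global invalidation is only emitted
 * if none has completed since.  The third shows a consistent read of the
 * busy-time accounting kept in the stats block.
 */
static inline u32 example_tlb_unbind_seqno(struct intel_gt *gt)
{
	/* Generation of the last completed global TLB invalidation. */
	return read_seqcount_begin(&gt->tlb.seqno);
}

static inline void example_tlb_invalidate_if_needed(struct intel_gt *gt,
						    u32 unbind_seqno)
{
	/* A full invalidation already completed since the unbind? Skip. */
	if (read_seqcount_begin(&gt->tlb.seqno) != unbind_seqno)
		return;

	mutex_lock(&gt->tlb.invalidate_lock);
	/* Re-check under the lock in case another thread did the work. */
	if (read_seqcount_begin(&gt->tlb.seqno) == unbind_seqno) {
		write_seqcount_begin(&gt->tlb.seqno);
		/* ...emit the global TLB invalidation to the hardware... */
		write_seqcount_end(&gt->tlb.seqno);
	}
	mutex_unlock(&gt->tlb.invalidate_lock);
}

static inline ktime_t example_gt_busy_time(struct intel_gt *gt, ktime_t now)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&gt->stats.lock);
		total = gt->stats.total;
		if (gt->stats.active)
			total = ktime_add(total, ktime_sub(now, gt->stats.start));
	} while (read_seqcount_retry(&gt->stats.lock, seq));

	return total;
}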

struct intel_gt_definition {
	enum intel_gt_type type;
	char *name;
	u32 mapping_base;
	u32 gsi_offset;
	intel_engine_mask_t engine_mask;
};
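
/*
 * Illustrative sketch (not part of the original header): a platform with a
 * standalone media GT could describe it with an entry along these lines.
 * All values below are made up for illustration; the real definitions live
 * in the per-platform GT probe code.
 */
static const struct intel_gt_definition example_media_gt_definition __maybe_unused = {
	.type = GT_MEDIA,
	.name = "Example Media GT",
	.mapping_base = 0x380000,	/* hypothetical GTTMMADR offset */
	.gsi_offset = 0x380000,		/* hypothetical GSI register offset */
	.engine_mask = BIT(VCS0) | BIT(VECS0),
};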

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
};
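
/*
 * Illustrative sketch: the scratch fields above are byte offsets into the
 * gt->scratch VMA, so a per-field GGTT address can be derived roughly like
 * this (the driver's intel_gt_scratch_offset() helper plays this role;
 * the name below is only an example).
 */
static inline u32 example_gt_scratch_offset(const struct intel_gt *gt,
					    enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}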

#define intel_gt_support_legacy_fencing(gt) ((gt)->ggtt->num_fences > 0)

#endif /* __INTEL_GT_TYPES__ */