xref: /linux/drivers/gpu/drm/xe/xe_hw_engine.c (revision c156ef573efe4230ef3dc1ff2ec0038fe0eb217f)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_hw_engine.h"
7 
8 #include <linux/nospec.h>
9 
10 #include <drm/drm_managed.h>
11 #include <uapi/drm/xe_drm.h>
12 
13 #include "regs/xe_engine_regs.h"
14 #include "regs/xe_gt_regs.h"
15 #include "regs/xe_irq_regs.h"
16 #include "xe_assert.h"
17 #include "xe_bo.h"
18 #include "xe_device.h"
19 #include "xe_execlist.h"
20 #include "xe_force_wake.h"
21 #include "xe_gsc.h"
22 #include "xe_gt.h"
23 #include "xe_gt_ccs_mode.h"
24 #include "xe_gt_mcr.h"
25 #include "xe_gt_printk.h"
26 #include "xe_gt_topology.h"
27 #include "xe_guc_capture.h"
28 #include "xe_hw_engine_group.h"
29 #include "xe_hw_fence.h"
30 #include "xe_irq.h"
31 #include "xe_lrc.h"
32 #include "xe_macros.h"
33 #include "xe_mmio.h"
34 #include "xe_reg_sr.h"
35 #include "xe_reg_whitelist.h"
36 #include "xe_rtp.h"
37 #include "xe_sched_job.h"
38 #include "xe_sriov.h"
39 #include "xe_tuning.h"
40 #include "xe_uc_fw.h"
41 #include "xe_wa.h"
42 
43 #define MAX_MMIO_BASES 3
44 struct engine_info {
45 	const char *name;
46 	unsigned int class : 8;
47 	unsigned int instance : 8;
48 	unsigned int irq_offset : 8;
49 	enum xe_force_wake_domains domain;
50 	u32 mmio_base;
51 };
52 
53 static const struct engine_info engine_infos[] = {
54 	[XE_HW_ENGINE_RCS0] = {
55 		.name = "rcs0",
56 		.class = XE_ENGINE_CLASS_RENDER,
57 		.instance = 0,
58 		.irq_offset = ilog2(INTR_RCS0),
59 		.domain = XE_FW_RENDER,
60 		.mmio_base = RENDER_RING_BASE,
61 	},
62 	[XE_HW_ENGINE_BCS0] = {
63 		.name = "bcs0",
64 		.class = XE_ENGINE_CLASS_COPY,
65 		.instance = 0,
66 		.irq_offset = ilog2(INTR_BCS(0)),
67 		.domain = XE_FW_RENDER,
68 		.mmio_base = BLT_RING_BASE,
69 	},
70 	[XE_HW_ENGINE_BCS1] = {
71 		.name = "bcs1",
72 		.class = XE_ENGINE_CLASS_COPY,
73 		.instance = 1,
74 		.irq_offset = ilog2(INTR_BCS(1)),
75 		.domain = XE_FW_RENDER,
76 		.mmio_base = XEHPC_BCS1_RING_BASE,
77 	},
78 	[XE_HW_ENGINE_BCS2] = {
79 		.name = "bcs2",
80 		.class = XE_ENGINE_CLASS_COPY,
81 		.instance = 2,
82 		.irq_offset = ilog2(INTR_BCS(2)),
83 		.domain = XE_FW_RENDER,
84 		.mmio_base = XEHPC_BCS2_RING_BASE,
85 	},
86 	[XE_HW_ENGINE_BCS3] = {
87 		.name = "bcs3",
88 		.class = XE_ENGINE_CLASS_COPY,
89 		.instance = 3,
90 		.irq_offset = ilog2(INTR_BCS(3)),
91 		.domain = XE_FW_RENDER,
92 		.mmio_base = XEHPC_BCS3_RING_BASE,
93 	},
94 	[XE_HW_ENGINE_BCS4] = {
95 		.name = "bcs4",
96 		.class = XE_ENGINE_CLASS_COPY,
97 		.instance = 4,
98 		.irq_offset = ilog2(INTR_BCS(4)),
99 		.domain = XE_FW_RENDER,
100 		.mmio_base = XEHPC_BCS4_RING_BASE,
101 	},
102 	[XE_HW_ENGINE_BCS5] = {
103 		.name = "bcs5",
104 		.class = XE_ENGINE_CLASS_COPY,
105 		.instance = 5,
106 		.irq_offset = ilog2(INTR_BCS(5)),
107 		.domain = XE_FW_RENDER,
108 		.mmio_base = XEHPC_BCS5_RING_BASE,
109 	},
110 	[XE_HW_ENGINE_BCS6] = {
111 		.name = "bcs6",
112 		.class = XE_ENGINE_CLASS_COPY,
113 		.instance = 6,
114 		.irq_offset = ilog2(INTR_BCS(6)),
115 		.domain = XE_FW_RENDER,
116 		.mmio_base = XEHPC_BCS6_RING_BASE,
117 	},
118 	[XE_HW_ENGINE_BCS7] = {
119 		.name = "bcs7",
120 		.class = XE_ENGINE_CLASS_COPY,
121 		.instance = 7,
122 		.irq_offset = ilog2(INTR_BCS(7)),
123 		.domain = XE_FW_RENDER,
124 		.mmio_base = XEHPC_BCS7_RING_BASE,
125 	},
126 	[XE_HW_ENGINE_BCS8] = {
127 		.name = "bcs8",
128 		.class = XE_ENGINE_CLASS_COPY,
129 		.instance = 8,
130 		.irq_offset = ilog2(INTR_BCS8),
131 		.domain = XE_FW_RENDER,
132 		.mmio_base = XEHPC_BCS8_RING_BASE,
133 	},
134 
135 	[XE_HW_ENGINE_VCS0] = {
136 		.name = "vcs0",
137 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
138 		.instance = 0,
139 		.irq_offset = 32 + ilog2(INTR_VCS(0)),
140 		.domain = XE_FW_MEDIA_VDBOX0,
141 		.mmio_base = BSD_RING_BASE,
142 	},
143 	[XE_HW_ENGINE_VCS1] = {
144 		.name = "vcs1",
145 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
146 		.instance = 1,
147 		.irq_offset = 32 + ilog2(INTR_VCS(1)),
148 		.domain = XE_FW_MEDIA_VDBOX1,
149 		.mmio_base = BSD2_RING_BASE,
150 	},
151 	[XE_HW_ENGINE_VCS2] = {
152 		.name = "vcs2",
153 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
154 		.instance = 2,
155 		.irq_offset = 32 + ilog2(INTR_VCS(2)),
156 		.domain = XE_FW_MEDIA_VDBOX2,
157 		.mmio_base = BSD3_RING_BASE,
158 	},
159 	[XE_HW_ENGINE_VCS3] = {
160 		.name = "vcs3",
161 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
162 		.instance = 3,
163 		.irq_offset = 32 + ilog2(INTR_VCS(3)),
164 		.domain = XE_FW_MEDIA_VDBOX3,
165 		.mmio_base = BSD4_RING_BASE,
166 	},
167 	[XE_HW_ENGINE_VCS4] = {
168 		.name = "vcs4",
169 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
170 		.instance = 4,
171 		.irq_offset = 32 + ilog2(INTR_VCS(4)),
172 		.domain = XE_FW_MEDIA_VDBOX4,
173 		.mmio_base = XEHP_BSD5_RING_BASE,
174 	},
175 	[XE_HW_ENGINE_VCS5] = {
176 		.name = "vcs5",
177 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
178 		.instance = 5,
179 		.irq_offset = 32 + ilog2(INTR_VCS(5)),
180 		.domain = XE_FW_MEDIA_VDBOX5,
181 		.mmio_base = XEHP_BSD6_RING_BASE,
182 	},
183 	[XE_HW_ENGINE_VCS6] = {
184 		.name = "vcs6",
185 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
186 		.instance = 6,
187 		.irq_offset = 32 + ilog2(INTR_VCS(6)),
188 		.domain = XE_FW_MEDIA_VDBOX6,
189 		.mmio_base = XEHP_BSD7_RING_BASE,
190 	},
191 	[XE_HW_ENGINE_VCS7] = {
192 		.name = "vcs7",
193 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
194 		.instance = 7,
195 		.irq_offset = 32 + ilog2(INTR_VCS(7)),
196 		.domain = XE_FW_MEDIA_VDBOX7,
197 		.mmio_base = XEHP_BSD8_RING_BASE,
198 	},
199 	[XE_HW_ENGINE_VECS0] = {
200 		.name = "vecs0",
201 		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
202 		.instance = 0,
203 		.irq_offset = 32 + ilog2(INTR_VECS(0)),
204 		.domain = XE_FW_MEDIA_VEBOX0,
205 		.mmio_base = VEBOX_RING_BASE,
206 	},
207 	[XE_HW_ENGINE_VECS1] = {
208 		.name = "vecs1",
209 		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
210 		.instance = 1,
211 		.irq_offset = 32 + ilog2(INTR_VECS(1)),
212 		.domain = XE_FW_MEDIA_VEBOX1,
213 		.mmio_base = VEBOX2_RING_BASE,
214 	},
215 	[XE_HW_ENGINE_VECS2] = {
216 		.name = "vecs2",
217 		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
218 		.instance = 2,
219 		.irq_offset = 32 + ilog2(INTR_VECS(2)),
220 		.domain = XE_FW_MEDIA_VEBOX2,
221 		.mmio_base = XEHP_VEBOX3_RING_BASE,
222 	},
223 	[XE_HW_ENGINE_VECS3] = {
224 		.name = "vecs3",
225 		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
226 		.instance = 3,
227 		.irq_offset = 32 + ilog2(INTR_VECS(3)),
228 		.domain = XE_FW_MEDIA_VEBOX3,
229 		.mmio_base = XEHP_VEBOX4_RING_BASE,
230 	},
231 	[XE_HW_ENGINE_CCS0] = {
232 		.name = "ccs0",
233 		.class = XE_ENGINE_CLASS_COMPUTE,
234 		.instance = 0,
235 		.irq_offset = ilog2(INTR_CCS(0)),
236 		.domain = XE_FW_RENDER,
237 		.mmio_base = COMPUTE0_RING_BASE,
238 	},
239 	[XE_HW_ENGINE_CCS1] = {
240 		.name = "ccs1",
241 		.class = XE_ENGINE_CLASS_COMPUTE,
242 		.instance = 1,
243 		.irq_offset = ilog2(INTR_CCS(1)),
244 		.domain = XE_FW_RENDER,
245 		.mmio_base = COMPUTE1_RING_BASE,
246 	},
247 	[XE_HW_ENGINE_CCS2] = {
248 		.name = "ccs2",
249 		.class = XE_ENGINE_CLASS_COMPUTE,
250 		.instance = 2,
251 		.irq_offset = ilog2(INTR_CCS(2)),
252 		.domain = XE_FW_RENDER,
253 		.mmio_base = COMPUTE2_RING_BASE,
254 	},
255 	[XE_HW_ENGINE_CCS3] = {
256 		.name = "ccs3",
257 		.class = XE_ENGINE_CLASS_COMPUTE,
258 		.instance = 3,
259 		.irq_offset = ilog2(INTR_CCS(3)),
260 		.domain = XE_FW_RENDER,
261 		.mmio_base = COMPUTE3_RING_BASE,
262 	},
263 	[XE_HW_ENGINE_GSCCS0] = {
264 		.name = "gsccs0",
265 		.class = XE_ENGINE_CLASS_OTHER,
266 		.instance = OTHER_GSC_INSTANCE,
267 		.domain = XE_FW_GSC,
268 		.mmio_base = GSCCS_RING_BASE,
269 	},
270 };
271 
272 static void hw_engine_fini(void *arg)
273 {
274 	struct xe_hw_engine *hwe = arg;
275 
276 	if (hwe->exl_port)
277 		xe_execlist_port_destroy(hwe->exl_port);
278 
279 	hwe->gt = NULL;
280 }
281 
282 /**
283  * xe_hw_engine_mmio_write32() - Write engine register
284  * @hwe: engine
285  * @reg: register to write into
286  * @val: desired 32-bit value to write
287  *
288  * This function will write val into an engine specific register.
289  * Forcewake must be held by the caller.
290  *
291  */
292 void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
293 			       struct xe_reg reg, u32 val)
294 {
295 	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
296 	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
297 
298 	reg.addr += hwe->mmio_base;
299 
300 	xe_mmio_write32(&hwe->gt->mmio, reg, val);
301 }
302 
303 /**
304  * xe_hw_engine_mmio_read32() - Read engine register
305  * @hwe: engine
306  * @reg: register to read from
307  *
308  * This function will read from an engine specific register.
309  * Forcewake must be held by the caller.
310  *
311  * Return: value of the 32-bit register.
312  */
313 u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
314 {
315 	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
316 	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
317 
318 	reg.addr += hwe->mmio_base;
319 
320 	return xe_mmio_read32(&hwe->gt->mmio, reg);
321 }
322 
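/**
 * xe_hw_engine_enable_ring() - Enable basic submission on an engine
 * @hwe: engine
 *
 * Programs the hardware status page address, disables the legacy ring-buffer
 * mode and clears the ring-stop bit; for compute engines it also enables the
 * CCS in RCU_MODE. Forcewake must be held by the caller.
 */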
323 void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
324 {
325 	u32 ccs_mask =
326 		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
327 
328 	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
329 		xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
330 				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
331 
332 	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
333 	xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
334 				  xe_bo_ggtt_addr(hwe->hwsp));
335 	xe_hw_engine_mmio_write32(hwe, RING_MODE(0),
336 				  _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
337 	xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
338 				  _MASKED_BIT_DISABLE(STOP_RING));
339 	xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
340 }
341 
342 static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
343 						 const struct xe_hw_engine *hwe)
344 {
345 	return xe_gt_ccs_mode_enabled(gt) &&
346 	       xe_rtp_match_first_render_or_compute(gt, hwe);
347 }
348 
349 static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
350 				      const struct xe_hw_engine *hwe)
351 {
352 	if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
353 		return false;
354 
355 	if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
356 	    hwe->class != XE_ENGINE_CLASS_RENDER)
357 		return false;
358 
359 	return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
360 }
361 
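/**
 * xe_hw_engine_setup_default_lrc_state() - Build default LRC register state
 * @hwe: engine
 *
 * Processes the RTP rules below (default BLIT_CCTL MOCS, fixed-slice CCS
 * mode, WMTP disable on unsupported HW) into the engine's default LRC
 * save/restore list.
 */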
362 void
363 xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
364 {
365 	struct xe_gt *gt = hwe->gt;
366 	const u8 mocs_write_idx = gt->mocs.uc_index;
367 	const u8 mocs_read_idx = gt->mocs.uc_index;
368 	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
369 			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
370 	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
371 	const struct xe_rtp_entry_sr lrc_setup[] = {
372 		/*
373 		 * Some blitter commands do not have a field for MOCS; those
374 		 * commands use the MOCS index pointed to by BLIT_CCTL, so
375 		 * BLIT_CCTL needs to be programmed to an un-cached index.
376 		 */
377 		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
378 		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
379 			       ENGINE_CLASS(COPY)),
380 		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
381 				 BLIT_CCTL_DST_MOCS_MASK |
382 				 BLIT_CCTL_SRC_MOCS_MASK,
383 				 blit_cctl_val,
384 				 XE_RTP_ACTION_FLAG(ENGINE_BASE)))
385 		},
386 		/* Use Fixed slice CCS mode */
387 		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
388 		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
389 		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
390 					   RCU_MODE_FIXED_SLICE_CCS_MODE))
391 		},
392 		/* Disable WMTP if HW doesn't support it */
393 		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
394 		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
395 		  XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
396 					   PREEMPT_GPGPU_LEVEL_MASK,
397 					   PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
398 		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
399 		},
400 		{}
401 	};
402 
403 	xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc);
404 }
405 
406 static void
407 hw_engine_setup_default_state(struct xe_hw_engine *hwe)
408 {
409 	struct xe_gt *gt = hwe->gt;
410 	struct xe_device *xe = gt_to_xe(gt);
411 	/*
412 	 * RING_CMD_CCTL specifies the default MOCS entry that will be
413 	 * used by the command streamer when executing commands that
414 	 * don't have a way to explicitly specify a MOCS setting.
415 	 * The default should usually reference whichever MOCS entry
416 	 * corresponds to uncached behavior, although use of a WB cached
417 	 * entry is recommended by the spec in certain circumstances on
418 	 * specific platforms.
419 	 * Bspec: 72161
420 	 */
421 	const u8 mocs_write_idx = gt->mocs.uc_index;
422 	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
423 				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
424 				 gt->mocs.wb_index : gt->mocs.uc_index;
425 	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
426 				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
427 	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
428 	const struct xe_rtp_entry_sr engine_entries[] = {
429 		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
430 		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
431 		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
432 					   CMD_CCTL_WRITE_OVERRIDE_MASK |
433 					   CMD_CCTL_READ_OVERRIDE_MASK,
434 					   ring_cmd_cctl_val,
435 					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
436 		},
437 		/*
438 		 * To allow the GSC engine to go idle on MTL we need to enable
439 		 * idle messaging and set the hysteresis value (we use 0xA=5us
440 		 * as recommended in spec). On platforms after MTL this is
441 		 * enabled by default.
442 		 */
443 		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
444 		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
445 		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
446 				     IDLE_MSG_DISABLE,
447 				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
448 				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
449 					   IDLE_WAIT_TIME,
450 					   0xA,
451 					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
452 		},
453 		/* Enable Priority Mem Read */
454 		{ XE_RTP_NAME("Priority_Mem_Read"),
455 		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
456 		  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
457 				     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
458 		},
459 		{}
460 	};
461 
462 	xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
463 }
464 
465 static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
466 {
467 	const struct engine_info *info;
468 	enum xe_hw_engine_id id;
469 
470 	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
471 		info = &engine_infos[id];
472 		if (info->class == class && info->instance == instance)
473 			return info;
474 	}
475 
476 	return NULL;
477 }
478 
479 static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
480 {
481 	/* For MSI-X, hw engines report to the offset of engine instance zero */
482 	const struct engine_info *info = find_engine_info(class, 0);
483 
484 	xe_gt_assert(gt, info);
485 
486 	return info ? info->irq_offset : 0;
487 }
488 
489 static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
490 				 enum xe_hw_engine_id id)
491 {
492 	const struct engine_info *info;
493 
494 	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
495 		return;
496 
497 	if (!(gt->info.engine_mask & BIT(id)))
498 		return;
499 
500 	info = &engine_infos[id];
501 
502 	xe_gt_assert(gt, !hwe->gt);
503 
504 	hwe->gt = gt;
505 	hwe->class = info->class;
506 	hwe->instance = info->instance;
507 	hwe->mmio_base = info->mmio_base;
508 	hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
509 		get_msix_irq_offset(gt, info->class) :
510 		info->irq_offset;
511 	hwe->domain = info->domain;
512 	hwe->name = info->name;
513 	hwe->fence_irq = &gt->fence_irq[info->class];
514 	hwe->engine_id = id;
515 
516 	hwe->eclass = &gt->eclass[hwe->class];
517 	if (!hwe->eclass->sched_props.job_timeout_ms) {
518 		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
519 		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
520 		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
521 		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
522 		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
523 		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
524 		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
525 		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
526 		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
527 
528 		/*
529 		 * The GSC engine can accept submissions while the GSC shim is
530 		 * being reset, during which time the submission is stalled. In
531 		 * the worst case, the shim reset can take up to the maximum GSC
532 		 * command execution time (250ms), so the request start can be
533 		 * delayed by that much; the request itself can take that long
534 		 * without being preemptible, which means worst case it can
535 		 * theoretically take up to 500ms for a preemption to go through
536 		 * on the GSC engine. Adding to that an extra 100ms as a safety
537 		 * margin, we get a minimum recommended timeout of 600ms.
538 		 * The preempt_timeout value can't be tuned for OTHER_CLASS
539 		 * because the class is reserved for kernel usage, so we just
540 		 * need to make sure that the starting value is above that
541 		 * threshold; since our default value (640ms) is greater than
542 		 * 600ms, the only way we can go below is via a kconfig setting.
543 		 * If that happens, log it in dmesg and update the value.
544 		 */
545 		if (hwe->class == XE_ENGINE_CLASS_OTHER) {
546 			const u32 min_preempt_timeout = 600 * 1000;

547 			if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
548 				hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
549 				xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
550 			}
551 		}
552 
553 		/* Record default props */
554 		hwe->eclass->defaults = hwe->eclass->sched_props;
555 	}
556 
557 	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
558 	xe_tuning_process_engine(hwe);
559 	xe_wa_process_engine(hwe);
560 	hw_engine_setup_default_state(hwe);
561 
562 	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
563 	xe_reg_whitelist_process_engine(hwe);
564 }
565 
566 static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
567 			  enum xe_hw_engine_id id)
568 {
569 	struct xe_device *xe = gt_to_xe(gt);
570 	struct xe_tile *tile = gt_to_tile(gt);
571 	int err;
572 
573 	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
574 	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));
575 
576 	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
577 
578 	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
579 						 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
580 						 XE_BO_FLAG_GGTT |
581 						 XE_BO_FLAG_GGTT_INVALIDATE);
582 	if (IS_ERR(hwe->hwsp)) {
583 		err = PTR_ERR(hwe->hwsp);
584 		goto err_name;
585 	}
586 
587 	if (!xe_device_uc_enabled(xe)) {
588 		hwe->exl_port = xe_execlist_port_create(xe, hwe);
589 		if (IS_ERR(hwe->exl_port)) {
590 			err = PTR_ERR(hwe->exl_port);
591 			goto err_hwsp;
592 		}
593 	} else {
594 		/* GSCCS has a special interrupt for reset */
595 		if (hwe->class == XE_ENGINE_CLASS_OTHER)
596 			hwe->irq_handler = xe_gsc_hwe_irq_handler;
597 
598 		if (!IS_SRIOV_VF(xe))
599 			xe_hw_engine_enable_ring(hwe);
600 	}
601 
602 	/* We reserve the highest BCS instance for USM */
603 	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
604 		gt->usm.reserved_bcs_instance = hwe->instance;
605 
606 	return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
607 
608 err_hwsp:
609 	xe_bo_unpin_map_no_vm(hwe->hwsp);
610 err_name:
611 	hwe->name = NULL;
612 
613 	return err;
614 }
615 
616 static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
617 {
618 	int class;
619 
620 	/* FIXME: Doing a simple logical mapping that works for most hardware */
621 	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
622 		struct xe_hw_engine *hwe;
623 		enum xe_hw_engine_id id;
624 		int logical_instance = 0;
625 
626 		for_each_hw_engine(hwe, gt, id)
627 			if (hwe->class == class)
628 				hwe->logical_instance = logical_instance++;
629 	}
630 }
631 
632 static void read_media_fuses(struct xe_gt *gt)
633 {
634 	struct xe_device *xe = gt_to_xe(gt);
635 	u32 media_fuse;
636 	u16 vdbox_mask;
637 	u16 vebox_mask;
638 	int i, j;
639 
640 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
641 
642 	media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);
643 
644 	/*
645 	 * Pre-Xe_HP platforms had register bits representing absent engines,
646 	 * whereas Xe_HP and beyond have bits representing present engines.
647 	 * Invert the polarity on old platforms so that we can use common
648 	 * handling below.
649 	 */
650 	if (GRAPHICS_VERx100(xe) < 1250)
651 		media_fuse = ~media_fuse;
652 
653 	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
654 	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);
655 
656 	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
657 		if (!(gt->info.engine_mask & BIT(i)))
658 			continue;
659 
660 		if (!(BIT(j) & vdbox_mask)) {
661 			gt->info.engine_mask &= ~BIT(i);
662 			drm_info(&xe->drm, "vcs%u fused off\n", j);
663 		}
664 	}
665 
666 	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
667 		if (!(gt->info.engine_mask & BIT(i)))
668 			continue;
669 
670 		if (!(BIT(j) & vebox_mask)) {
671 			gt->info.engine_mask &= ~BIT(i);
672 			drm_info(&xe->drm, "vecs%u fused off\n", j);
673 		}
674 	}
675 }
676 
677 static void read_copy_fuses(struct xe_gt *gt)
678 {
679 	struct xe_device *xe = gt_to_xe(gt);
680 	u32 bcs_mask;
681 
682 	if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
683 		return;
684 
685 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
686 
687 	bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
688 	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
689 
690 	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
691 	for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
692 		if (!(gt->info.engine_mask & BIT(i)))
693 			continue;
694 
695 		if (!(BIT(j / 2) & bcs_mask)) {
696 			gt->info.engine_mask &= ~BIT(i);
697 			drm_info(&xe->drm, "bcs%u fused off\n", j);
698 		}
699 	}
700 }
701 
702 static void read_compute_fuses_from_dss(struct xe_gt *gt)
703 {
704 	struct xe_device *xe = gt_to_xe(gt);
705 
706 	/*
707 	 * CCS fusing based on DSS masks only applies to platforms that can
708 	 * have more than one CCS.
709 	 */
710 	if (hweight64(gt->info.engine_mask &
711 		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
712 		return;
713 
714 	/*
715 	 * CCS availability on Xe_HP is inferred from the presence of DSS in
716 	 * each quadrant.
717 	 */
718 	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
719 		if (!(gt->info.engine_mask & BIT(i)))
720 			continue;
721 
722 		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
723 			gt->info.engine_mask &= ~BIT(i);
724 			drm_info(&xe->drm, "ccs%u fused off\n", j);
725 		}
726 	}
727 }
728 
729 static void read_compute_fuses_from_reg(struct xe_gt *gt)
730 {
731 	struct xe_device *xe = gt_to_xe(gt);
732 	u32 ccs_mask;
733 
734 	ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
735 	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);
736 
737 	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
738 		if (!(gt->info.engine_mask & BIT(i)))
739 			continue;
740 
741 		if ((ccs_mask & BIT(j)) == 0) {
742 			gt->info.engine_mask &= ~BIT(i);
743 			drm_info(&xe->drm, "ccs%u fused off\n", j);
744 		}
745 	}
746 }
747 
748 static void read_compute_fuses(struct xe_gt *gt)
749 {
750 	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
751 		read_compute_fuses_from_reg(gt);
752 	else
753 		read_compute_fuses_from_dss(gt);
754 }
755 
756 static void check_gsc_availability(struct xe_gt *gt)
757 {
758 	struct xe_device *xe = gt_to_xe(gt);
759 
760 	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
761 		return;
762 
763 	/*
764 	 * The GSCCS is only used to communicate with the GSC FW, so if we don't
765 	 * have the FW there is nothing we need the engine for, and we can
766 	 * therefore skip its initialization.
767 	 */
768 	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
769 		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
770 
771 		/* interrupts were previously enabled, so turn them off */
772 		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
773 		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
774 
775 		drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
776 	}
777 }
778 
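/**
 * xe_hw_engines_init_early() - Early initialization of the GT's hw engines
 * @gt: GT structure
 *
 * Prunes fused-off engines from the GT's engine mask based on the fuse
 * registers and performs the software-only part of the per-engine setup
 * (scheduling defaults, tunings, workarounds, whitelist).
 *
 * Return: 0 on success, negative error code otherwise.
 */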
779 int xe_hw_engines_init_early(struct xe_gt *gt)
780 {
781 	int i;
782 
783 	read_media_fuses(gt);
784 	read_copy_fuses(gt);
785 	read_compute_fuses(gt);
786 	check_gsc_availability(gt);
787 
788 	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
789 	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);
790 
791 	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
792 		hw_engine_init_early(gt, &gt->hw_engines[i], i);
793 
794 	return 0;
795 }
796 
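/**
 * xe_hw_engines_init() - Initialize the GT's hw engines
 * @gt: GT structure
 *
 * Completes per-engine initialization (HWSP allocation, execlist port or ring
 * enabling), then sets up the logical engine mapping and hw engine groups.
 *
 * Return: 0 on success, negative error code otherwise.
 */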
797 int xe_hw_engines_init(struct xe_gt *gt)
798 {
799 	int err;
800 	struct xe_hw_engine *hwe;
801 	enum xe_hw_engine_id id;
802 
803 	for_each_hw_engine(hwe, gt, id) {
804 		err = hw_engine_init(gt, hwe, id);
805 		if (err)
806 			return err;
807 	}
808 
809 	hw_engine_setup_logical_mapping(gt);
810 	err = xe_hw_engine_setup_groups(gt);
811 	if (err)
812 		return err;
813 
814 	return 0;
815 }
816 
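/**
 * xe_hw_engine_handle_irq() - Handle an interrupt reported for an engine
 * @hwe: engine
 * @intr_vec: interrupt vector bits
 *
 * Wakes user-fence waiters, forwards the vector to the engine-specific IRQ
 * handler (if any) and runs the hw fence IRQ on user interrupts.
 */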
817 void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
818 {
819 	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);
820 
821 	if (hwe->irq_handler)
822 		hwe->irq_handler(hwe, intr_vec);
823 
824 	if (intr_vec & GT_RENDER_USER_INTERRUPT)
825 		xe_hw_fence_irq_run(hwe->fence_irq);
826 }
827 
828 /**
829  * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
830  * @hwe: Xe HW Engine.
831  * @q: The exec queue object.
832  *
833  * This can be printed out at a later stage, for example during dev_coredump
834  * analysis.
835  *
836  * Returns: a Xe HW Engine snapshot object that must be freed by the
837  * caller, using `xe_hw_engine_snapshot_free`.
838  */
839 struct xe_hw_engine_snapshot *
840 xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
841 {
842 	struct xe_hw_engine_snapshot *snapshot;
843 	struct __guc_capture_parsed_output *node;
844 
845 	if (!xe_hw_engine_is_valid(hwe))
846 		return NULL;
847 
848 	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
849 
850 	if (!snapshot)
851 		return NULL;
852 
853 	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
854 	snapshot->hwe = hwe;
855 	snapshot->logical_instance = hwe->logical_instance;
856 	snapshot->forcewake.domain = hwe->domain;
857 	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
858 						    hwe->domain);
859 	snapshot->mmio_base = hwe->mmio_base;
860 	snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);
861 
862 	/* no more VF accessible data below this point */
863 	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
864 		return snapshot;
865 
866 	if (q) {
867 		/* If a GuC capture node is available, use it as the source */
868 		node = xe_guc_capture_get_matching_and_lock(q);
869 		if (node) {
870 			struct xe_device *xe = gt_to_xe(hwe->gt);
871 			struct xe_devcoredump *coredump = &xe->devcoredump;
872 
873 			coredump->snapshot.matched_node = node;
874 			xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
875 			return snapshot;
876 		}
877 	}
878 
879 	/* otherwise, do manual capture */
880 	xe_engine_manual_capture(hwe, snapshot);
881 	xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");
882 
883 	return snapshot;
884 }
885 
886 /**
887  * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
888  * @snapshot: Xe HW Engine snapshot object.
889  *
890  * This function frees all the memory that was allocated at capture
891  * time.
892  */
893 void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
894 {
895 	struct xe_gt *gt;

896 	if (!snapshot)
897 		return;
898 
899 	gt = snapshot->hwe->gt;
900 	/*
901 	 * xe_guc_capture_put_matched_nodes is called here and from
902 	 * xe_devcoredump_snapshot_free, to cover the 2 calling paths
903 	 * of hw_engines - debugfs and devcoredump free.
904 	 */
905 	xe_guc_capture_put_matched_nodes(&gt->uc.guc);
906 
907 	kfree(snapshot->name);
908 	kfree(snapshot);
909 }
910 
911 /**
912  * xe_hw_engine_print - Xe HW Engine Print.
913  * @hwe: Hardware Engine.
914  * @p: drm_printer.
915  *
916  * This function quickly captures a snapshot and immediately prints it out.
917  */
918 void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
919 {
920 	struct xe_hw_engine_snapshot *snapshot;
921 
922 	snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
923 	xe_engine_snapshot_print(snapshot, p);
924 	xe_hw_engine_snapshot_free(snapshot);
925 }
926 
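/**
 * xe_hw_engine_mask_per_class() - Build a mask of present engine instances
 * @gt: GT structure
 * @engine_class: engine class
 *
 * Return: bitmask of the @engine_class instances present on @gt.
 */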
927 u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
928 				enum xe_engine_class engine_class)
929 {
930 	u32 mask = 0;
931 	enum xe_hw_engine_id id;
932 
933 	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
934 		if (engine_infos[id].class == engine_class &&
935 		    gt->info.engine_mask & BIT(id))
936 			mask |= BIT(engine_infos[id].instance);
937 	}
938 	return mask;
939 }
940 
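/**
 * xe_hw_engine_is_reserved() - Is hw engine reserved for kernel usage?
 * @hwe: hw engine
 *
 * Return: true if hw engine is reserved, false otherwise.
 */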
941 bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
942 {
943 	struct xe_gt *gt = hwe->gt;
944 	struct xe_device *xe = gt_to_xe(gt);
945 
946 	if (hwe->class == XE_ENGINE_CLASS_OTHER)
947 		return true;
948 
949 	/* Check for engines disabled by ccs_mode setting */
950 	if (xe_gt_ccs_mode_enabled(gt) &&
951 	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
952 	    hwe->logical_instance >= gt->ccs_mode)
953 		return true;
954 
955 	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
956 		hwe->instance == gt->usm.reserved_bcs_instance;
957 }
958 
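/**
 * xe_hw_engine_class_to_str() - Convert a hw engine class to a string
 * @class: hw engine class
 *
 * Return: name of the hw engine class, or NULL for an invalid class.
 */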
959 const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
960 {
961 	switch (class) {
962 	case XE_ENGINE_CLASS_RENDER:
963 		return "rcs";
964 	case XE_ENGINE_CLASS_VIDEO_DECODE:
965 		return "vcs";
966 	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
967 		return "vecs";
968 	case XE_ENGINE_CLASS_COPY:
969 		return "bcs";
970 	case XE_ENGINE_CLASS_OTHER:
971 		return "other";
972 	case XE_ENGINE_CLASS_COMPUTE:
973 		return "ccs";
974 	case XE_ENGINE_CLASS_MAX:
975 		break;
976 	}
977 
978 	return NULL;
979 }
980 
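/**
 * xe_hw_engine_read_timestamp() - Read the engine timestamp
 * @hwe: hw engine
 *
 * Return: the 64-bit RING_TIMESTAMP value for @hwe.
 */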
981 u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
982 {
983 	return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
984 }
985 
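/**
 * xe_hw_engine_to_fw_domain() - Forcewake domain of a hw engine
 * @hwe: hw engine
 *
 * Return: the forcewake domain covering @hwe's registers.
 */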
986 enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
987 {
988 	return engine_infos[hwe->engine_id].domain;
989 }
990 
991 static const enum xe_engine_class user_to_xe_engine_class[] = {
992 	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
993 	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
994 	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
995 	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
996 	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
997 };
998 
999 /**
1000  * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
1001  * @xe: xe device
1002  * @eci: engine class and instance
1003  *
1004  * This function will find a hardware engine for the given engine
1005  * class and instance.
1006  *
1007  * Return: the xe_hw_engine pointer if found, NULL otherwise.
1008  */
1009 struct xe_hw_engine *
1010 xe_hw_engine_lookup(struct xe_device *xe,
1011 		    struct drm_xe_engine_class_instance eci)
1012 {
1013 	unsigned int idx;
1014 
1015 	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
1016 		return NULL;
1017 
1018 	if (eci.gt_id >= xe->info.gt_count)
1019 		return NULL;
1020 
1021 	idx = array_index_nospec(eci.engine_class,
1022 				 ARRAY_SIZE(user_to_xe_engine_class));
1023 
1024 	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
1025 			       user_to_xe_engine_class[idx],
1026 			       eci.engine_instance, true);
1027 }
1028