xref: /linux/drivers/gpu/drm/xe/xe_hw_engine.c (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <drm/drm_managed.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_sched_job.h"
#include "xe_wa.h"

#include "gt/intel_engine_regs.h"
#include "i915_reg.h"
#include "gt/intel_gt_regs.h"

#define MAX_MMIO_BASES 3
struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	enum xe_force_wake_domains domain;
	/* mmio bases table *must* be sorted in reverse graphics_ver order */
	struct engine_mmio_base {
		unsigned int graphics_ver : 8;
		unsigned int base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 1, .base = RENDER_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 6, .base = BLT_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE }
		},
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE }
		},
	},

	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
			{ .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
			{ .graphics_ver = 4, .base = BSD_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
			{ .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .graphics_ver = 7, .base = VEBOX_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
		},
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
		},
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = GEN12_COMPUTE0_RING_BASE },
		},
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = GEN12_COMPUTE1_RING_BASE },
		},
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = GEN12_COMPUTE2_RING_BASE },
		},
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.domain = XE_FW_RENDER,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = GEN12_COMPUTE3_RING_BASE },
		},
	},
};

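/*
 * Pick the MMIO base for an engine by walking its mmio_bases table, which is
 * sorted in reverse graphics_ver order, and returning the first entry whose
 * graphics_ver is not newer than the running platform.
 */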
static u32 engine_info_mmio_base(const struct engine_info *info,
				 unsigned int graphics_ver)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (graphics_ver >= info->mmio_bases[i].graphics_ver)
			break;

	XE_BUG_ON(i == MAX_MMIO_BASES);
	XE_BUG_ON(!info->mmio_bases[i].base);

	return info->mmio_bases[i].base;
}

static void hw_engine_fini(struct drm_device *drm, void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);
	xe_lrc_finish(&hwe->kernel_lrc);

	xe_bo_unpin_map_no_vm(hwe->hwsp);

	hwe->gt = NULL;
}

static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, u32 reg, u32 val)
{
	XE_BUG_ON(reg & hwe->mmio_base);
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	xe_mmio_write32(hwe->gt, reg + hwe->mmio_base, val);
}

static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, u32 reg)
{
	XE_BUG_ON(reg & hwe->mmio_base);
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	return xe_mmio_read32(hwe->gt, reg + hwe->mmio_base);
}

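/**
 * xe_hw_engine_enable_ring() - Enable an engine's ring
 * @hwe: the hardware engine
 *
 * Programs HWSTAM, the HWSP GGTT address and the ring mode registers, and
 * clears STOP_RING. For compute engines this also enables CCS dispatch via
 * GEN12_RCU_MODE. The caller must hold forcewake for the engine's domain.
 */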
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask & BIT(0))
		xe_mmio_write32(hwe->gt, GEN12_RCU_MODE.reg,
				_MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));

	hw_engine_mmio_write32(hwe, RING_HWSTAM(0).reg, ~0x0);
	hw_engine_mmio_write32(hwe, RING_HWS_PGA(0).reg,
			       xe_bo_ggtt_addr(hwe->hwsp));
	hw_engine_mmio_write32(hwe, RING_MODE_GEN7(0).reg,
			       _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
	hw_engine_mmio_write32(hwe, RING_MI_MODE(0).reg,
			       _MASKED_BIT_DISABLE(STOP_RING));
	hw_engine_mmio_read32(hwe, RING_MI_MODE(0).reg);
}

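/*
 * Set up the software-only state of an engine: name, class, instance, MMIO
 * base, forcewake domain and fence IRQ, and build its workaround and
 * whitelist save/restore tables. The tables are applied to the hardware
 * later, in hw_engine_init().
 */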
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	XE_BUG_ON(hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = engine_info_mmio_base(info, GRAPHICS_VER(xe));
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_wa_process_engine(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}

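/*
 * Finish bringing up an engine: apply the workaround and whitelist registers,
 * allocate and pin the HWSP BO, initialize the kernel LRC and, when GuC
 * submission is disabled, create an execlist port. Cleanup is registered as a
 * DRM managed action (hw_engine_fini).
 */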
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	XE_BUG_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name);
	XE_BUG_ON(!(gt->info.engine_mask & BIT(id)));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
	xe_reg_sr_apply_whitelist(&hwe->reg_whitelist, hwe->mmio_base, gt);

	hwe->hwsp = xe_bo_create_locked(xe, gt, NULL, SZ_4K, ttm_bo_type_kernel,
					XE_BO_CREATE_VRAM_IF_DGFX(gt) |
					XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	err = xe_bo_pin(hwe->hwsp);
	if (err)
		goto err_unlock_put_hwsp;

	err = xe_bo_vmap(hwe->hwsp);
	if (err)
		goto err_unpin_hwsp;

	xe_bo_unlock_no_vm(hwe->hwsp);

	err = xe_lrc_init(&hwe->kernel_lrc, hwe, NULL, NULL, SZ_16K);
	if (err)
		goto err_hwsp;

	if (!xe_device_guc_submission_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_kernel_lrc;
		}
	}

	if (xe_device_guc_submission_enabled(xe))
		xe_hw_engine_enable_ring(hwe);

	/* We reserve the highest BCS instance for USM */
	if (xe->info.supports_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	err = drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
	if (err)
		return err;

	return 0;

err_unpin_hwsp:
	xe_bo_unpin(hwe->hwsp);
err_unlock_put_hwsp:
	/* The HWSP BO is still locked on these paths */
	xe_bo_unlock_no_vm(hwe->hwsp);
	xe_bo_put(hwe->hwsp);
	hwe->name = NULL;

	return err;

err_kernel_lrc:
	xe_lrc_finish(&hwe->kernel_lrc);
err_hwsp:
	/* The HWSP BO is pinned, vmapped and unlocked by now */
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}

static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}

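/*
 * Read the media and blitter fuse registers and clear the corresponding bits
 * in gt->info.engine_mask for engines that are fused off, so they are never
 * initialized.
 */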
static void read_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	u32 bcs_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	/*
	 * FIXME: Hack job, thinking we should have table of vfuncs for each
	 * class which picks the correct vfunc based on IP version.
	 */

	media_fuse = xe_mmio_read32(gt, GEN11_GT_VEBOX_VDBOX_DISABLE.reg);
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vecs%u fused off\n", j);
		}
	}

	bcs_mask = xe_mmio_read32(gt, GEN10_MIRROR_FUSE3.reg);
	bcs_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, bcs_mask);

	for (i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j / 2) & bcs_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			/* The loop starts at BCS1, so the engine number is j + 1 */
			drm_info(&xe->drm, "bcs%u fused off\n", j + 1);
		}
	}

	/* TODO: compute engines */
}

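/**
 * xe_hw_engines_init_early() - Early per-GT engine initialization
 * @gt: the GT
 *
 * Reads the engine fuse registers and initializes the software state of
 * every engine present in the GT's engine mask. No engine registers are
 * programmed here.
 *
 * Return: 0 on success.
 */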
int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_fuses(gt);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

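/**
 * xe_hw_engines_init() - Finish initializing all engines on a GT
 * @gt: the GT
 *
 * Runs hw_engine_init() for every present engine and then assigns logical
 * instance numbers per engine class.
 *
 * Return: 0 on success, negative error code on failure.
 */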
int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);

	return 0;
}

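/**
 * xe_hw_engine_handle_irq() - Handle an interrupt for an engine
 * @hwe: the hardware engine
 * @intr_vec: interrupt vector bits for this engine
 *
 * Wakes user-fence waiters, forwards the vector to the engine's IRQ handler
 * if one is installed, and runs the engine's fence IRQ on user interrupts.
 */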
void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_RENDER_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}

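/**
 * xe_hw_engine_print_state() - Dump an engine's register state
 * @hwe: the hardware engine
 * @p: the &drm_printer to print to
 *
 * Prints the main ring, execlist and error registers of the engine. The
 * caller must hold forcewake for the engine's domain.
 */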
void xe_hw_engine_print_state(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	if (!xe_hw_engine_is_valid(hwe))
		return;

	drm_printf(p, "%s (physical), logical instance=%d\n", hwe->name,
		hwe->logical_instance);
	drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
		hwe->domain,
		xe_force_wake_ref(gt_to_fw(hwe->gt), hwe->domain));
	drm_printf(p, "\tMMIO base: 0x%08x\n", hwe->mmio_base);

	drm_printf(p, "\tHWSTAM: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_HWSTAM(0).reg));
	drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_HWS_PGA(0).reg));

	drm_printf(p, "\tRING_EXECLIST_STATUS_LO: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0).reg));
	drm_printf(p, "\tRING_EXECLIST_STATUS_HI: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0).reg));
	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_LO: 0x%08x\n",
		hw_engine_mmio_read32(hwe,
					 RING_EXECLIST_SQ_CONTENTS(0).reg));
	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_HI: 0x%08x\n",
		hw_engine_mmio_read32(hwe,
					 RING_EXECLIST_SQ_CONTENTS(0).reg + 4));
	drm_printf(p, "\tRING_EXECLIST_CONTROL: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_EXECLIST_CONTROL(0).reg));

	drm_printf(p, "\tRING_START: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_START(0).reg));
	drm_printf(p, "\tRING_HEAD:  0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_HEAD(0).reg) & HEAD_ADDR);
	drm_printf(p, "\tRING_TAIL:  0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_TAIL(0).reg) & TAIL_ADDR);
	drm_printf(p, "\tRING_CTL: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_CTL(0).reg));
	drm_printf(p, "\tRING_MODE: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_MI_MODE(0).reg));
	drm_printf(p, "\tRING_MODE_GEN7: 0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_MODE_GEN7(0).reg));

	drm_printf(p, "\tRING_IMR:   0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_IMR(0).reg));
	drm_printf(p, "\tRING_ESR:   0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_ESR(0).reg));
	drm_printf(p, "\tRING_EMR:   0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_EMR(0).reg));
	drm_printf(p, "\tRING_EIR:   0x%08x\n",
		hw_engine_mmio_read32(hwe, RING_EIR(0).reg));

	drm_printf(p, "\tACTHD:  0x%08x_%08x\n",
		hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0).reg),
		hw_engine_mmio_read32(hwe, RING_ACTHD(0).reg));
	drm_printf(p, "\tBBADDR: 0x%08x_%08x\n",
		hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0).reg),
		hw_engine_mmio_read32(hwe, RING_BBADDR(0).reg));
	drm_printf(p, "\tDMA_FADDR: 0x%08x_%08x\n",
		hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0).reg),
		hw_engine_mmio_read32(hwe, RING_DMA_FADD(0).reg));

	drm_printf(p, "\tIPEIR: 0x%08x\n",
		hw_engine_mmio_read32(hwe, IPEIR(0).reg));
	drm_printf(p, "\tIPEHR: 0x%08x\n\n",
		hw_engine_mmio_read32(hwe, IPEHR(0).reg));

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
		drm_printf(p, "\tGEN12_RCU_MODE: 0x%08x\n",
			xe_mmio_read32(hwe->gt, GEN12_RCU_MODE.reg));
}

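/**
 * xe_hw_engine_mask_per_class() - Instance mask of present engines in a class
 * @gt: the GT
 * @engine_class: the engine class
 *
 * Return: a bitmask with one bit set per hardware instance of @engine_class
 * that is present in @gt's engine mask.
 */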
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}

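/**
 * xe_hw_engine_is_reserved() - Is the engine reserved for kernel use?
 * @hwe: the hardware engine
 *
 * Return: true if @hwe is the copy engine reserved for USM (the highest BCS
 * instance on USM-capable devices), false otherwise.
 */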
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	return xe->info.supports_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}
659