xref: /linux/drivers/gpu/drm/xe/xe_hw_engine.c (revision 8cdcef1c2f82d207aa8b2a02298fbc17191c6261)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <drm/drm_managed.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

#define MAX_MMIO_BASES 3
struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	enum xe_force_wake_domains domain;
	u32 mmio_base;
};

static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_base = RENDER_RING_BASE,
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_base = BLT_RING_BASE,
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS1_RING_BASE,
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS2_RING_BASE,
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS3_RING_BASE,
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS4_RING_BASE,
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS5_RING_BASE,
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS6_RING_BASE,
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS7_RING_BASE,
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS8_RING_BASE,
	},

	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_base = BSD_RING_BASE,
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_base = BSD2_RING_BASE,
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_base = BSD3_RING_BASE,
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_base = BSD4_RING_BASE,
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_base = XEHP_BSD5_RING_BASE,
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_base = XEHP_BSD6_RING_BASE,
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_base = XEHP_BSD7_RING_BASE,
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_base = XEHP_BSD8_RING_BASE,
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_base = VEBOX_RING_BASE,
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_base = VEBOX2_RING_BASE,
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_base = XEHP_VEBOX3_RING_BASE,
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_base = XEHP_VEBOX4_RING_BASE,
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE0_RING_BASE,
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE1_RING_BASE,
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE2_RING_BASE,
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE3_RING_BASE,
	},
	[XE_HW_ENGINE_GSCCS0] = {
		.name = "gsccs0",
		.class = XE_ENGINE_CLASS_OTHER,
		.instance = OTHER_GSC_INSTANCE,
		.domain = XE_FW_GSC,
		.mmio_base = GSCCS_RING_BASE,
	},
};
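
/*
 * Note (illustrative): an engine's index in engine_infos doubles as its bit
 * position in gt->info.engine_mask, which is how the fuse and init code
 * below tests for presence, e.g.:
 *
 *	bool has_bcs1 = gt->info.engine_mask & BIT(XE_HW_ENGINE_BCS1);
 */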

static void hw_engine_fini(struct drm_device *drm, void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);
	xe_lrc_finish(&hwe->kernel_lrc);

	xe_bo_unpin_map_no_vm(hwe->hwsp);

	hwe->gt = NULL;
}

static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
				   u32 val)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	xe_mmio_write32(hwe->gt, reg, val);
}

static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	return xe_mmio_read32(hwe->gt, reg);
}
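
/*
 * Illustrative example of the engine-relative addressing done by the two
 * helpers above: registers declared as RING_FOO(0) carry only the offset
 * from an engine's MMIO base, so for an engine with, say, mmio_base ==
 * 0x1c4000 (vcs1 on recent platforms) the helpers access 0x1c4000 + offset:
 *
 *	u32 tail = hw_engine_mmio_read32(hwe, RING_TAIL(0));
 *
 * The asserts are meant to catch registers that already had a base folded
 * in, i.e. RING_FOO(base) instead of RING_FOO(0).
 */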

void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
		xe_mmio_write32(hwe->gt, RCU_MODE,
				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));

	hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
	hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
			       xe_bo_ggtt_addr(hwe->hwsp));
	hw_engine_mmio_write32(hwe, RING_MODE(0),
			       _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
	hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
			       _MASKED_BIT_DISABLE(STOP_RING));
	hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}
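
/*
 * The _MASKED_BIT_* writes above target "masked" registers, where the upper
 * 16 bits select which of the lower 16 bits the write affects, avoiding a
 * read-modify-write cycle. A sketch of the usual encoding:
 *
 *	_MASKED_BIT_ENABLE(BIT(3))  == 0x00080008	// set bit 3
 *	_MASKED_BIT_DISABLE(BIT(3)) == 0x00080000	// clear bit 3
 *
 * The trailing read of RING_MI_MODE acts as a posting read to ensure the
 * writes have reached the hardware.
 */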

void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = gt->mocs.uc_index;
	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr lrc_was[] = {
		/*
		 * Some blitter commands do not have a field for MOCS; those
		 * commands use the MOCS index pointed to by BLIT_CCTL, so
		 * the BLIT_CCTL register needs to be programmed with the
		 * un-cached index.
		 */
		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
			       ENGINE_CLASS(COPY)),
		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
				 BLIT_CCTL_DST_MOCS_MASK |
				 BLIT_CCTL_SRC_MOCS_MASK,
				 blit_cctl_val,
				 XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
}
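
/*
 * A worked example of the packing above, assuming an uncached MOCS index of
 * 3: REG_FIELD_PREP() shifts the value into the field described by the mask,
 * so blit_cctl_val ends up carrying 3 in both the destination and source
 * MOCS fields of BLIT_CCTL:
 *
 *	blit_cctl_val == REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, 3) |
 *			 REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, 3);
 */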

static void
hw_engine_setup_default_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be
	 * used by the command streamer when executing commands that
	 * don't have a way to explicitly specify a MOCS setting.
	 * The default should usually reference whichever MOCS entry
	 * corresponds to uncached behavior, although use of a WB cached
	 * entry is recommended by the spec in certain circumstances on
	 * specific platforms.
	 * Bspec: 72161
	 */
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
				 gt->mocs.wb_index : gt->mocs.uc_index;
	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr engine_entries[] = {
		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
					   CMD_CCTL_WRITE_OVERRIDE_MASK |
					   CMD_CCTL_READ_OVERRIDE_MASK,
					   ring_cmd_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/*
		 * To allow the GSC engine to go idle on MTL we need to enable
		 * idle messaging and set the hysteresis value (we use 0xA=5us
		 * as recommended in the spec). On platforms after MTL this is
		 * enabled by default.
		 */
		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
				     IDLE_MSG_DISABLE,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
					   IDLE_WAIT_TIME,
					   0xA,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
}
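
/*
 * Illustrative values for the override above (indices are hypothetical):
 * with uc_index == 3 and wb_index == 2, a compute engine on PVC or Xe2
 * would get
 *
 *	ring_cmd_cctl_val == REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, 3) |
 *			     REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, 2)
 *
 * i.e. default writes stay uncached while default reads use the WB entry.
 */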

static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	xe_gt_assert(gt, !hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = info->mmio_base;
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	hwe->eclass = &gt->eclass[hwe->class];
	if (!hwe->eclass->sched_props.job_timeout_ms) {
		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
		/* Record default props */
		hwe->eclass->defaults = hwe->eclass->sched_props;
	}

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_tuning_process_engine(hwe);
	xe_wa_process_engine(hwe);
	hw_engine_setup_default_state(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}

static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	int err;

	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
	xe_reg_sr_apply_whitelist(hwe);

	hwe->hwsp = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
					 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
					 XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	err = xe_lrc_init(&hwe->kernel_lrc, hwe, NULL, NULL, SZ_16K);
	if (err)
		goto err_hwsp;

	if (!xe_device_uc_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_kernel_lrc;
		}
	}

	if (xe_device_uc_enabled(xe))
		xe_hw_engine_enable_ring(hwe);

	/* We reserve the highest BCS instance for USM */
	if (xe->info.supports_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	err = drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
	if (err)
		return err;

	return 0;

err_kernel_lrc:
	xe_lrc_finish(&hwe->kernel_lrc);
err_hwsp:
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}

static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}
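
/*
 * Example of the resulting mapping: if vcs1 is fused off, the surviving
 * engines of the class are packed into a contiguous logical space:
 *
 *	physical: vcs0 vcs2 vcs3  ->  logical: 0 1 2
 */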

static void read_media_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);

	/*
	 * Pre-Xe_HP platforms had register bits representing absent engines,
	 * whereas Xe_HP and beyond have bits representing present engines.
	 * Invert the polarity on old platforms so that we can use common
	 * handling below.
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vecs%u fused off\n", j);
		}
	}
}
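
/*
 * Worked example of the polarity handling above, considering four vdboxes:
 * on a pre-Xe_HP part a raw disable field of 0b0010 means "vcs1 fused off";
 * after the inversion it reads as the presence mask 0b1101, matching the
 * Xe_HP+ semantics assumed by the loops.
 */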

static void read_copy_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 bcs_mask;

	if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);

	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
	for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j / 2) & bcs_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "bcs%u fused off\n", j + 1);
		}
	}
}
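
/*
 * The BIT(j / 2) test above reflects that each MEML3 enable bit covers a
 * pair of copy engines. Illustrative mapping:
 *
 *	meml3 bit 0 -> bcs1, bcs2
 *	meml3 bit 1 -> bcs3, bcs4
 *	meml3 bit 2 -> bcs5, bcs6
 *	meml3 bit 3 -> bcs7, bcs8
 */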

static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	/*
	 * CCS fusing based on DSS masks only applies to platforms that can
	 * have more than one CCS.
	 */
	if (hweight64(gt->info.engine_mask &
		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
		return;

	/*
	 * CCS availability on Xe_HP is inferred from the presence of DSS in
	 * each quadrant.
	 */
	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 ccs_mask;

	ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4);
	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);

	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if ((ccs_mask & BIT(j)) == 0) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses(struct xe_gt *gt)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
		read_compute_fuses_from_reg(gt);
	else
		read_compute_fuses_from_dss(gt);
}

static void check_gsc_availability(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return;

	/*
	 * The GSCCS is only used to communicate with the GSC FW, so if we
	 * don't have the FW there is nothing we need the engine for, and we
	 * can therefore skip its initialization.
	 */
	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
		drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
	}
}

int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_media_fuses(gt);
	read_copy_fuses(gt);
	read_compute_fuses(gt);
	check_gsc_availability(gt);

	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);

	return 0;
}

void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_RENDER_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}

/**
 * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
 * @hwe: Xe HW Engine.
 *
 * The snapshot can be printed out at a later stage, for example during
 * dev_coredump analysis.
 *
 * Returns: a Xe HW Engine snapshot object that must be freed by the
 * caller, using `xe_hw_engine_snapshot_free`.
 */
struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
{
	struct xe_hw_engine_snapshot *snapshot;
	int len;

	if (!xe_hw_engine_is_valid(hwe))
		return NULL;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);

	if (!snapshot)
		return NULL;

	len = strlen(hwe->name) + 1;
	snapshot->name = kzalloc(len, GFP_ATOMIC);
	if (snapshot->name)
		strscpy(snapshot->name, hwe->name, len);

	snapshot->class = hwe->class;
	snapshot->logical_instance = hwe->logical_instance;
	snapshot->forcewake.domain = hwe->domain;
	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
						    hwe->domain);
	snapshot->mmio_base = hwe->mmio_base;

	snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
	snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe,
							   RING_HWS_PGA(0));
	snapshot->reg.ring_execlist_status_lo =
		hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
	snapshot->reg.ring_execlist_status_hi =
		hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
	snapshot->reg.ring_execlist_sq_contents_lo =
		hw_engine_mmio_read32(hwe,
				      RING_EXECLIST_SQ_CONTENTS_LO(0));
	snapshot->reg.ring_execlist_sq_contents_hi =
		hw_engine_mmio_read32(hwe,
				      RING_EXECLIST_SQ_CONTENTS_HI(0));
	snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0));
	snapshot->reg.ring_head =
		hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
	snapshot->reg.ring_tail =
		hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
	snapshot->reg.ring_ctl = hw_engine_mmio_read32(hwe, RING_CTL(0));
	snapshot->reg.ring_mi_mode =
		hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
	snapshot->reg.ring_mode = hw_engine_mmio_read32(hwe, RING_MODE(0));
	snapshot->reg.ring_imr = hw_engine_mmio_read32(hwe, RING_IMR(0));
	snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0));
	snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
	snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
	snapshot->reg.ring_acthd_udw =
		hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
	snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
	snapshot->reg.ring_bbaddr_udw =
		hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
	snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
	snapshot->reg.ring_dma_fadd_udw =
		hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
	snapshot->reg.ring_dma_fadd =
		hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
	snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));

	if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
		snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);

	return snapshot;
}

/**
 * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given Xe HW Engine snapshot object.
 */
void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
				 struct drm_printer *p)
{
	if (!snapshot)
		return;

	drm_printf(p, "%s (physical), logical instance=%d\n",
		   snapshot->name ? snapshot->name : "",
		   snapshot->logical_instance);
	drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
		   snapshot->forcewake.domain, snapshot->forcewake.ref);
	drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
	drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
	drm_printf(p, "\tRING_EXECLIST_STATUS_LO: 0x%08x\n",
		   snapshot->reg.ring_execlist_status_lo);
	drm_printf(p, "\tRING_EXECLIST_STATUS_HI: 0x%08x\n",
		   snapshot->reg.ring_execlist_status_hi);
	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_LO: 0x%08x\n",
		   snapshot->reg.ring_execlist_sq_contents_lo);
	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_HI: 0x%08x\n",
		   snapshot->reg.ring_execlist_sq_contents_hi);
	drm_printf(p, "\tRING_START: 0x%08x\n", snapshot->reg.ring_start);
	drm_printf(p, "\tRING_HEAD:  0x%08x\n", snapshot->reg.ring_head);
	drm_printf(p, "\tRING_TAIL:  0x%08x\n", snapshot->reg.ring_tail);
	drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
	drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
	drm_printf(p, "\tRING_MODE: 0x%08x\n",
		   snapshot->reg.ring_mode);
	drm_printf(p, "\tRING_IMR:   0x%08x\n", snapshot->reg.ring_imr);
	drm_printf(p, "\tRING_ESR:   0x%08x\n", snapshot->reg.ring_esr);
	drm_printf(p, "\tRING_EMR:   0x%08x\n", snapshot->reg.ring_emr);
	drm_printf(p, "\tRING_EIR:   0x%08x\n", snapshot->reg.ring_eir);
	drm_printf(p, "\tACTHD:  0x%08x_%08x\n", snapshot->reg.ring_acthd_udw,
		   snapshot->reg.ring_acthd);
	drm_printf(p, "\tBBADDR: 0x%08x_%08x\n", snapshot->reg.ring_bbaddr_udw,
		   snapshot->reg.ring_bbaddr);
	drm_printf(p, "\tDMA_FADDR: 0x%08x_%08x\n",
		   snapshot->reg.ring_dma_fadd_udw,
		   snapshot->reg.ring_dma_fadd);
	drm_printf(p, "\tIPEHR: 0x%08x\n\n", snapshot->reg.ipehr);
	if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
		drm_printf(p, "\tRCU_MODE: 0x%08x\n",
			   snapshot->reg.rcu_mode);
}

/**
 * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
	if (!snapshot)
		return;

	kfree(snapshot->name);
	kfree(snapshot);
}

/**
 * xe_hw_engine_print - Xe HW Engine Print.
 * @hwe: Hardware Engine.
 * @p: drm_printer.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	snapshot = xe_hw_engine_snapshot_capture(hwe);
	xe_hw_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}

u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}
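
/*
 * Usage sketch: on a GT exposing bcs0 and bcs1, the call below would return
 * BIT(0) | BIT(1) == 0x3.
 *
 *	u32 bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
 */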

bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	if (hwe->class == XE_ENGINE_CLASS_OTHER)
		return true;

	return xe->info.supports_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}
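
/*
 * Illustrative use: callers enumerating engines for userspace are expected
 * to filter with the helper above, e.g. (expose_engine() is a hypothetical
 * helper, not part of this driver):
 *
 *	for_each_hw_engine(hwe, gt, id)
 *		if (!xe_hw_engine_is_reserved(hwe))
 *			expose_engine(hwe);
 */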
867