xref: /linux/drivers/gpu/drm/xe/xe_hw_engine.c (revision c0d6f52f9b62479d61f8cd4faf9fb2f8bce6e301)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_hw_engine.h"
7 
8 #include <linux/nospec.h>
9 
10 #include <drm/drm_managed.h>
11 #include <drm/drm_print.h>
12 #include <uapi/drm/xe_drm.h>
13 #include <generated/xe_wa_oob.h>
14 
15 #include "regs/xe_engine_regs.h"
16 #include "regs/xe_gt_regs.h"
17 #include "regs/xe_irq_regs.h"
18 #include "xe_assert.h"
19 #include "xe_bo.h"
20 #include "xe_configfs.h"
21 #include "xe_device.h"
22 #include "xe_execlist.h"
23 #include "xe_force_wake.h"
24 #include "xe_gsc.h"
25 #include "xe_gt.h"
26 #include "xe_gt_ccs_mode.h"
27 #include "xe_gt_clock.h"
28 #include "xe_gt_mcr.h"
29 #include "xe_gt_printk.h"
30 #include "xe_gt_topology.h"
31 #include "xe_guc_capture.h"
32 #include "xe_hw_engine_group.h"
33 #include "xe_hw_fence.h"
34 #include "xe_irq.h"
35 #include "xe_lrc.h"
36 #include "xe_mmio.h"
37 #include "xe_reg_sr.h"
38 #include "xe_reg_whitelist.h"
39 #include "xe_rtp.h"
40 #include "xe_sched_job.h"
41 #include "xe_sriov.h"
42 #include "xe_tuning.h"
43 #include "xe_uc_fw.h"
44 #include "xe_wa.h"
45 
46 #define MAX_MMIO_BASES 3
47 struct engine_info {
48 	const char *name;
49 	unsigned int class : 8;
50 	unsigned int instance : 8;
51 	unsigned int irq_offset : 8;
52 	enum xe_force_wake_domains domain;
53 	u32 mmio_base;
54 };
55 
56 static const struct engine_info engine_infos[] = {
57 	[XE_HW_ENGINE_RCS0] = {
58 		.name = "rcs0",
59 		.class = XE_ENGINE_CLASS_RENDER,
60 		.instance = 0,
61 		.irq_offset = ilog2(INTR_RCS0),
62 		.domain = XE_FW_RENDER,
63 		.mmio_base = RENDER_RING_BASE,
64 	},
65 	[XE_HW_ENGINE_BCS0] = {
66 		.name = "bcs0",
67 		.class = XE_ENGINE_CLASS_COPY,
68 		.instance = 0,
69 		.irq_offset = ilog2(INTR_BCS(0)),
70 		.domain = XE_FW_RENDER,
71 		.mmio_base = BLT_RING_BASE,
72 	},
73 	[XE_HW_ENGINE_BCS1] = {
74 		.name = "bcs1",
75 		.class = XE_ENGINE_CLASS_COPY,
76 		.instance = 1,
77 		.irq_offset = ilog2(INTR_BCS(1)),
78 		.domain = XE_FW_RENDER,
79 		.mmio_base = XEHPC_BCS1_RING_BASE,
80 	},
81 	[XE_HW_ENGINE_BCS2] = {
82 		.name = "bcs2",
83 		.class = XE_ENGINE_CLASS_COPY,
84 		.instance = 2,
85 		.irq_offset = ilog2(INTR_BCS(2)),
86 		.domain = XE_FW_RENDER,
87 		.mmio_base = XEHPC_BCS2_RING_BASE,
88 	},
89 	[XE_HW_ENGINE_BCS3] = {
90 		.name = "bcs3",
91 		.class = XE_ENGINE_CLASS_COPY,
92 		.instance = 3,
93 		.irq_offset = ilog2(INTR_BCS(3)),
94 		.domain = XE_FW_RENDER,
95 		.mmio_base = XEHPC_BCS3_RING_BASE,
96 	},
97 	[XE_HW_ENGINE_BCS4] = {
98 		.name = "bcs4",
99 		.class = XE_ENGINE_CLASS_COPY,
100 		.instance = 4,
101 		.irq_offset = ilog2(INTR_BCS(4)),
102 		.domain = XE_FW_RENDER,
103 		.mmio_base = XEHPC_BCS4_RING_BASE,
104 	},
105 	[XE_HW_ENGINE_BCS5] = {
106 		.name = "bcs5",
107 		.class = XE_ENGINE_CLASS_COPY,
108 		.instance = 5,
109 		.irq_offset = ilog2(INTR_BCS(5)),
110 		.domain = XE_FW_RENDER,
111 		.mmio_base = XEHPC_BCS5_RING_BASE,
112 	},
113 	[XE_HW_ENGINE_BCS6] = {
114 		.name = "bcs6",
115 		.class = XE_ENGINE_CLASS_COPY,
116 		.instance = 6,
117 		.irq_offset = ilog2(INTR_BCS(6)),
118 		.domain = XE_FW_RENDER,
119 		.mmio_base = XEHPC_BCS6_RING_BASE,
120 	},
121 	[XE_HW_ENGINE_BCS7] = {
122 		.name = "bcs7",
123 		.class = XE_ENGINE_CLASS_COPY,
124 		.instance = 7,
125 		.irq_offset = ilog2(INTR_BCS(7)),
126 		.domain = XE_FW_RENDER,
127 		.mmio_base = XEHPC_BCS7_RING_BASE,
128 	},
129 	[XE_HW_ENGINE_BCS8] = {
130 		.name = "bcs8",
131 		.class = XE_ENGINE_CLASS_COPY,
132 		.instance = 8,
133 		.irq_offset = ilog2(INTR_BCS8),
134 		.domain = XE_FW_RENDER,
135 		.mmio_base = XEHPC_BCS8_RING_BASE,
136 	},
137 
138 	[XE_HW_ENGINE_VCS0] = {
139 		.name = "vcs0",
140 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
141 		.instance = 0,
142 		.irq_offset = 32 + ilog2(INTR_VCS(0)),
143 		.domain = XE_FW_MEDIA_VDBOX0,
144 		.mmio_base = BSD_RING_BASE,
145 	},
146 	[XE_HW_ENGINE_VCS1] = {
147 		.name = "vcs1",
148 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
149 		.instance = 1,
150 		.irq_offset = 32 + ilog2(INTR_VCS(1)),
151 		.domain = XE_FW_MEDIA_VDBOX1,
152 		.mmio_base = BSD2_RING_BASE,
153 	},
154 	[XE_HW_ENGINE_VCS2] = {
155 		.name = "vcs2",
156 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
157 		.instance = 2,
158 		.irq_offset = 32 + ilog2(INTR_VCS(2)),
159 		.domain = XE_FW_MEDIA_VDBOX2,
160 		.mmio_base = BSD3_RING_BASE,
161 	},
162 	[XE_HW_ENGINE_VCS3] = {
163 		.name = "vcs3",
164 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
165 		.instance = 3,
166 		.irq_offset = 32 + ilog2(INTR_VCS(3)),
167 		.domain = XE_FW_MEDIA_VDBOX3,
168 		.mmio_base = BSD4_RING_BASE,
169 	},
170 	[XE_HW_ENGINE_VCS4] = {
171 		.name = "vcs4",
172 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
173 		.instance = 4,
174 		.irq_offset = 32 + ilog2(INTR_VCS(4)),
175 		.domain = XE_FW_MEDIA_VDBOX4,
176 		.mmio_base = XEHP_BSD5_RING_BASE,
177 	},
178 	[XE_HW_ENGINE_VCS5] = {
179 		.name = "vcs5",
180 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
181 		.instance = 5,
182 		.irq_offset = 32 + ilog2(INTR_VCS(5)),
183 		.domain = XE_FW_MEDIA_VDBOX5,
184 		.mmio_base = XEHP_BSD6_RING_BASE,
185 	},
186 	[XE_HW_ENGINE_VCS6] = {
187 		.name = "vcs6",
188 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
189 		.instance = 6,
190 		.irq_offset = 32 + ilog2(INTR_VCS(6)),
191 		.domain = XE_FW_MEDIA_VDBOX6,
192 		.mmio_base = XEHP_BSD7_RING_BASE,
193 	},
194 	[XE_HW_ENGINE_VCS7] = {
195 		.name = "vcs7",
196 		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
197 		.instance = 7,
198 		.irq_offset = 32 + ilog2(INTR_VCS(7)),
199 		.domain = XE_FW_MEDIA_VDBOX7,
200 		.mmio_base = XEHP_BSD8_RING_BASE,
201 	},
202 	[XE_HW_ENGINE_VECS0] = {
203 		.name = "vecs0",
204 		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
205 		.instance = 0,
206 		.irq_offset = 32 + ilog2(INTR_VECS(0)),
207 		.domain = XE_FW_MEDIA_VEBOX0,
208 		.mmio_base = VEBOX_RING_BASE,
209 	},
210 	[XE_HW_ENGINE_VECS1] = {
211 		.name = "vecs1",
212 		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
213 		.instance = 1,
214 		.irq_offset = 32 + ilog2(INTR_VECS(1)),
215 		.domain = XE_FW_MEDIA_VEBOX1,
216 		.mmio_base = VEBOX2_RING_BASE,
217 	},
218 	[XE_HW_ENGINE_VECS2] = {
219 		.name = "vecs2",
220 		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
221 		.instance = 2,
222 		.irq_offset = 32 + ilog2(INTR_VECS(2)),
223 		.domain = XE_FW_MEDIA_VEBOX2,
224 		.mmio_base = XEHP_VEBOX3_RING_BASE,
225 	},
226 	[XE_HW_ENGINE_VECS3] = {
227 		.name = "vecs3",
228 		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
229 		.instance = 3,
230 		.irq_offset = 32 + ilog2(INTR_VECS(3)),
231 		.domain = XE_FW_MEDIA_VEBOX3,
232 		.mmio_base = XEHP_VEBOX4_RING_BASE,
233 	},
234 	[XE_HW_ENGINE_CCS0] = {
235 		.name = "ccs0",
236 		.class = XE_ENGINE_CLASS_COMPUTE,
237 		.instance = 0,
238 		.irq_offset = ilog2(INTR_CCS(0)),
239 		.domain = XE_FW_RENDER,
240 		.mmio_base = COMPUTE0_RING_BASE,
241 	},
242 	[XE_HW_ENGINE_CCS1] = {
243 		.name = "ccs1",
244 		.class = XE_ENGINE_CLASS_COMPUTE,
245 		.instance = 1,
246 		.irq_offset = ilog2(INTR_CCS(1)),
247 		.domain = XE_FW_RENDER,
248 		.mmio_base = COMPUTE1_RING_BASE,
249 	},
250 	[XE_HW_ENGINE_CCS2] = {
251 		.name = "ccs2",
252 		.class = XE_ENGINE_CLASS_COMPUTE,
253 		.instance = 2,
254 		.irq_offset = ilog2(INTR_CCS(2)),
255 		.domain = XE_FW_RENDER,
256 		.mmio_base = COMPUTE2_RING_BASE,
257 	},
258 	[XE_HW_ENGINE_CCS3] = {
259 		.name = "ccs3",
260 		.class = XE_ENGINE_CLASS_COMPUTE,
261 		.instance = 3,
262 		.irq_offset = ilog2(INTR_CCS(3)),
263 		.domain = XE_FW_RENDER,
264 		.mmio_base = COMPUTE3_RING_BASE,
265 	},
266 	[XE_HW_ENGINE_GSCCS0] = {
267 		.name = "gsccs0",
268 		.class = XE_ENGINE_CLASS_OTHER,
269 		.instance = OTHER_GSC_INSTANCE,
270 		.domain = XE_FW_GSC,
271 		.mmio_base = GSCCS_RING_BASE,
272 	},
273 };
274 
275 static void hw_engine_fini(void *arg)
276 {
277 	struct xe_hw_engine *hwe = arg;
278 
279 	if (hwe->exl_port)
280 		xe_execlist_port_destroy(hwe->exl_port);
281 
282 	hwe->gt = NULL;
283 }
284 
285 /**
286  * xe_hw_engine_mmio_write32() - Write engine register
287  * @hwe: engine
288  * @reg: register to write into
289  * @val: desired 32-bit value to write
290  *
291  * This function writes @val into an engine-specific register.
292  * Forcewake must be held by the caller.
293  *
294  */
295 void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
296 			       struct xe_reg reg, u32 val)
297 {
298 	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
299 	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
300 
301 	reg.addr += hwe->mmio_base;
302 
303 	xe_mmio_write32(&hwe->gt->mmio, reg, val);
304 }
305 
306 /**
307  * xe_hw_engine_mmio_read32() - Read engine register
308  * @hwe: engine
309  * @reg: register to read from
310  *
311  * This function reads from an engine-specific register.
312  * Forcewake must be held by the caller.
313  *
314  * Return: value of the 32-bit register.
315  */
316 u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
317 {
318 	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
319 	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
320 
321 	reg.addr += hwe->mmio_base;
322 
323 	return xe_mmio_read32(&hwe->gt->mmio, reg);
324 }
325 
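/*
 * Usage sketch (editor's illustration, not part of the driver): callers are
 * expected to wrap these accessors in a forcewake get/put on the engine's
 * domain, roughly as below (forcewake helper names/signatures as assumed
 * from xe_force_wake.h):
 *
 *	unsigned int fw_ref = xe_force_wake_get(gt_to_fw(hwe->gt), hwe->domain);
 *	u32 head;
 *
 *	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
 *	head = xe_hw_engine_mmio_read32(hwe, RING_HEAD(0));
 *	xe_force_wake_put(gt_to_fw(hwe->gt), fw_ref);
 */
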
326 void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
327 {
328 	u32 ccs_mask =
329 		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
330 	u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
331 
332 	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
333 		xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
334 				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
335 
336 	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
337 	xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
338 				  xe_bo_ggtt_addr(hwe->hwsp));
339 
340 	if (xe_device_has_msix(gt_to_xe(hwe->gt)))
341 		ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
342 	xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
343 	xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
344 				  _MASKED_BIT_DISABLE(STOP_RING));
345 	xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
346 }
347 
348 static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_device *xe,
349 						 const struct xe_gt *gt,
350 						 const struct xe_hw_engine *hwe)
351 {
352 	/*
353 	 * Xe3p no longer supports load balance mode, so "fixed cslice" mode
354 	 * is automatic and no RCU_MODE programming is required.
355 	 */
356 	if (GRAPHICS_VER(gt_to_xe(gt)) >= 35)
357 		return false;
358 
359 	return xe_gt_ccs_mode_enabled(gt) &&
360 	       xe_rtp_match_first_render_or_compute(xe, gt, hwe);
361 }
362 
363 static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_device *xe,
364 				      const struct xe_gt *gt,
365 				      const struct xe_hw_engine *hwe)
366 {
367 	if (GRAPHICS_VER(xe) < 20)
368 		return false;
369 
370 	if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
371 	    hwe->class != XE_ENGINE_CLASS_RENDER)
372 		return false;
373 
374 	return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
375 }
376 
377 void
378 xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
379 {
380 	struct xe_gt *gt = hwe->gt;
381 	const u8 mocs_write_idx = gt->mocs.uc_index;
382 	const u8 mocs_read_idx = gt->mocs.uc_index;
383 	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
384 			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
385 	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
386 	const struct xe_rtp_entry_sr lrc_setup[] = {
387 		/*
388 		 * Some blitter commands do not have a field for MOCS; those
389 		 * commands will use the MOCS index pointed to by BLIT_CCTL.
390 		 * BLIT_CCTL registers therefore need to be programmed to the
391 		 * un-cached MOCS index.
391 		 */
392 		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
393 		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
394 			       ENGINE_CLASS(COPY)),
395 		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
396 				 BLIT_CCTL_DST_MOCS_MASK |
397 				 BLIT_CCTL_SRC_MOCS_MASK,
398 				 blit_cctl_val,
399 				 XE_RTP_ACTION_FLAG(ENGINE_BASE)))
400 		},
401 		/* Disable WMTP if HW doesn't support it */
402 		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
403 		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
404 		  XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
405 					   PREEMPT_GPGPU_LEVEL_MASK,
406 					   PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
407 		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
408 		},
409 	};
410 
411 	xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
412 }
413 
414 static void
415 hw_engine_setup_default_state(struct xe_hw_engine *hwe)
416 {
417 	struct xe_gt *gt = hwe->gt;
418 	struct xe_device *xe = gt_to_xe(gt);
419 	/*
420 	 * RING_CMD_CCTL specifies the default MOCS entry that will be
421 	 * used by the command streamer when executing commands that
422 	 * don't have a way to explicitly specify a MOCS setting.
423 	 * The default should usually reference whichever MOCS entry
424 	 * corresponds to uncached behavior, although use of a WB cached
425 	 * entry is recommended by the spec in certain circumstances on
426 	 * specific platforms.
427 	 * Bspec: 72161
428 	 */
429 	const u8 mocs_write_idx = gt->mocs.uc_index;
430 	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
431 				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
432 				 gt->mocs.wb_index : gt->mocs.uc_index;
433 	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
434 				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
435 	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
436 	const struct xe_rtp_entry_sr engine_entries[] = {
437 		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
438 		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
439 		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
440 					   CMD_CCTL_WRITE_OVERRIDE_MASK |
441 					   CMD_CCTL_READ_OVERRIDE_MASK,
442 					   ring_cmd_cctl_val,
443 					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
444 		},
445 		/*
446 		 * To allow the GSC engine to go idle on MTL we need to enable
447 		 * idle messaging and set the hysteresis value (we use 0xA=5us
448 		 * as recommended in spec). On platforms after MTL this is
449 		 * enabled by default.
450 		 */
451 		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
452 		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
453 		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
454 				     IDLE_MSG_DISABLE,
455 				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
456 				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
457 					   IDLE_WAIT_TIME,
458 					   0xA,
459 					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
460 		},
461 		/* Enable Priority Mem Read */
462 		{ XE_RTP_NAME("Priority_Mem_Read"),
463 		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
464 		  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
465 				     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
466 		},
467 		/* Use Fixed slice CCS mode */
468 		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
469 		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
470 		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
471 					   RCU_MODE_FIXED_SLICE_CCS_MODE))
472 		},
473 	};
474 
475 	xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
476 }
477 
478 static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
479 {
480 	const struct engine_info *info;
481 	enum xe_hw_engine_id id;
482 
483 	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
484 		info = &engine_infos[id];
485 		if (info->class == class && info->instance == instance)
486 			return info;
487 	}
488 
489 	return NULL;
490 }
491 
492 static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
493 {
494 	/* For MSI-X, HW engines report at the IRQ offset of engine instance zero */
495 	const struct engine_info *info = find_engine_info(class, 0);
496 
497 	xe_gt_assert(gt, info);
498 
499 	return info ? info->irq_offset : 0;
500 }
501 
502 static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
503 				 enum xe_hw_engine_id id)
504 {
505 	const struct engine_info *info;
506 
507 	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
508 		return;
509 
510 	if (!(gt->info.engine_mask & BIT(id)))
511 		return;
512 
513 	info = &engine_infos[id];
514 
515 	xe_gt_assert(gt, !hwe->gt);
516 
517 	hwe->gt = gt;
518 	hwe->class = info->class;
519 	hwe->instance = info->instance;
520 	hwe->mmio_base = info->mmio_base;
521 	hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
522 		get_msix_irq_offset(gt, info->class) :
523 		info->irq_offset;
524 	hwe->domain = info->domain;
525 	hwe->name = info->name;
526 	hwe->fence_irq = &gt->fence_irq[info->class];
527 	hwe->engine_id = id;
528 
529 	hwe->eclass = &gt->eclass[hwe->class];
530 	if (!hwe->eclass->sched_props.job_timeout_ms) {
531 		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
532 		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
533 		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
534 		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
535 		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
536 		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
537 		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
538 		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
539 		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
540 
541 		/*
542 		 * The GSC engine can accept submissions while the GSC shim is
543 		 * being reset, during which time the submission is stalled. In
544 		 * the worst case, the shim reset can take up to the maximum GSC
545 		 * command execution time (250ms), so the request start can be
546 		 * delayed by that much; the request itself can take that long
547 		 * without being preemptible, which means worst case it can
548 		 * theoretically take up to 500ms for a preemption to go through
549 		 * on the GSC engine. Adding to that an extra 100ms as a safety
550 		 * margin, we get a minimum recommended timeout of 600ms.
551 		 * The preempt_timeout value can't be tuned for OTHER_CLASS
552 		 * because the class is reserved for kernel usage, so we just
553 		 * need to make sure that the starting value is above that
554 		 * threshold; since our default value (640ms) is greater than
555 		 * 600ms, the only way we can go below is via a kconfig setting.
556 		 * If that happens, log it in dmesg and update the value.
557 		 */
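		/*
		 * Budget recap (editor's note): 250ms shim reset + 250ms GSC
		 * command execution + 100ms safety margin = 600ms minimum.
		 */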
558 		if (hwe->class == XE_ENGINE_CLASS_OTHER) {
559 			const u32 min_preempt_timeout = 600 * 1000;
560 			if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
561 				hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
562 				xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
563 			}
564 		}
565 
566 		/* Record default props */
567 		hwe->eclass->defaults = hwe->eclass->sched_props;
568 	}
569 
570 	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
571 	xe_tuning_process_engine(hwe);
572 	xe_wa_process_engine(hwe);
573 	hw_engine_setup_default_state(hwe);
574 
575 	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
576 	xe_reg_whitelist_process_engine(hwe);
577 }
578 
579 static void adjust_idledly(struct xe_hw_engine *hwe)
580 {
581 	struct xe_gt *gt = hwe->gt;
582 	u32 idledly, maxcnt;
583 	u32 idledly_units_ps = 8 * gt->info.timestamp_base;
584 	u32 maxcnt_units_ns = 640;
585 	bool inhibit_switch = false;
586 
587 	if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_GT_WA(gt, 16023105232)) {
588 		idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
589 		maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));
590 
591 		inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED;
592 		idledly = REG_FIELD_GET(IDLE_DELAY, idledly);
593 		idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000);
594 		maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt);
595 		maxcnt *= maxcnt_units_ns;
596 
597 		if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) {
598 			idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * maxcnt_units_ns),
599 						    idledly_units_ps);
600 			idledly = DIV_ROUND_CLOSEST(idledly, 1000);
601 			xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly);
602 		}
603 	}
604 }
605 
606 static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
607 			  enum xe_hw_engine_id id)
608 {
609 	struct xe_device *xe = gt_to_xe(gt);
610 	struct xe_tile *tile = gt_to_tile(gt);
611 	int err;
612 
613 	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
614 	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));
615 
616 	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
617 
618 	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
619 						 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
620 						 XE_BO_FLAG_GGTT |
621 						 XE_BO_FLAG_GGTT_INVALIDATE);
622 	if (IS_ERR(hwe->hwsp)) {
623 		err = PTR_ERR(hwe->hwsp);
624 		goto err_name;
625 	}
626 
627 	if (!xe_device_uc_enabled(xe)) {
628 		hwe->exl_port = xe_execlist_port_create(xe, hwe);
629 		if (IS_ERR(hwe->exl_port)) {
630 			err = PTR_ERR(hwe->exl_port);
631 			goto err_hwsp;
632 		}
633 	} else {
634 		/* GSCCS has a special interrupt for reset */
635 		if (hwe->class == XE_ENGINE_CLASS_OTHER)
636 			hwe->irq_handler = xe_gsc_hwe_irq_handler;
637 
638 		if (!IS_SRIOV_VF(xe))
639 			xe_hw_engine_enable_ring(hwe);
640 	}
641 
642 	/* We reserve the highest BCS instance for USM */
643 	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
644 		gt->usm.reserved_bcs_instance = hwe->instance;
645 
646 	/* Ensure IDLEDLY is lower than MAXCNT */
647 	adjust_idledly(hwe);
648 
649 	return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
650 
651 err_hwsp:
652 	xe_bo_unpin_map_no_vm(hwe->hwsp);
653 err_name:
654 	hwe->name = NULL;
655 
656 	return err;
657 }
658 
659 static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
660 {
661 	int class;
662 
663 	/* FIXME: Doing a simple logical mapping that works for most hardware */
664 	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
665 		struct xe_hw_engine *hwe;
666 		enum xe_hw_engine_id id;
667 		int logical_instance = 0;
668 
669 		for_each_hw_engine(hwe, gt, id)
670 			if (hwe->class == class)
671 				hwe->logical_instance = logical_instance++;
672 	}
673 }
674 
675 static void read_media_fuses(struct xe_gt *gt)
676 {
677 	struct xe_device *xe = gt_to_xe(gt);
678 	u32 media_fuse;
679 	u16 vdbox_mask;
680 	u16 vebox_mask;
681 	int i, j;
682 
683 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
684 
685 	media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);
686 
687 	/*
688 	 * Pre-Xe_HP platforms had register bits representing absent engines,
689 	 * whereas Xe_HP and beyond have bits representing present engines.
690 	 * Invert the polarity on old platforms so that we can use common
691 	 * handling below.
692 	 */
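	/*
	 * Worked example (editor's illustration): a pre-1250 raw value with
	 * bits 2-7 set in the VDBOX field (vcs2-vcs7 absent) inverts to a
	 * mask with only bits 0-1 set, i.e. vcs0/vcs1 present, matching the
	 * Xe_HP-and-later polarity handled below.
	 */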
693 	if (GRAPHICS_VERx100(xe) < 1250)
694 		media_fuse = ~media_fuse;
695 
696 	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
697 	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);
698 
699 	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
700 		if (!(gt->info.engine_mask & BIT(i)))
701 			continue;
702 
703 		if (!(BIT(j) & vdbox_mask)) {
704 			gt->info.engine_mask &= ~BIT(i);
705 			xe_gt_info(gt, "vcs%u fused off\n", j);
706 		}
707 	}
708 
709 	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
710 		if (!(gt->info.engine_mask & BIT(i)))
711 			continue;
712 
713 		if (!(BIT(j) & vebox_mask)) {
714 			gt->info.engine_mask &= ~BIT(i);
715 			xe_gt_info(gt, "vecs%u fused off\n", j);
716 		}
717 	}
718 }
719 
720 static u32 infer_svccopy_from_meml3(struct xe_gt *gt)
721 {
722 	u32 meml3 = REG_FIELD_GET(MEML3_EN_MASK,
723 				  xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
724 	u32 svccopy_mask = 0;
725 
726 	/*
727 	 * Each of the four meml3 bits determines the fusing of two service
728 	 * copy engines.
729 	 */
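	/*
	 * Worked example (editor's illustration): meml3 = 0b0101 yields
	 * svccopy_mask = 0b00110011, which read_copy_fuses() shifts up by
	 * XE_HW_ENGINE_BCS1 so that BCS1-2 and BCS5-6 are treated as present.
	 */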
730 	for (int i = 0; i < 4; i++)
731 		svccopy_mask |= (meml3 & BIT(i)) ? 0b11 << 2 * i : 0;
732 
733 	return svccopy_mask;
734 }
735 
736 static u32 read_svccopy_fuses(struct xe_gt *gt)
737 {
738 	return REG_FIELD_GET(FUSE_SERVICE_COPY_ENABLE_MASK,
739 			     xe_mmio_read32(&gt->mmio, SERVICE_COPY_ENABLE));
740 }
741 
742 static void read_copy_fuses(struct xe_gt *gt)
743 {
744 	struct xe_device *xe = gt_to_xe(gt);
745 	u32 bcs_mask;
746 
747 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
748 
749 	if (GRAPHICS_VER(xe) >= 35)
750 		bcs_mask = read_svccopy_fuses(gt);
751 	else if (GRAPHICS_VERx100(xe) == 1260)
752 		bcs_mask = infer_svccopy_from_meml3(gt);
753 	else
754 		return;
755 
756 	/* Only BCS1-BCS8 may be fused off */
757 	bcs_mask <<= XE_HW_ENGINE_BCS1;
758 	for (int i = XE_HW_ENGINE_BCS1; i <= XE_HW_ENGINE_BCS8; ++i) {
759 		if (!(gt->info.engine_mask & BIT(i)))
760 			continue;
761 
762 		if (!(bcs_mask & BIT(i))) {
763 			gt->info.engine_mask &= ~BIT(i);
764 			xe_gt_info(gt, "bcs%u fused off\n",
765 				   i - XE_HW_ENGINE_BCS0);
766 		}
767 	}
768 }
769 
770 static void read_compute_fuses_from_dss(struct xe_gt *gt)
771 {
772 	/*
773 	 * CCS fusing based on DSS masks only applies to platforms that can
774 	 * have more than one CCS.
775 	 */
776 	if (hweight64(gt->info.engine_mask &
777 		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
778 		return;
779 
780 	/*
781 	 * CCS availability on Xe_HP is inferred from the presence of DSS in
782 	 * each quadrant.
783 	 */
784 	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
785 		if (!(gt->info.engine_mask & BIT(i)))
786 			continue;
787 
788 		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
789 			gt->info.engine_mask &= ~BIT(i);
790 			xe_gt_info(gt, "ccs%u fused off\n", j);
791 		}
792 	}
793 }
794 
795 static void read_compute_fuses_from_reg(struct xe_gt *gt)
796 {
797 	u32 ccs_mask;
798 
799 	ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
800 	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);
801 
802 	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
803 		if (!(gt->info.engine_mask & BIT(i)))
804 			continue;
805 
806 		if ((ccs_mask & BIT(j)) == 0) {
807 			gt->info.engine_mask &= ~BIT(i);
808 			xe_gt_info(gt, "ccs%u fused off\n", j);
809 		}
810 	}
811 }
812 
813 static void read_compute_fuses(struct xe_gt *gt)
814 {
815 	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
816 		read_compute_fuses_from_reg(gt);
817 	else
818 		read_compute_fuses_from_dss(gt);
819 }
820 
821 static void check_gsc_availability(struct xe_gt *gt)
822 {
823 	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
824 		return;
825 
826 	/*
827 	 * The GSCCS is only used to communicate with the GSC FW, so if we don't
828 	 * have the FW there is nothing we need the engine for and we can
829 	 * therefore skip its initialization.
830 	 */
831 	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
832 		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
833 
834 		/* interrupts were previously enabled, so turn them off */
835 		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
836 		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
837 
838 		xe_gt_dbg(gt, "GSC FW not used, disabling gsccs\n");
839 	}
840 }
841 
842 static void check_sw_disable(struct xe_gt *gt)
843 {
844 	struct xe_device *xe = gt_to_xe(gt);
845 	u64 sw_allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));
846 	enum xe_hw_engine_id id;
847 
848 	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
849 		if (!(gt->info.engine_mask & BIT(id)))
850 			continue;
851 
852 		if (!(sw_allowed & BIT(id))) {
853 			gt->info.engine_mask &= ~BIT(id);
854 			xe_gt_info(gt, "%s disabled via configfs\n",
855 				   engine_infos[id].name);
856 		}
857 	}
858 }
859 
860 int xe_hw_engines_init_early(struct xe_gt *gt)
861 {
862 	int i;
863 
864 	read_media_fuses(gt);
865 	read_copy_fuses(gt);
866 	read_compute_fuses(gt);
867 	check_gsc_availability(gt);
868 	check_sw_disable(gt);
869 
870 	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
871 	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);
872 
873 	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
874 		hw_engine_init_early(gt, &gt->hw_engines[i], i);
875 
876 	return 0;
877 }
878 
879 int xe_hw_engines_init(struct xe_gt *gt)
880 {
881 	int err;
882 	struct xe_hw_engine *hwe;
883 	enum xe_hw_engine_id id;
884 
885 	for_each_hw_engine(hwe, gt, id) {
886 		err = hw_engine_init(gt, hwe, id);
887 		if (err)
888 			return err;
889 	}
890 
891 	hw_engine_setup_logical_mapping(gt);
892 	err = xe_hw_engine_setup_groups(gt);
893 	if (err)
894 		return err;
895 
896 	return 0;
897 }
898 
899 void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
900 {
901 	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);
902 
903 	if (hwe->irq_handler)
904 		hwe->irq_handler(hwe, intr_vec);
905 
906 	if (intr_vec & GT_RENDER_USER_INTERRUPT)
907 		xe_hw_fence_irq_run(hwe->fence_irq);
908 }
909 
910 /**
911  * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
912  * @hwe: Xe HW Engine.
913  * @q: The exec queue object (may be NULL).
914  *
915  * This can be printed out at a later stage, e.g. during dev_coredump
916  * analysis.
917  *
918  * Returns: a Xe HW Engine snapshot object that must be freed by the
919  * caller, using xe_hw_engine_snapshot_free().
920  */
921 struct xe_hw_engine_snapshot *
922 xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
923 {
924 	struct xe_hw_engine_snapshot *snapshot;
925 	struct __guc_capture_parsed_output *node;
926 
927 	if (!xe_hw_engine_is_valid(hwe))
928 		return NULL;
929 
930 	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
931 
932 	if (!snapshot)
933 		return NULL;
934 
935 	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
936 	snapshot->hwe = hwe;
937 	snapshot->logical_instance = hwe->logical_instance;
938 	snapshot->forcewake.domain = hwe->domain;
939 	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
940 						    hwe->domain);
941 	snapshot->mmio_base = hwe->mmio_base;
942 	snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);
943 
944 	/* no more VF accessible data below this point */
945 	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
946 		return snapshot;
947 
948 	if (q) {
949 		/* If we got a GuC capture, set the source to GuC */
950 		node = xe_guc_capture_get_matching_and_lock(q);
951 		if (node) {
952 			struct xe_device *xe = gt_to_xe(hwe->gt);
953 			struct xe_devcoredump *coredump = &xe->devcoredump;
954 
955 			coredump->snapshot.matched_node = node;
956 			xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
957 			return snapshot;
958 		}
959 	}
960 
961 	/* otherwise, do manual capture */
962 	xe_engine_manual_capture(hwe, snapshot);
963 	xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");
964 
965 	return snapshot;
966 }
967 
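/*
 * Lifecycle sketch (editor's illustration): a one-off dump pairs the capture
 * with the matching free, as xe_hw_engine_print() below does:
 *
 *	struct xe_hw_engine_snapshot *s = xe_hw_engine_snapshot_capture(hwe, NULL);
 *
 *	xe_engine_snapshot_print(s, p);
 *	xe_hw_engine_snapshot_free(s);
 */
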
968 /**
969  * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
970  * @snapshot: Xe HW Engine snapshot object.
971  *
972  * This function frees all the memory that was allocated at capture
973  * time.
974  */
975 void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
976 {
977 	struct xe_gt *gt;
978 	if (!snapshot)
979 		return;
980 
981 	gt = snapshot->hwe->gt;
982 	/*
983 	 * xe_guc_capture_put_matched_nodes is called here and from
984 	 * xe_devcoredump_snapshot_free, to cover the 2 calling paths
985 	 * of hw_engines - debugfs and devcoredump free.
986 	 */
987 	xe_guc_capture_put_matched_nodes(&gt->uc.guc);
988 
989 	kfree(snapshot->name);
990 	kfree(snapshot);
991 }
992 
993 /**
994  * xe_hw_engine_print - Xe HW Engine Print.
995  * @hwe: Hardware Engine.
996  * @p: drm_printer.
997  *
998  * This function quickly captures a snapshot and immediately prints it out.
999  */
1000 void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
1001 {
1002 	struct xe_hw_engine_snapshot *snapshot;
1003 
1004 	snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
1005 	xe_engine_snapshot_print(snapshot, p);
1006 	xe_hw_engine_snapshot_free(snapshot);
1007 }
1008 
1009 u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
1010 				enum xe_engine_class engine_class)
1011 {
1012 	u32 mask = 0;
1013 	enum xe_hw_engine_id id;
1014 
1015 	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
1016 		if (engine_infos[id].class == engine_class &&
1017 		    gt->info.engine_mask & BIT(id))
1018 			mask |= BIT(engine_infos[id].instance);
1019 	}
1020 	return mask;
1021 }
1022 
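/*
 * Editor's illustration: on a GT where only ccs0 and ccs2 survive fusing,
 * xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE) returns 0b101,
 * since the mask is built from each engine's hardware instance number.
 */
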
1023 bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
1024 {
1025 	struct xe_gt *gt = hwe->gt;
1026 	struct xe_device *xe = gt_to_xe(gt);
1027 
1028 	if (hwe->class == XE_ENGINE_CLASS_OTHER)
1029 		return true;
1030 
1031 	/* Check for engines disabled by ccs_mode setting */
1032 	if (xe_gt_ccs_mode_enabled(gt) &&
1033 	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
1034 	    hwe->logical_instance >= gt->ccs_mode)
1035 		return true;
1036 
1037 	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
1038 		hwe->instance == gt->usm.reserved_bcs_instance;
1039 }
1040 
1041 const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
1042 {
1043 	switch (class) {
1044 	case XE_ENGINE_CLASS_RENDER:
1045 		return "rcs";
1046 	case XE_ENGINE_CLASS_VIDEO_DECODE:
1047 		return "vcs";
1048 	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
1049 		return "vecs";
1050 	case XE_ENGINE_CLASS_COPY:
1051 		return "bcs";
1052 	case XE_ENGINE_CLASS_OTHER:
1053 		return "other";
1054 	case XE_ENGINE_CLASS_COMPUTE:
1055 		return "ccs";
1056 	case XE_ENGINE_CLASS_MAX:
1057 		break;
1058 	}
1059 
1060 	return NULL;
1061 }
1062 
1063 u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
1064 {
1065 	return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
1066 }
1067 
1068 enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
1069 {
1070 	return engine_infos[hwe->engine_id].domain;
1071 }
1072 
1073 static const enum xe_engine_class user_to_xe_engine_class[] = {
1074 	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
1075 	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
1076 	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
1077 	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
1078 	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
1079 };
1080 
1081 /**
1082  * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
1083  * @xe: xe device
1084  * @eci: engine class and instance
1085  *
1086  * This function will find a hardware engine for the given engine
1087  * class and instance.
1088  *
1089  * Return: If found xe_hw_engine pointer, NULL otherwise.
1090  */
1091 struct xe_hw_engine *
1092 xe_hw_engine_lookup(struct xe_device *xe,
1093 		    struct drm_xe_engine_class_instance eci)
1094 {
1095 	struct xe_gt *gt = xe_device_get_gt(xe, eci.gt_id);
1096 	unsigned int idx;
1097 
1098 	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
1099 		return NULL;
1100 
1101 	if (!gt)
1102 		return NULL;
1103 
1104 	idx = array_index_nospec(eci.engine_class,
1105 				 ARRAY_SIZE(user_to_xe_engine_class));
1106 
1107 	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
1108 			       user_to_xe_engine_class[idx],
1109 			       eci.engine_instance, true);
1110 }
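
/*
 * Lookup sketch (editor's illustration): resolving a uAPI class/instance
 * pair, e.g. the first render engine on GT 0:
 *
 *	struct drm_xe_engine_class_instance eci = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct xe_hw_engine *hwe = xe_hw_engine_lookup(xe, eci);
 *
 * A NULL return means the class/instance pair does not map to a usable
 * engine on that GT and the uAPI request should be rejected.
 */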
1111