// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <linux/nospec.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"
#include "xe_guc_capture.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

#define MAX_MMIO_BASES 3
struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	unsigned int irq_offset : 8;
	enum xe_force_wake_domains domain;
	u32 mmio_base;
};

static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.irq_offset = ilog2(INTR_RCS0),
		.domain = XE_FW_RENDER,
		.mmio_base = RENDER_RING_BASE,
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.irq_offset = ilog2(INTR_BCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = BLT_RING_BASE,
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.irq_offset = ilog2(INTR_BCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS1_RING_BASE,
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.irq_offset = ilog2(INTR_BCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS2_RING_BASE,
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.irq_offset = ilog2(INTR_BCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS3_RING_BASE,
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.irq_offset = ilog2(INTR_BCS(4)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS4_RING_BASE,
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.irq_offset = ilog2(INTR_BCS(5)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS5_RING_BASE,
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.irq_offset = ilog2(INTR_BCS(6)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS6_RING_BASE,
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.irq_offset = ilog2(INTR_BCS(7)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS7_RING_BASE,
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.irq_offset = ilog2(INTR_BCS8),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS8_RING_BASE,
	},

	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VCS(0)),
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_base = BSD_RING_BASE,
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VCS(1)),
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_base = BSD2_RING_BASE,
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VCS(2)),
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_base = BSD3_RING_BASE,
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VCS(3)),
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_base = BSD4_RING_BASE,
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.irq_offset = 32 + ilog2(INTR_VCS(4)),
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_base = XEHP_BSD5_RING_BASE,
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.irq_offset = 32 + ilog2(INTR_VCS(5)),
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_base = XEHP_BSD6_RING_BASE,
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.irq_offset = 32 + ilog2(INTR_VCS(6)),
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_base = XEHP_BSD7_RING_BASE,
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.irq_offset = 32 + ilog2(INTR_VCS(7)),
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_base = XEHP_BSD8_RING_BASE,
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VECS(0)),
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_base = VEBOX_RING_BASE,
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VECS(1)),
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_base = VEBOX2_RING_BASE,
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VECS(2)),
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_base = XEHP_VEBOX3_RING_BASE,
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VECS(3)),
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_base = XEHP_VEBOX4_RING_BASE,
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.irq_offset = ilog2(INTR_CCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE0_RING_BASE,
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.irq_offset = ilog2(INTR_CCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE1_RING_BASE,
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.irq_offset = ilog2(INTR_CCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE2_RING_BASE,
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.irq_offset = ilog2(INTR_CCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE3_RING_BASE,
	},
	[XE_HW_ENGINE_GSCCS0] = {
		.name = "gsccs0",
		.class = XE_ENGINE_CLASS_OTHER,
		.instance = OTHER_GSC_INSTANCE,
		.domain = XE_FW_GSC,
		.mmio_base = GSCCS_RING_BASE,
	},
};

static void hw_engine_fini(void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);

	hwe->gt = NULL;
}

/**
 * xe_hw_engine_mmio_write32() - Write engine register
 * @hwe: engine
 * @reg: register to write into
 * @val: desired 32-bit value to write
 *
 * This function writes val into an engine-specific register.
 * Forcewake must be held by the caller.
 */
void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
			       struct xe_reg reg, u32 val)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	xe_mmio_write32(&hwe->gt->mmio, reg, val);
}

/**
 * xe_hw_engine_mmio_read32() - Read engine register
 * @hwe: engine
 * @reg: register to read from
 *
 * This function reads from an engine-specific register.
 * Forcewake must be held by the caller.
 *
 * Return: value of the 32-bit register.
 */
u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	return xe_mmio_read32(&hwe->gt->mmio, reg);
}

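/*
 * Illustrative usage sketch for the two accessors above (not lifted from an
 * in-tree caller): registers are passed relative to the ring, the helpers add
 * hwe->mmio_base, and the engine's forcewake domain must be held for the
 * duration of the access:
 *
 *	unsigned int fw_ref = xe_force_wake_get(gt_to_fw(hwe->gt), hwe->domain);
 *
 *	if (fw_ref) {
 *		u32 head = xe_hw_engine_mmio_read32(hwe, RING_HEAD(0));
 *		...
 *		xe_force_wake_put(gt_to_fw(hwe->gt), fw_ref);
 *	}
 */
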
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
	u32 ring_mode = REG_MASKED_FIELD_ENABLE(GFX_DISABLE_LEGACY_MODE);

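	/*
	 * RCU_MODE, RING_MODE and RING_MI_MODE are masked registers: the
	 * upper 16 bits of the written value select which of the lower bits
	 * take effect, so REG_MASKED_FIELD_ENABLE()/_DISABLE() can flip
	 * individual bits without a read-modify-write. The read of
	 * RING_MI_MODE at the end serves as a posting read for the writes.
	 */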
	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
		xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
				REG_MASKED_FIELD_ENABLE(RCU_MODE_CCS_ENABLE));

	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
	xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
				  xe_bo_ggtt_addr(hwe->hwsp));

	if (xe_device_has_msix(gt_to_xe(hwe->gt)))
		ring_mode |= REG_MASKED_FIELD_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
	xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
	xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
				  REG_MASKED_FIELD_DISABLE(STOP_RING));
	xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}

static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_device *xe,
						 const struct xe_gt *gt,
						 const struct xe_hw_engine *hwe)
{
	/*
	 * Xe3p no longer supports load balance mode, so "fixed cslice" mode
	 * is automatic and no RCU_MODE programming is required.
	 */
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 35)
		return false;

	return xe_gt_ccs_mode_enabled(gt) &&
	       xe_rtp_match_first_render_or_compute(xe, gt, hwe);
}

static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_device *xe,
				      const struct xe_gt *gt,
				      const struct xe_hw_engine *hwe)
{
	if (GRAPHICS_VER(xe) < 20)
		return false;

	if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
	    hwe->class != XE_ENGINE_CLASS_RENDER)
		return false;

	return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
}

void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = gt->mocs.uc_index;
	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr lrc_setup[] = {
		/*
		 * Some blitter commands do not have a field for MOCS; those
		 * commands use the MOCS index pointed to by BLIT_CCTL, so the
		 * BLIT_CCTL registers need to be programmed to un-cached.
		 */
		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
			       ENGINE_CLASS(COPY)),
		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
					   BLIT_CCTL_DST_MOCS_MASK |
					   BLIT_CCTL_SRC_MOCS_MASK,
					   blit_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Disable WMTP if HW doesn't support it */
		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
		  XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
					   PREEMPT_GPGPU_LEVEL_MASK,
					   PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
		},
	};

	xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup),
			     &hwe->reg_lrc, true);
}

static void
hw_engine_setup_default_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be
	 * used by the command streamer when executing commands that
	 * don't have a way to explicitly specify a MOCS setting.
	 * The default should usually reference whichever MOCS entry
	 * corresponds to uncached behavior, although use of a WB cached
	 * entry is recommended by the spec in certain circumstances on
	 * specific platforms.
	 * Bspec: 72161
	 */
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
				 gt->mocs.wb_index : gt->mocs.uc_index;
	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr engine_entries[] = {
		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
					   CMD_CCTL_WRITE_OVERRIDE_MASK |
					   CMD_CCTL_READ_OVERRIDE_MASK,
					   ring_cmd_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/*
		 * To allow the GSC engine to go idle on MTL we need to enable
		 * idle messaging and set the hysteresis value (we use 0xA=5us
		 * as recommended in spec). On platforms after MTL this is
		 * enabled by default.
		 */
		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
				     IDLE_MSG_DISABLE,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
					   IDLE_WAIT_TIME,
					   0xA,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Enable Priority Mem Read */
		{ XE_RTP_NAME("Priority_Mem_Read"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Use Fixed slice CCS mode */
		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
					   RCU_MODE_FIXED_SLICE_CCS_MODE))
		},
	};

	xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries),
			     &hwe->reg_sr, false);
}

static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
{
	const struct engine_info *info;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		info = &engine_infos[id];
		if (info->class == class && info->instance == instance)
			return info;
	}

	return NULL;
}

static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
{
	/* For MSI-X, hw engines report to the offset of engine instance zero */
	const struct engine_info *info = find_engine_info(class, 0);

	xe_gt_assert(gt, info);

	return info ? info->irq_offset : 0;
}

static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	xe_gt_assert(gt, !hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = info->mmio_base;
	hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
		get_msix_irq_offset(gt, info->class) :
		info->irq_offset;
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	hwe->eclass = &gt->eclass[hwe->class];
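
	/*
	 * Scheduling properties are shared per engine class; only the first
	 * engine of each class to be initialized populates the defaults
	 * below (sched_props.job_timeout_ms is still zero at that point).
	 */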
	if (!hwe->eclass->sched_props.job_timeout_ms) {
		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;

		/*
		 * The GSC engine can accept submissions while the GSC shim is
		 * being reset, during which time the submission is stalled. In
		 * the worst case, the shim reset can take up to the maximum GSC
		 * command execution time (250ms), so the request start can be
		 * delayed by that much; the request itself can take that long
		 * without being preemptible, which means worst case it can
		 * theoretically take up to 500ms for a preemption to go through
		 * on the GSC engine. Adding to that an extra 100ms as a safety
		 * margin, we get a minimum recommended timeout of 600ms.
		 * The preempt_timeout value can't be tuned for OTHER_CLASS
		 * because the class is reserved for kernel usage, so we just
		 * need to make sure that the starting value is above that
		 * threshold; since our default value (640ms) is greater than
		 * 600ms, the only way we can go below is via a kconfig setting.
		 * If that happens, log it in dmesg and update the value.
		 */
		if (hwe->class == XE_ENGINE_CLASS_OTHER) {
			const u32 min_preempt_timeout = 600 * 1000;

			if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
				hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
				xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
			}
		}

		/* Record default props */
		hwe->eclass->defaults = hwe->eclass->sched_props;
	}

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_tuning_process_engine(hwe);
	xe_wa_process_engine(hwe);
	hw_engine_setup_default_state(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}

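/*
 * WA 16023105232 requires the context-switch idle delay (RING_IDLEDLY, in
 * units of 8 * gt->info.timestamp_base picoseconds) to stay below the idle
 * wait time (RING_PWRCTX_MAXCNT, in 640 ns units). adjust_idledly() converts
 * both to nanoseconds and, if the fused defaults violate the constraint (or
 * the inhibit-switch-until-preempted bit is set), rewrites IDLEDLY to sit
 * just below MAXCNT, which also clears the inhibit bit.
 */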
static void adjust_idledly(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	u32 idledly, maxcnt;
	u32 idledly_units_ps = 8 * gt->info.timestamp_base;
	u32 maxcnt_units_ns = 640;
	bool inhibit_switch = false;

	if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_GT_WA(gt, 16023105232)) {
		idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
		maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));

		inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED;
		idledly = REG_FIELD_GET(IDLE_DELAY, idledly);
		idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000);
		maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt);
		maxcnt *= maxcnt_units_ns;

		if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) {
			idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * 1000),
						    idledly_units_ps);
			xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly);
		}
	}
}

static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	int err;

	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
						 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
						 XE_BO_FLAG_GGTT |
						 XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	if (!xe_device_uc_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_hwsp;
		}
	} else {
		/* GSCCS has a special interrupt for reset */
		if (hwe->class == XE_ENGINE_CLASS_OTHER)
			hwe->irq_handler = xe_gsc_hwe_irq_handler;

		if (!IS_SRIOV_VF(xe))
			xe_hw_engine_enable_ring(hwe);
	}

	/* We reserve the highest BCS instance for USM */
	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	/* Ensure IDLEDLY is lower than MAXCNT */
	adjust_idledly(hwe);

	return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);

err_hwsp:
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}

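/*
 * For example, if fusing leaves only vcs0 and vcs2 enabled on a GT, the loop
 * below assigns them logical instances 0 and 1 respectively.
 */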
static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}

static void read_media_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);

	/*
	 * Pre-Xe_HP platforms had register bits representing absent engines,
	 * whereas Xe_HP and beyond have bits representing present engines.
	 * Invert the polarity on old platforms so that we can use common
	 * handling below.
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "vecs%u fused off\n", j);
		}
	}
}

static u32 infer_svccopy_from_meml3(struct xe_gt *gt)
{
	u32 meml3 = REG_FIELD_GET(MEML3_EN_MASK,
				  xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
	u32 svccopy_mask = 0;

	/*
	 * Each of the four meml3 bits determines the fusing of two service
	 * copy engines.
	 */
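	/*
	 * For example, meml3 = 0b0101 (banks 0 and 2 present) yields
	 * svccopy_mask = 0b00110011, i.e. service copy engines 0-1 and 4-5.
	 */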
	for (int i = 0; i < 4; i++)
		svccopy_mask |= (meml3 & BIT(i)) ? 0b11 << 2 * i : 0;

	return svccopy_mask;
}

static u32 read_svccopy_fuses(struct xe_gt *gt)
{
	return REG_FIELD_GET(FUSE_SERVICE_COPY_ENABLE_MASK,
			     xe_mmio_read32(&gt->mmio, SERVICE_COPY_ENABLE));
}

static void read_copy_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 bcs_mask;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (GRAPHICS_VER(xe) >= 35)
		bcs_mask = read_svccopy_fuses(gt);
	else if (GRAPHICS_VERx100(xe) == 1260)
		bcs_mask = infer_svccopy_from_meml3(gt);
	else
		return;

	/* Only BCS1-BCS8 may be fused off */
	bcs_mask <<= XE_HW_ENGINE_BCS1;
	for (int i = XE_HW_ENGINE_BCS1; i <= XE_HW_ENGINE_BCS8; ++i) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(bcs_mask & BIT(i))) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "bcs%u fused off\n",
				   i - XE_HW_ENGINE_BCS0);
		}
	}
}

static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
	/*
	 * CCS fusing based on DSS masks only applies to platforms that can
	 * have more than one CCS.
	 */
	if (hweight64(gt->info.engine_mask &
		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
		return;

	/*
	 * CCS availability on Xe_HP is inferred from the presence of DSS in
	 * each quadrant.
	 */
	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
	u32 ccs_mask;

	ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);

	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if ((ccs_mask & BIT(j)) == 0) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses(struct xe_gt *gt)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
		read_compute_fuses_from_reg(gt);
	else
		read_compute_fuses_from_dss(gt);
}

static void check_gsc_availability(struct xe_gt *gt)
{
	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return;

	/*
	 * The GSCCS is only used to communicate with the GSC FW, so if we
	 * don't have the FW there is nothing we need the engine for and we
	 * can therefore skip its initialization.
	 */
	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);

		/* interrupts were previously enabled, so turn them off */
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);

		xe_gt_dbg(gt, "GSC FW not used, disabling gsccs\n");
	}
}

static void check_sw_disable(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u64 sw_allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (!(gt->info.engine_mask & BIT(id)))
			continue;

		if (!(sw_allowed & BIT(id))) {
			gt->info.engine_mask &= ~BIT(id);
			xe_gt_info(gt, "%s disabled via configfs\n",
				   engine_infos[id].name);
		}
	}
}

int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_media_fuses(gt);
	read_copy_fuses(gt);
	read_compute_fuses(gt);
	check_gsc_availability(gt);
	check_sw_disable(gt);

	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);
	err = xe_hw_engine_setup_groups(gt);
	if (err)
		return err;

	return 0;
}

void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_MI_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}

/**
 * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
 * @hwe: Xe HW Engine.
 * @q: The exec queue object.
 *
 * The snapshot can be printed out at a later stage, e.g. during
 * dev_coredump analysis.
 *
 * Returns: a Xe HW Engine snapshot object that must be freed by the
 * caller, using `xe_hw_engine_snapshot_free`.
 */
struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
{
	struct xe_hw_engine_snapshot *snapshot;
	struct __guc_capture_parsed_output *node;

	if (!xe_hw_engine_is_valid(hwe))
		return NULL;

	snapshot = kzalloc_obj(*snapshot, GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
	snapshot->hwe = hwe;
	snapshot->logical_instance = hwe->logical_instance;
	snapshot->forcewake.domain = hwe->domain;
	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
						    hwe->domain);
	snapshot->mmio_base = hwe->mmio_base;
	snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);

	/* no more VF accessible data below this point */
	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
		return snapshot;

	if (q) {
		/* If we got a GuC capture, set the source to GuC */
		node = xe_guc_capture_get_matching_and_lock(q);
		if (node) {
			struct xe_device *xe = gt_to_xe(hwe->gt);
			struct xe_devcoredump *coredump = &xe->devcoredump;

			coredump->snapshot.matched_node = node;
			xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
			return snapshot;
		}
	}

	/* otherwise, do manual capture */
	xe_engine_manual_capture(hwe, snapshot);
	xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");

	return snapshot;
}

/**
 * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
	struct xe_gt *gt;

	if (!snapshot)
		return;

	gt = snapshot->hwe->gt;
	/*
	 * xe_guc_capture_put_matched_nodes is called here and from
	 * xe_devcoredump_snapshot_free, to cover the 2 calling paths
	 * of hw_engines - debugfs and devcoredump free.
	 */
	xe_guc_capture_put_matched_nodes(&gt->uc.guc);

	kfree(snapshot->name);
	kfree(snapshot);
}

/**
 * xe_hw_engine_print - Xe HW Engine Print.
 * @hwe: Hardware Engine.
 * @p: drm_printer.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
	xe_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}

u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}

bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	if (hwe->class == XE_ENGINE_CLASS_OTHER)
		return true;

	/* Check for engines disabled by ccs_mode setting */
	if (xe_gt_ccs_mode_enabled(gt) &&
	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
	    hwe->logical_instance >= gt->ccs_mode)
		return true;

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}

const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
{
	switch (class) {
	case XE_ENGINE_CLASS_RENDER:
		return "rcs";
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		return "vcs";
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return "vecs";
	case XE_ENGINE_CLASS_COPY:
		return "bcs";
	case XE_ENGINE_CLASS_OTHER:
		return "other";
	case XE_ENGINE_CLASS_COMPUTE:
		return "ccs";
	case XE_ENGINE_CLASS_MAX:
		break;
	}

	return NULL;
}

u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
{
	return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
}

enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
{
	return engine_infos[hwe->engine_id].domain;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

/**
 * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
 * @xe: xe device
 * @eci: engine class and instance
 *
 * This function will find a hardware engine for the given engine
 * class and instance.
 *
 * Return: If found xe_hw_engine pointer, NULL otherwise.
 */
struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe,
		    struct drm_xe_engine_class_instance eci)
{
	struct xe_gt *gt = xe_device_get_gt(xe, eci.gt_id);
	unsigned int idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (!gt)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(gt, user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}