// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <linux/nospec.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_topology.h"
#include "xe_guc_capture.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

#define MAX_MMIO_BASES 3
struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	unsigned int irq_offset : 8;
	enum xe_force_wake_domains domain;
	u32 mmio_base;
};

static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.irq_offset = ilog2(INTR_RCS0),
		.domain = XE_FW_RENDER,
		.mmio_base = RENDER_RING_BASE,
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.irq_offset = ilog2(INTR_BCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = BLT_RING_BASE,
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.irq_offset = ilog2(INTR_BCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS1_RING_BASE,
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.irq_offset = ilog2(INTR_BCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS2_RING_BASE,
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.irq_offset = ilog2(INTR_BCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS3_RING_BASE,
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.irq_offset = ilog2(INTR_BCS(4)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS4_RING_BASE,
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.irq_offset = ilog2(INTR_BCS(5)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS5_RING_BASE,
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.irq_offset = ilog2(INTR_BCS(6)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS6_RING_BASE,
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.irq_offset = ilog2(INTR_BCS(7)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS7_RING_BASE,
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.irq_offset = ilog2(INTR_BCS8),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS8_RING_BASE,
	},

	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VCS(0)),
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_base = BSD_RING_BASE,
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VCS(1)),
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_base = BSD2_RING_BASE,
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VCS(2)),
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_base = BSD3_RING_BASE,
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VCS(3)),
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_base = BSD4_RING_BASE,
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.irq_offset = 32 + ilog2(INTR_VCS(4)),
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_base = XEHP_BSD5_RING_BASE,
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.irq_offset = 32 + ilog2(INTR_VCS(5)),
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_base = XEHP_BSD6_RING_BASE,
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.irq_offset = 32 + ilog2(INTR_VCS(6)),
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_base = XEHP_BSD7_RING_BASE,
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.irq_offset = 32 + ilog2(INTR_VCS(7)),
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_base = XEHP_BSD8_RING_BASE,
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VECS(0)),
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_base = VEBOX_RING_BASE,
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VECS(1)),
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_base = VEBOX2_RING_BASE,
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VECS(2)),
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_base = XEHP_VEBOX3_RING_BASE,
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VECS(3)),
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_base = XEHP_VEBOX4_RING_BASE,
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.irq_offset = ilog2(INTR_CCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE0_RING_BASE,
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.irq_offset = ilog2(INTR_CCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE1_RING_BASE,
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.irq_offset = ilog2(INTR_CCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE2_RING_BASE,
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.irq_offset = ilog2(INTR_CCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE3_RING_BASE,
	},
	[XE_HW_ENGINE_GSCCS0] = {
		.name = "gsccs0",
		.class = XE_ENGINE_CLASS_OTHER,
		.instance = OTHER_GSC_INSTANCE,
		.domain = XE_FW_GSC,
		.mmio_base = GSCCS_RING_BASE,
	},
};

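/* devm-managed teardown counterpart of hw_engine_init() */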
static void hw_engine_fini(void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);

	hwe->gt = NULL;
}

/**
 * xe_hw_engine_mmio_write32() - Write engine register
 * @hwe: engine
 * @reg: register to write into
 * @val: desired 32-bit value to write
 *
 * This function will write val into an engine specific register.
 * Forcewake must be held by the caller.
 *
 */
void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
			       struct xe_reg reg, u32 val)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	xe_mmio_write32(&hwe->gt->mmio, reg, val);
}

/**
 * xe_hw_engine_mmio_read32() - Read engine register
 * @hwe: engine
 * @reg: register to read from
 *
 * This function will read from an engine specific register.
 * Forcewake must be held by the caller.
 *
 * Return: value of the 32-bit register.
 */
u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	return xe_mmio_read32(&hwe->gt->mmio, reg);
}

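/**
 * xe_hw_engine_enable_ring() - Prepare a hardware engine for submission
 * @hwe: engine
 *
 * Programs the per-engine registers needed before the ring can be used:
 * RING_HWSTAM, the hardware status page address, the ring mode (legacy
 * mode disabled, MSI-X interrupts enabled where supported) and clears
 * the STOP_RING bit in RING_MI_MODE, followed by a posting read. For
 * compute engines the CCS enable bit in RCU_MODE is set as well.
 * Forcewake must be held by the caller.
 */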
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
	u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
		xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));

	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
	xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
				  xe_bo_ggtt_addr(hwe->hwsp));

	if (xe_device_has_msix(gt_to_xe(hwe->gt)))
		ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
	xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
	xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
				  _MASKED_BIT_DISABLE(STOP_RING));
	xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}

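/*
 * RTP match helper: true when CCS mode is enabled on the GT and @hwe is
 * the first render or compute engine.
 */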
static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
						 const struct xe_hw_engine *hwe)
{
	return xe_gt_ccs_mode_enabled(gt) &&
	       xe_rtp_match_first_render_or_compute(gt, hwe);
}

static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
				      const struct xe_hw_engine *hwe)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
		return false;

	if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
	    hwe->class != XE_ENGINE_CLASS_RENDER)
		return false;

	return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
}

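/**
 * xe_hw_engine_setup_default_lrc_state() - Build the default LRC state
 * @hwe: engine
 *
 * Collects the per-engine register settings applied to the default LRC
 * (default BLIT_CCTL MOCS, fixed-slice CCS mode, WMTP disable on hardware
 * that lacks WMTP support) into hwe->reg_lrc via the RTP infrastructure.
 */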
void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = gt->mocs.uc_index;
	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr lrc_setup[] = {
		/*
		 * Some blitter commands do not have a field for MOCS; those
		 * commands use the MOCS index pointed to by BLIT_CCTL, so
		 * BLIT_CCTL needs to be programmed to the un-cached index.
		 */
		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
			       ENGINE_CLASS(COPY)),
		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
					   BLIT_CCTL_DST_MOCS_MASK |
					   BLIT_CCTL_SRC_MOCS_MASK,
					   blit_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Use Fixed slice CCS mode */
		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
					   RCU_MODE_FIXED_SLICE_CCS_MODE))
		},
		/* Disable WMTP if HW doesn't support it */
		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
		  XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
					   PREEMPT_GPGPU_LEVEL_MASK,
					   PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc);
}

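/*
 * Collect the default engine register state (default MOCS for
 * RING_CMD_CCTL, GSCCS idle messaging on MTL, priority memory reads on
 * graphics IP 20.01+) into hwe->reg_sr via the RTP infrastructure.
 */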
static void
hw_engine_setup_default_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be
	 * used by the command streamer when executing commands that
	 * don't have a way to explicitly specify a MOCS setting.
	 * The default should usually reference whichever MOCS entry
	 * corresponds to uncached behavior, although use of a WB cached
	 * entry is recommended by the spec in certain circumstances on
	 * specific platforms.
	 * Bspec: 72161
	 */
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
				 gt->mocs.wb_index : gt->mocs.uc_index;
	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr engine_entries[] = {
		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
					   CMD_CCTL_WRITE_OVERRIDE_MASK |
					   CMD_CCTL_READ_OVERRIDE_MASK,
					   ring_cmd_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/*
		 * To allow the GSC engine to go idle on MTL we need to enable
		 * idle messaging and set the hysteresis value (we use 0xA=5us
		 * as recommended in spec). On platforms after MTL this is
		 * enabled by default.
		 */
		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
				     IDLE_MSG_DISABLE,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
					   IDLE_WAIT_TIME,
					   0xA,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Enable Priority Mem Read */
		{ XE_RTP_NAME("Priority_Mem_Read"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		{}
	};

	xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
}

static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
{
	const struct engine_info *info;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		info = &engine_infos[id];
		if (info->class == class && info->instance == instance)
			return info;
	}

	return NULL;
}

static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
{
	/* For MSI-X, hw engines report to offset of engine instance zero */
	const struct engine_info *info = find_engine_info(class, 0);

	xe_gt_assert(gt, info);

	return info ? info->irq_offset : 0;
}

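/*
 * Initialize the software-only state of an engine: identity taken from
 * engine_infos[], default scheduling properties for its class, and the
 * per-engine tuning, workaround and whitelist register lists.
 */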
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	xe_gt_assert(gt, !hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = info->mmio_base;
	hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
		get_msix_irq_offset(gt, info->class) :
		info->irq_offset;
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	hwe->eclass = &gt->eclass[hwe->class];
	if (!hwe->eclass->sched_props.job_timeout_ms) {
		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;

		/*
		 * The GSC engine can accept submissions while the GSC shim is
		 * being reset, during which time the submission is stalled. In
		 * the worst case, the shim reset can take up to the maximum GSC
		 * command execution time (250ms), so the request start can be
		 * delayed by that much; the request itself can take that long
		 * without being preemptible, which means worst case it can
		 * theoretically take up to 500ms for a preemption to go through
		 * on the GSC engine. Adding to that an extra 100ms as a safety
		 * margin, we get a minimum recommended timeout of 600ms.
		 * The preempt_timeout value can't be tuned for OTHER_CLASS
		 * because the class is reserved for kernel usage, so we just
		 * need to make sure that the starting value is above that
		 * threshold; since our default value (640ms) is greater than
		 * 600ms, the only way we can go below is via a kconfig setting.
		 * If that happens, log it in dmesg and update the value.
		 */
		if (hwe->class == XE_ENGINE_CLASS_OTHER) {
			const u32 min_preempt_timeout = 600 * 1000;

			if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
				hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
				xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
			}
		}

		/* Record default props */
		hwe->eclass->defaults = hwe->eclass->sched_props;
	}

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_tuning_process_engine(hwe);
	xe_wa_process_engine(hwe);
	hw_engine_setup_default_state(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}

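/*
 * Finish engine initialization: apply the save-restore register list,
 * allocate the hardware status page, create an execlist port when GuC
 * submission is not in use, and otherwise enable the ring. Registers a
 * devm action so hw_engine_fini() runs automatically on driver teardown.
 */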
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	int err;

	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
						 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
						 XE_BO_FLAG_GGTT |
						 XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	if (!xe_device_uc_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_hwsp;
		}
	} else {
		/* GSCCS has a special interrupt for reset */
		if (hwe->class == XE_ENGINE_CLASS_OTHER)
			hwe->irq_handler = xe_gsc_hwe_irq_handler;

		if (!IS_SRIOV_VF(xe))
			xe_hw_engine_enable_ring(hwe);
	}

	/* We reserve the highest BCS instance for USM */
	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);

err_hwsp:
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}

static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}

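/* Trim fused-off VCS/VECS engines from the GT's engine mask */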
static void read_media_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);

	/*
	 * Pre-Xe_HP platforms had register bits representing absent engines,
	 * whereas Xe_HP and beyond have bits representing present engines.
	 * Invert the polarity on old platforms so that we can use common
	 * handling below.
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vecs%u fused off\n", j);
		}
	}
}

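/*
 * Only graphics versions 12.60-12.6x fuse off copy engines; their
 * availability is derived from the L3 bank enable mask, with each
 * MEML3 bit covering a pair of BCS engines.
 */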
static void read_copy_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 bcs_mask;

	if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);

	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
	for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j / 2) & bcs_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "bcs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	/*
	 * CCS fusing based on DSS masks only applies to platforms that can
	 * have more than one CCS.
	 */
	if (hweight64(gt->info.engine_mask &
		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
		return;

	/*
	 * CCS availability on Xe_HP is inferred from the presence of DSS in
	 * each quadrant.
	 */
	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 ccs_mask;

	ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);

	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if ((ccs_mask & BIT(j)) == 0) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses(struct xe_gt *gt)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
		read_compute_fuses_from_reg(gt);
	else
		read_compute_fuses_from_dss(gt);
}

static void check_gsc_availability(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return;

	/*
	 * The GSCCS is only used to communicate with the GSC FW, so if we don't
	 * have the FW there is nothing we need the engine for and can therefore
	 * skip its initialization.
	 */
	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);

		/* interrupts were previously enabled, so turn them off */
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);

		drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
	}
}

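/**
 * xe_hw_engines_init_early() - Early per-GT engine initialization
 * @gt: GT structure
 *
 * Trims the engine mask according to the media/copy/compute fuses and
 * GSC firmware availability, then performs the early initialization of
 * every remaining engine.
 *
 * Return: 0 on success.
 */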
int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_media_fuses(gt);
	read_copy_fuses(gt);
	read_compute_fuses(gt);
	check_gsc_availability(gt);

	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

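/**
 * xe_hw_engines_init() - Initialize all hardware engines on a GT
 * @gt: GT structure
 *
 * Completes initialization of every engine present in the GT's engine
 * mask, assigns per-class logical instance numbers and sets up the
 * hw engine groups.
 *
 * Return: 0 on success, negative error code on error.
 */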
int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);
	err = xe_hw_engine_setup_groups(gt);
	if (err)
		return err;

	return 0;
}

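/**
 * xe_hw_engine_handle_irq() - Handle an interrupt for a hardware engine
 * @hwe: engine that raised the interrupt
 * @intr_vec: interrupt vector bits
 *
 * Wakes user-fence waiters, forwards the vector to any engine-specific
 * handler (e.g. the GSC one) and runs the fence IRQ handler on user
 * interrupts.
 */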
void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_RENDER_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}

/**
 * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
 * @hwe: Xe HW Engine.
 * @q: The exec queue object.
 *
 * The snapshot can be printed out at a later stage, e.g. during
 * dev_coredump analysis.
 *
 * Return: a Xe HW Engine snapshot object that must be freed by the
 * caller, using `xe_hw_engine_snapshot_free`.
 */
struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
{
	struct xe_hw_engine_snapshot *snapshot;
	struct __guc_capture_parsed_output *node;

	if (!xe_hw_engine_is_valid(hwe))
		return NULL;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
	snapshot->hwe = hwe;
	snapshot->logical_instance = hwe->logical_instance;
	snapshot->forcewake.domain = hwe->domain;
	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
						    hwe->domain);
	snapshot->mmio_base = hwe->mmio_base;
	snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);

	/* no more VF accessible data below this point */
	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
		return snapshot;

	if (q) {
		/* If got guc capture, set source to GuC */
		node = xe_guc_capture_get_matching_and_lock(q);
		if (node) {
			struct xe_device *xe = gt_to_xe(hwe->gt);
			struct xe_devcoredump *coredump = &xe->devcoredump;

			coredump->snapshot.matched_node = node;
			xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
			return snapshot;
		}
	}

	/* otherwise, do manual capture */
	xe_engine_manual_capture(hwe, snapshot);
	xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");

	return snapshot;
}

/**
 * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture
 * time.
 */
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
	struct xe_gt *gt;

	if (!snapshot)
		return;

	gt = snapshot->hwe->gt;
	/*
	 * xe_guc_capture_put_matched_nodes is called here and from
	 * xe_devcoredump_snapshot_free, to cover the 2 calling paths
	 * of hw_engines - debugfs and devcoredump free.
	 */
	xe_guc_capture_put_matched_nodes(&gt->uc.guc);

	kfree(snapshot->name);
	kfree(snapshot);
}

/**
 * xe_hw_engine_print - Xe HW Engine Print.
 * @hwe: Hardware Engine.
 * @p: drm_printer.
 *
 * This function quickly captures a snapshot, immediately prints it out
 * and then frees it.
 */
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
	xe_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}

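/**
 * xe_hw_engine_mask_per_class() - Build an instance mask for an engine class
 * @gt: GT structure
 * @engine_class: engine class
 *
 * Return: bitmask of the hardware engine instances of @engine_class that
 * are present in @gt's engine mask.
 */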
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}

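/**
 * xe_hw_engine_is_reserved() - Is the engine reserved for kernel use?
 * @hwe: engine
 *
 * Engines of class OTHER, compute engines hidden by the current ccs_mode
 * and the copy engine set aside for USM are not available to userspace.
 *
 * Return: true if the engine is reserved, false otherwise.
 */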
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	if (hwe->class == XE_ENGINE_CLASS_OTHER)
		return true;

	/* Check for engines disabled by ccs_mode setting */
	if (xe_gt_ccs_mode_enabled(gt) &&
	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
	    hwe->logical_instance >= gt->ccs_mode)
		return true;

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}

const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
{
	switch (class) {
	case XE_ENGINE_CLASS_RENDER:
		return "rcs";
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		return "vcs";
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return "vecs";
	case XE_ENGINE_CLASS_COPY:
		return "bcs";
	case XE_ENGINE_CLASS_OTHER:
		return "other";
	case XE_ENGINE_CLASS_COMPUTE:
		return "ccs";
	case XE_ENGINE_CLASS_MAX:
		break;
	}

	return NULL;
}

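/**
 * xe_hw_engine_read_timestamp() - Read the engine timestamp register
 * @hwe: engine
 *
 * Return: the 64-bit RING_TIMESTAMP value, read as two 32-bit halves.
 */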
u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
{
	return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
}

enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
{
	return engine_infos[hwe->engine_id].domain;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

/**
 * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
 * @xe: xe device
 * @eci: engine class and instance
 *
 * This function will find a hardware engine for the given engine
 * class and instance.
 *
 * Return: the xe_hw_engine pointer if found, NULL otherwise.
 */
struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe,
		    struct drm_xe_engine_class_instance eci)
{
	unsigned int idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}
