// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <linux/nospec.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_topology.h"
#include "xe_guc_capture.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

#define MAX_MMIO_BASES 3
struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	unsigned int irq_offset : 8;
	enum xe_force_wake_domains domain;
	u32 mmio_base;
};

static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.irq_offset = ilog2(INTR_RCS0),
		.domain = XE_FW_RENDER,
		.mmio_base = RENDER_RING_BASE,
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.irq_offset = ilog2(INTR_BCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = BLT_RING_BASE,
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.irq_offset = ilog2(INTR_BCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS1_RING_BASE,
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.irq_offset = ilog2(INTR_BCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS2_RING_BASE,
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.irq_offset = ilog2(INTR_BCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS3_RING_BASE,
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.irq_offset = ilog2(INTR_BCS(4)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS4_RING_BASE,
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.irq_offset = ilog2(INTR_BCS(5)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS5_RING_BASE,
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.irq_offset = ilog2(INTR_BCS(6)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS6_RING_BASE,
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.irq_offset = ilog2(INTR_BCS(7)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS7_RING_BASE,
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.irq_offset = ilog2(INTR_BCS8),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS8_RING_BASE,
	},

	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VCS(0)),
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_base = BSD_RING_BASE,
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VCS(1)),
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_base = BSD2_RING_BASE,
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VCS(2)),
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_base = BSD3_RING_BASE,
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VCS(3)),
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_base = BSD4_RING_BASE,
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.irq_offset = 32 + ilog2(INTR_VCS(4)),
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_base = XEHP_BSD5_RING_BASE,
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.irq_offset = 32 + ilog2(INTR_VCS(5)),
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_base = XEHP_BSD6_RING_BASE,
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.irq_offset = 32 + ilog2(INTR_VCS(6)),
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_base = XEHP_BSD7_RING_BASE,
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.irq_offset = 32 + ilog2(INTR_VCS(7)),
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_base = XEHP_BSD8_RING_BASE,
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VECS(0)),
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_base = VEBOX_RING_BASE,
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VECS(1)),
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_base = VEBOX2_RING_BASE,
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VECS(2)),
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_base = XEHP_VEBOX3_RING_BASE,
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VECS(3)),
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_base = XEHP_VEBOX4_RING_BASE,
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.irq_offset = ilog2(INTR_CCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE0_RING_BASE,
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.irq_offset = ilog2(INTR_CCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE1_RING_BASE,
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.irq_offset = ilog2(INTR_CCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE2_RING_BASE,
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.irq_offset = ilog2(INTR_CCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE3_RING_BASE,
	},
	[XE_HW_ENGINE_GSCCS0] = {
		.name = "gsccs0",
		.class = XE_ENGINE_CLASS_OTHER,
		.instance = OTHER_GSC_INSTANCE,
		.domain = XE_FW_GSC,
		.mmio_base = GSCCS_RING_BASE,
	},
};

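/* Driver-managed cleanup: destroy the execlist port (if any) and detach the engine from its GT */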
static void hw_engine_fini(void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);

	hwe->gt = NULL;
}

/**
 * xe_hw_engine_mmio_write32() - Write engine register
 * @hwe: engine
 * @reg: register to write into
 * @val: desired 32-bit value to write
 *
 * This function will write val into an engine specific register.
 * Forcewake must be held by the caller.
 */
void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
			       struct xe_reg reg, u32 val)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	xe_mmio_write32(&hwe->gt->mmio, reg, val);
}

/**
 * xe_hw_engine_mmio_read32() - Read engine register
 * @hwe: engine
 * @reg: register to read from
 *
 * This function will read from an engine specific register.
 * Forcewake must be held by the caller.
 *
 * Return: value of the 32-bit register.
 */
u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	return xe_mmio_read32(&hwe->gt->mmio, reg);
}

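/*
 * Program the per-engine registers needed before first submission: unmask
 * HWSTAM, point RING_HWS_PGA at the hardware status page, select non-legacy
 * ring mode (plus MSI-X interrupts where supported) and clear STOP_RING.
 * Compute engines additionally require RCU_MODE_CCS_ENABLE to be set.
 */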
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
	u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
		xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));

	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
	xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
				  xe_bo_ggtt_addr(hwe->hwsp));

	if (xe_device_has_msix(gt_to_xe(hwe->gt)))
		ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
	xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
	xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
				  _MASKED_BIT_DISABLE(STOP_RING));
	xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}

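/* RTP match helper: first render/compute engine on a GT with fixed CCS mode enabled */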
static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
						 const struct xe_hw_engine *hwe)
{
	return xe_gt_ccs_mode_enabled(gt) &&
	       xe_rtp_match_first_render_or_compute(gt, hwe);
}

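/* RTP match helper: true if WMTP is fused off (Xe2+ render/compute engines only) */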
static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
				      const struct xe_hw_engine *hwe)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
		return false;

	if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
	    hwe->class != XE_ENGINE_CLASS_RENDER)
		return false;

	return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
}

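/**
 * xe_hw_engine_setup_default_lrc_state() - Set up default LRC register state
 * @hwe: engine
 *
 * Build the list of engine register values (hwe->reg_lrc) that every new
 * default LRC image for this engine starts from.
 */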
void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = gt->mocs.uc_index;
	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr lrc_setup[] = {
		/*
		 * Some blitter commands do not have a field for MOCS; those
		 * commands will use the MOCS index pointed to by BLIT_CCTL.
		 * The BLIT_CCTL registers need to be programmed to un-cached.
		 */
		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
			       ENGINE_CLASS(COPY)),
		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
					   BLIT_CCTL_DST_MOCS_MASK |
					   BLIT_CCTL_SRC_MOCS_MASK,
					   blit_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Disable WMTP if HW doesn't support it */
		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
		  XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
					   PREEMPT_GPGPU_LEVEL_MASK,
					   PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
		},
	};

	xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
}

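/*
 * Build the default save/restore register state (hwe->reg_sr) applied to the
 * engine via MMIO: command-streamer MOCS overrides, GSC idle messaging,
 * priority mem read and fixed-slice CCS mode, where applicable.
 */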
static void
hw_engine_setup_default_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be
	 * used by the command streamer when executing commands that
	 * don't have a way to explicitly specify a MOCS setting.
	 * The default should usually reference whichever MOCS entry
	 * corresponds to uncached behavior, although use of a WB cached
	 * entry is recommended by the spec in certain circumstances on
	 * specific platforms.
	 * Bspec: 72161
	 */
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
				 gt->mocs.wb_index : gt->mocs.uc_index;
	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr engine_entries[] = {
		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
					   CMD_CCTL_WRITE_OVERRIDE_MASK |
					   CMD_CCTL_READ_OVERRIDE_MASK,
					   ring_cmd_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/*
		 * To allow the GSC engine to go idle on MTL we need to enable
		 * idle messaging and set the hysteresis value (we use 0xA=5us
		 * as recommended in spec). On platforms after MTL this is
		 * enabled by default.
		 */
		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
				     IDLE_MSG_DISABLE,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
					   IDLE_WAIT_TIME,
					   0xA,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Enable Priority Mem Read */
		{ XE_RTP_NAME("Priority_Mem_Read"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Use Fixed slice CCS mode */
		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
					   RCU_MODE_FIXED_SLICE_CCS_MODE))
		},
	};

	xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
}

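/* Return the static engine descriptor matching class:instance, or NULL */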
static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
{
	const struct engine_info *info;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		info = &engine_infos[id];
		if (info->class == class && info->instance == instance)
			return info;
	}

	return NULL;
}

static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
{
	/* For MSI-X, hw engines report to offset of engine instance zero */
	const struct engine_info *info = find_engine_info(class, 0);

	xe_gt_assert(gt, info);

	return info ? info->irq_offset : 0;
}

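/*
 * Early, MMIO-independent initialization of a single engine: copy the static
 * descriptor, install the default scheduling properties for its class and
 * build the per-engine workaround, tuning and whitelist register lists.
 */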
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	xe_gt_assert(gt, !hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = info->mmio_base;
	hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
		get_msix_irq_offset(gt, info->class) :
		info->irq_offset;
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	hwe->eclass = &gt->eclass[hwe->class];
	if (!hwe->eclass->sched_props.job_timeout_ms) {
		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;

		/*
		 * The GSC engine can accept submissions while the GSC shim is
		 * being reset, during which time the submission is stalled. In
		 * the worst case, the shim reset can take up to the maximum GSC
		 * command execution time (250ms), so the request start can be
		 * delayed by that much; the request itself can take that long
		 * without being preemptible, which means worst case it can
		 * theoretically take up to 500ms for a preemption to go through
		 * on the GSC engine. Adding to that an extra 100ms as a safety
		 * margin, we get a minimum recommended timeout of 600ms.
		 * The preempt_timeout value can't be tuned for OTHER_CLASS
		 * because the class is reserved for kernel usage, so we just
		 * need to make sure that the starting value is above that
		 * threshold; since our default value (640ms) is greater than
		 * 600ms, the only way we can go below is via a kconfig setting.
		 * If that happens, log it in dmesg and update the value.
		 */
		if (hwe->class == XE_ENGINE_CLASS_OTHER) {
			const u32 min_preempt_timeout = 600 * 1000;

			if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
				hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
				xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
			}
		}

		/* Record default props */
		hwe->eclass->defaults = hwe->eclass->sched_props;
	}

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_tuning_process_engine(hwe);
	xe_wa_process_engine(hwe);
	hw_engine_setup_default_state(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}

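/*
 * WA 16023105232: keep the context-switch idle delay (RING_IDLEDLY) strictly
 * below the power-context idle wait time (RING_PWRCTX_MAXCNT); clamp IDLEDLY
 * if the values programmed by hardware/firmware violate that.
 */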
static void adjust_idledly(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	u32 idledly, maxcnt;
	u32 idledly_units_ps = 8 * gt->info.timestamp_base;
	u32 maxcnt_units_ns = 640;
	bool inhibit_switch = 0;

	if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) {
		idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
		maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));

		inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED;
		idledly = REG_FIELD_GET(IDLE_DELAY, idledly);
		idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000);
		maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt);
		maxcnt *= maxcnt_units_ns;

		if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) {
			idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * maxcnt_units_ns),
						    idledly_units_ps);
			idledly = DIV_ROUND_CLOSEST(idledly, 1000);
			xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly);
		}
	}
}

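/*
 * Full initialization of a single engine: apply the save/restore MMIO state,
 * allocate and pin the hardware status page in GGTT, create an execlist port
 * when GuC submission is disabled, and register driver-managed cleanup.
 */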
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	int err;

	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
						 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
						 XE_BO_FLAG_GGTT |
						 XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	if (!xe_device_uc_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_hwsp;
		}
	} else {
		/* GSCCS has a special interrupt for reset */
		if (hwe->class == XE_ENGINE_CLASS_OTHER)
			hwe->irq_handler = xe_gsc_hwe_irq_handler;

		if (!IS_SRIOV_VF(xe))
			xe_hw_engine_enable_ring(hwe);
	}

	/* We reserve the highest BCS instance for USM */
	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	/* Ensure IDLEDLY is lower than MAXCNT */
	adjust_idledly(hwe);

	return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);

err_hwsp:
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}

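/* Assign per-class logical instance numbers in enumeration order */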
static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}

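/* Trim VCS/VECS engines that the media fuse register reports as absent */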
static void read_media_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);

	/*
	 * Pre-Xe_HP platforms had register bits representing absent engines,
	 * whereas Xe_HP and beyond have bits representing present engines.
	 * Invert the polarity on old platforms so that we can use common
	 * handling below.
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "vecs%u fused off\n", j);
		}
	}
}

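/* Trim fused-off BCS1-BCS8 engines; their availability follows the MEML3 fuse bits */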
static void read_copy_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 bcs_mask;

	if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);

	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
	for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j / 2) & bcs_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "bcs%u fused off\n", j + 1);
		}
	}
}

static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	/*
	 * CCS fusing based on DSS masks only applies to platforms that can
	 * have more than one CCS.
	 */
	if (hweight64(gt->info.engine_mask &
		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
		return;

	/*
	 * CCS availability on Xe_HP is inferred from the presence of DSS in
	 * each quadrant.
	 */
	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 ccs_mask;

	ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);

	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if ((ccs_mask & BIT(j)) == 0) {
			gt->info.engine_mask &= ~BIT(i);
			drm_info(&xe->drm, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses(struct xe_gt *gt)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
		read_compute_fuses_from_reg(gt);
	else
		read_compute_fuses_from_dss(gt);
}

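/* Drop the GSCCS from the engine mask when no usable GSC firmware is available */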
static void check_gsc_availability(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return;

	/*
	 * The GSCCS is only used to communicate with the GSC FW, so if we don't
	 * have the FW there is nothing we need the engine for and can therefore
	 * skip its initialization.
	 */
	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);

		/* interrupts were previously enabled, so turn them off */
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);

		drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
	}
}

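/**
 * xe_hw_engines_init_early() - Early initialization of the GT's hardware engines
 * @gt: GT structure
 *
 * Read the fuse registers to trim unavailable engines from the engine mask
 * and perform the MMIO-independent part of per-engine initialization.
 *
 * Return: 0 on success.
 */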
int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_media_fuses(gt);
	read_copy_fuses(gt);
	read_compute_fuses(gt);
	check_gsc_availability(gt);

	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

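/**
 * xe_hw_engines_init() - Initialize all hardware engines of a GT
 * @gt: GT structure
 *
 * Perform the full per-engine initialization, assign logical instance
 * numbers and set up the hardware engine groups.
 *
 * Return: 0 on success, negative error code on failure.
 */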
int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);
	err = xe_hw_engine_setup_groups(gt);
	if (err)
		return err;

	return 0;
}

void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_RENDER_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}


/**
 * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
 * @hwe: Xe HW Engine.
 * @q: The exec queue object.
 *
 * The snapshot can be printed out at a later stage, for example during
 * dev_coredump analysis.
 *
 * Returns: a Xe HW Engine snapshot object that must be freed by the
 * caller, using `xe_hw_engine_snapshot_free`.
 */
struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
{
	struct xe_hw_engine_snapshot *snapshot;
	struct __guc_capture_parsed_output *node;

	if (!xe_hw_engine_is_valid(hwe))
		return NULL;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
	snapshot->hwe = hwe;
	snapshot->logical_instance = hwe->logical_instance;
	snapshot->forcewake.domain = hwe->domain;
	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
						    hwe->domain);
	snapshot->mmio_base = hwe->mmio_base;
	snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);

	/* no more VF accessible data below this point */
	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
		return snapshot;

	if (q) {
		/* If we got a GuC capture, set the source to GuC */
		node = xe_guc_capture_get_matching_and_lock(q);
		if (node) {
			struct xe_device *xe = gt_to_xe(hwe->gt);
			struct xe_devcoredump *coredump = &xe->devcoredump;

			coredump->snapshot.matched_node = node;
			xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
			return snapshot;
		}
	}

	/* otherwise, do manual capture */
	xe_engine_manual_capture(hwe, snapshot);
	xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");

	return snapshot;
}

/**
 * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
	struct xe_gt *gt;

	if (!snapshot)
		return;

	gt = snapshot->hwe->gt;
	/*
	 * xe_guc_capture_put_matched_nodes is called here and from
	 * xe_devcoredump_snapshot_free, to cover the 2 calling paths
	 * of hw_engines - debugfs and devcoredump free.
	 */
	xe_guc_capture_put_matched_nodes(&gt->uc.guc);

	kfree(snapshot->name);
	kfree(snapshot);
}

/**
 * xe_hw_engine_print - Xe HW Engine Print.
 * @hwe: Hardware Engine.
 * @p: drm_printer.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
	xe_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}

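/**
 * xe_hw_engine_mask_per_class() - Build a mask of engine instances of a class
 * @gt: GT structure
 * @engine_class: engine class
 *
 * Return: bitmask of the instances of @engine_class present on @gt.
 */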
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}

bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	if (hwe->class == XE_ENGINE_CLASS_OTHER)
		return true;

	/* Check for engines disabled by ccs_mode setting */
	if (xe_gt_ccs_mode_enabled(gt) &&
	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
	    hwe->logical_instance >= gt->ccs_mode)
		return true;

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}

const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
{
	switch (class) {
	case XE_ENGINE_CLASS_RENDER:
		return "rcs";
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		return "vcs";
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return "vecs";
	case XE_ENGINE_CLASS_COPY:
		return "bcs";
	case XE_ENGINE_CLASS_OTHER:
		return "other";
	case XE_ENGINE_CLASS_COMPUTE:
		return "ccs";
	case XE_ENGINE_CLASS_MAX:
		break;
	}

	return NULL;
}

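/**
 * xe_hw_engine_read_timestamp() - Read the engine timestamp counter
 * @hwe: engine
 *
 * Return: current 64-bit value of the engine's RING_TIMESTAMP register.
 */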
u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
{
	return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
}

enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
{
	return engine_infos[hwe->engine_id].domain;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

/**
 * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
 * @xe: xe device
 * @eci: engine class and instance
 *
 * This function will find a hardware engine for given engine
 * class and instance.
 *
 * Return: the xe_hw_engine pointer if found, NULL otherwise.
 */
struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe,
		    struct drm_xe_engine_class_instance eci)
{
	unsigned int idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}