// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc.h"

#include <linux/iopoll.h>
#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_throttle.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_guc_ads.h"
#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_sriov.h"
#include "xe_sriov_pf_migration.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
			    struct xe_bo *bo)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 addr;

	/*
	 * For most BOs, the address on the allocating tile is fine. However for
	 * some, e.g. G2G CTB, the address on a specific tile is required as it
	 * might be different for each tile. So, just always ask for the address
	 * on the target GuC.
	 */
	addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id);

	/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
	xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
	xe_assert(xe, addr < GUC_GGTT_TOP);
	xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr);

	return addr;
}

static u32 guc_ctl_debug_flags(struct xe_guc *guc)
{
	u32 level = xe_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= FIELD_PREP(GUC_LOG_VERBOSITY, GUC_LOG_LEVEL_TO_VERBOSITY(level));

	return flags;
}

static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;

	if (!xe->info.skip_guc_pc)
		flags |= GUC_CTL_ENABLE_SLPC;

	if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)))
		flags |= GUC_CTL_ENABLE_PSMI_LOGGING;

	if (xe_guc_using_main_gamctrl_queues(guc))
		flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES;

	return flags;
}

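/*
 * The GuC log sub-buffer sizes are encoded below either in 4K pages or, when
 * a size is a whole number of megabytes, in 1M units with the corresponding
 * *_ALLOC_UNITS flag set. A purely illustrative example (the real sizes come
 * from the XE_GUC_LOG_*_BUFFER_SIZE defines): an 8M crash dump buffer would
 * select LOG_UNIT == SZ_1M and be encoded as 8M / 1M - 1 = 7, whereas a 16K
 * buffer would select LOG_UNIT == SZ_4K and be encoded as 16K / 4K - 1 = 3.
 */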
static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
{
	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
	u32 flags;

#if (((XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE) % SZ_1M) == 0)
#define LOG_UNIT SZ_1M
#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
#else
#define LOG_UNIT SZ_4K
#define LOG_FLAG 0
#endif

#if (((XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
#define CAPTURE_UNIT SZ_1M
#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
#else
#define CAPTURE_UNIT SZ_4K
#define CAPTURE_FLAG 0
#endif

	BUILD_BUG_ON(!XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		FIELD_PREP(GUC_LOG_CRASH_DUMP, XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE / LOG_UNIT - 1) |
		FIELD_PREP(GUC_LOG_EVENT_DATA, XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE / LOG_UNIT - 1) |
		FIELD_PREP(GUC_LOG_STATE_CAPTURE, XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE /
			   CAPTURE_UNIT - 1) |
		FIELD_PREP(GUC_LOG_BUF_ADDR, offset);

#undef LOG_UNIT
#undef LOG_FLAG
#undef CAPTURE_UNIT
#undef CAPTURE_FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
	u32 flags = FIELD_PREP(GUC_ADS_ADDR, ads);

	return flags;
}

static bool needs_wa_dual_queue(struct xe_gt *gt)
{
	/*
	 * The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions
	 * on RCS and CCSes with different address spaces, which on DG2 is
	 * required as a WA for an HW bug.
	 */
	if (XE_GT_WA(gt, 22011391025))
		return true;

	/*
	 * On newer platforms, the HW has been updated to not allow parallel
	 * execution of different address spaces, so the RCS/CCS will stall the
	 * context switch if one of the other RCS/CCSes is busy with a different
	 * address space. While functionally correct, having a submission
	 * stalled on the HW limits the GuC's ability to shuffle things around
	 * and can cause complications if the non-stalled submission runs for a
	 * long time, because the GuC doesn't know that the stalled submission
	 * isn't actually running and might declare it as hung. Therefore, we
	 * enable the DUAL_QUEUE_WA on all newer platforms on GTs that have CCS
	 * engines to move management back to the GuC.
	 */
	if (CCS_INSTANCES(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
		return true;

	return false;
}

static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	if (XE_GT_WA(gt, 22012773006))
		flags |= GUC_WA_POLLCS;

	if (XE_GT_WA(gt, 14014475959))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	if (needs_wa_dual_queue(gt))
		flags |= GUC_WA_DUAL_QUEUE;

	/*
	 * Wa_22011802037: FIXME - there's more to be done than simply setting
	 * this flag: make sure each CS is stopped when preparing for GT reset
	 * and wait for pending MI_FW.
	 */
	if (GRAPHICS_VERx100(xe) < 1270)
		flags |= GUC_WA_PRE_PARSER;

	if (XE_GT_WA(gt, 22012727170) || XE_GT_WA(gt, 22012727685))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	if (XE_GT_WA(gt, 18020744125) &&
	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	if (XE_GT_WA(gt, 1509372804))
		flags |= GUC_WA_RENDER_RST_RC6_EXIT;

	if (XE_GT_WA(gt, 14018913170))
		flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;

	if (XE_GT_WA(gt, 16023683509))
		flags |= GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6;

	return flags;
}

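/*
 * Worked example with made-up IDs: a devid of 0x56a0 and a revid of 0x03
 * would be packed by guc_ctl_devid() below as 0x56a00003.
 */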
static u32 guc_ctl_devid(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

	return (((u32)xe->info.devid) << 16) | xe->info.revid;
}

static void guc_print_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_gt_dbg(gt, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

static void guc_init_params(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = 0;
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = 0;
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

/*
 * Initialize the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_write_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int i;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
}

static int guc_action_register_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev,
					  u32 desc_addr, u32 buff_addr, u32 size)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 action[] = {
		XE_GUC_ACTION_REGISTER_G2G,
		FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) |
		FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
		FIELD_PREP(XE_G2G_REGISTER_TILE, dst_tile) |
		FIELD_PREP(XE_G2G_REGISTER_DEVICE, dst_dev),
		desc_addr,
		buff_addr,
	};

	xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
	xe_assert(xe, !(size % SZ_4K));

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

static int guc_action_deregister_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 action[] = {
		XE_GUC_ACTION_DEREGISTER_G2G,
		FIELD_PREP(XE_G2G_DEREGISTER_TYPE, type) |
		FIELD_PREP(XE_G2G_DEREGISTER_TILE, dst_tile) |
		FIELD_PREP(XE_G2G_DEREGISTER_DEVICE, dst_dev),
	};

	xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

#define G2G_DEV(gt)	(((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)

#define G2G_BUFFER_SIZE		(SZ_4K)
#define G2G_DESC_SIZE		(64)
#define G2G_DESC_AREA_SIZE	(SZ_4K)

/*
 * Generate a unique id for each bi-directional CTB for each pair of
 * near and far tiles/devices. The id can then be used as an index into
 * a single allocation that is sub-divided into multiple CTBs.
 *
 * For example, with two devices per tile and two tiles, the table should
 * look like:
 *               Far <tile>.<dev>
 *          0.0   0.1   1.0   1.1
 * N 0.0   --/-- 00/01 02/03 04/05
 * e 0.1   01/00 --/-- 06/07 08/09
 * a 1.0   03/02 07/06 --/-- 10/11
 * r 1.1   05/04 09/08 11/10 --/--
 *
 * Where each entry is the Rx/Tx channel id.
 *
 * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
 * be reading from channel #11 and writing to channel #10. Whereas,
 * GuC #2 talking to GuC #3 would read from #10 and write to #11.
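 *
 * As a worked example of the slot arithmetic below (assuming
 * XE_G2G_TYPE_IN == 0 and XE_G2G_TYPE_OUT == 1): take near = 1.1 and
 * far = 1.0 with four GuCs (max_inst == 4). Since far < near, we are in
 * the bottom-left half, so x = 3, y = 2 and direction = 1 - type. The
 * rows above contribute idx = (4 - 2) + (4 - 1) = 5, this row adds
 * x - 1 - y = 0, and doubling for the Rx/Tx pairing gives 10. IN thus
 * lands on channel #11 and OUT on channel #10, matching the 11/10 entry
 * in the table above.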
 */
static unsigned int g2g_slot(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
			     u32 type, u32 max_inst, bool have_dev)
{
	u32 near = near_tile, far = far_tile;
	u32 idx = 0, x, y, direction;
	int i;

	if (have_dev) {
		near = (near << 1) | near_dev;
		far = (far << 1) | far_dev;
	}

	/* No need to send to oneself */
	if (far == near)
		return -1;

	if (far > near) {
		/* Top right table half */
		x = far;
		y = near;

		/* T/R is 'forwards' direction */
		direction = type;
	} else {
		/* Bottom left table half */
		x = near;
		y = far;

		/* B/L is 'backwards' direction */
		direction = (1 - type);
	}

	/* Count the rows prior to the target */
	for (i = y; i > 0; i--)
		idx += max_inst - i;

	/* Count this row up to the target */
	idx += (x - 1 - y);

	/* Slots are in Rx/Tx pairs */
	idx *= 2;

	/* Pick Rx/Tx direction */
	idx += direction;

	return idx;
}

static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 type, bool have_dev)
{
	struct xe_gt *near_gt = guc_to_gt(near_guc);
	struct xe_device *xe = gt_to_xe(near_gt);
	struct xe_bo *g2g_bo;
	u32 near_tile = gt_to_tile(near_gt)->id;
	u32 near_dev = G2G_DEV(near_gt);
	u32 far_tile = gt_to_tile(far_gt)->id;
	u32 far_dev = G2G_DEV(far_gt);
	u32 max = xe->info.gt_count;
	u32 base, desc, buf;
	int slot;

	/* G2G is not allowed between different cards */
	xe_assert(xe, xe == gt_to_xe(far_gt));

	g2g_bo = near_guc->g2g.bo;
	xe_assert(xe, g2g_bo);

	slot = g2g_slot(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
	xe_assert(xe, slot >= 0);

	base = guc_bo_ggtt_addr(near_guc, g2g_bo);
	desc = base + slot * G2G_DESC_SIZE;
	buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;

	xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
	xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo));

	return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
					      desc, buf, G2G_BUFFER_SIZE);
}

static void guc_g2g_deregister(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type)
{
	guc_action_deregister_g2g_buffer(guc, type, far_tile, far_dev);
}

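/*
 * Back-of-the-envelope sizing (assuming XE_G2G_TYPE_LIMIT == 2, i.e. one in
 * and one out CTB per pair): with gt_count == 4 there are 4 * 3 / 2 = 6 GuC
 * pairs and therefore 12 channels, so the allocation computed below is
 * 12 * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE = 48K + 4K = 52K.
 */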
static u32 guc_g2g_size(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int count = xe->info.gt_count;
	u32 num_channels = (count * (count - 1)) / 2;

	xe_assert(xe, num_channels * XE_G2G_TYPE_LIMIT * G2G_DESC_SIZE <= G2G_DESC_AREA_SIZE);

	return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
}

static bool xe_guc_g2g_wanted(struct xe_device *xe)
{
	/* Can't do GuC to GuC communication if there is only one GuC */
	if (xe->info.gt_count <= 1)
		return false;

	/* No current user */
	return false;
}

static int guc_g2g_alloc(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	u32 g2g_size;

	if (guc->g2g.bo)
		return 0;

	if (gt->info.id != 0) {
		struct xe_gt *root_gt = xe_device_get_gt(xe, 0);
		struct xe_guc *root_guc = &root_gt->uc.guc;
		struct xe_bo *bo;

		bo = xe_bo_get(root_guc->g2g.bo);
		if (!bo)
			return -ENODEV;

		guc->g2g.bo = bo;
		guc->g2g.owned = false;
		return 0;
	}

	g2g_size = guc_g2g_size(guc);
	bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_ALL |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
	guc->g2g.bo = bo;
	guc->g2g.owned = true;

	return 0;
}

static void guc_g2g_fini(struct xe_guc *guc)
{
	if (!guc->g2g.bo)
		return;

	/* Unpinning the owned object is handled by generic shutdown */
	if (!guc->g2g.owned)
		xe_bo_put(guc->g2g.bo);

	guc->g2g.bo = NULL;
}

static int guc_g2g_start(struct xe_guc *guc)
{
	struct xe_gt *far_gt, *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int i, j;
	int t, err;
	bool have_dev;

	if (!guc->g2g.bo) {
		int ret;

		ret = guc_g2g_alloc(guc);
		if (ret)
			return ret;
	}

	/* GuC interface will need extending if more GT device types are ever created. */
	xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA));

	/* Channel numbering depends on whether there are multiple GTs per tile */
	have_dev = xe->info.gt_count > xe->info.tile_count;

	for_each_gt(far_gt, xe, i) {
		u32 far_tile, far_dev;

		if (far_gt->info.id == gt->info.id)
			continue;

		far_tile = gt_to_tile(far_gt)->id;
		far_dev = G2G_DEV(far_gt);

		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
			err = guc_g2g_register(guc, far_gt, t, have_dev);
			if (err) {
				while (--t >= 0)
					guc_g2g_deregister(guc, far_tile, far_dev, t);
				goto err_deregister;
			}
		}
	}

	return 0;

err_deregister:
	for_each_gt(far_gt, xe, j) {
		u32 tile, dev;

		if (far_gt->info.id == gt->info.id)
			continue;

		if (j >= i)
			break;

		tile = gt_to_tile(far_gt)->id;
		dev = G2G_DEV(far_gt);

		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
			guc_g2g_deregister(guc, tile, dev, t);
	}

	return err;
}

static int __guc_opt_in_features_enable(struct xe_guc *guc, u64 addr, u32 num_dwords)
{
	u32 action[] = {
		XE_GUC_ACTION_OPT_IN_FEATURE_KLV,
		lower_32_bits(addr),
		upper_32_bits(addr),
		num_dwords
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

static bool supports_dynamic_ics(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);

	/* Dynamic ICS is available on PVC and on Xe2 and newer platforms. */
	if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20)
		return false;

	/*
	 * The feature is currently not compatible with multi-lrc, so the GuC
	 * does not support it at all on the media engines (which are the main
	 * users of mlrc). On the primary GT side, to avoid it being used in
	 * conjunction with mlrc, we only enable it if we are in single CCS
	 * mode.
	 */
	if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1)
		return false;

	/*
	 * Dynamic ICS requires GuC v70.40.1, which maps to compatibility
	 * version v1.18.4.
	 */
	return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 18, 4);
}

#define OPT_IN_MAX_DWORDS 16
int xe_guc_opt_in_features_enable(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS);
	u32 count = 0;
	u32 *klvs;
	int ret;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	klvs = xe_guc_buf_cpu_ptr(buf);

	/*
	 * The extra CAT error type opt-in was added in GuC v70.17.0, which maps
	 * to compatibility version v1.7.0.
	 * Note that the GuC allows enabling this KLV even on platforms that do
	 * not support the extra type; in such a case the returned type variable
	 * will be set to a known invalid value which we can check against.
	 */
	if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0))
		klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_EXT_CAT_ERR_TYPE);

	if (supports_dynamic_ics(guc))
		klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH);

	if (count) {
		xe_assert(xe, count <= OPT_IN_MAX_DWORDS);

		ret = __guc_opt_in_features_enable(guc, xe_guc_buf_flush(buf), count);
		if (ret < 0) {
			xe_gt_err(guc_to_gt(guc),
				  "failed to enable GuC opt-in features: %pe\n",
				  ERR_PTR(ret));
			return ret;
		}
	}

	return 0;
}

static void guc_fini_hw(void *arg)
{
	struct xe_guc *guc = arg;
	struct xe_gt *gt = guc_to_gt(guc);

	xe_with_force_wake(fw_ref, gt_to_fw(gt), XE_FORCEWAKE_ALL)
		xe_uc_sanitize_reset(&guc_to_gt(guc)->uc);

	guc_g2g_fini(guc);
}

/**
 * xe_guc_comm_init_early - early initialization of GuC communication
 * @guc: the &xe_guc to initialize
 *
 * Must be called prior to first MMIO communication with GuC firmware.
 */
void xe_guc_comm_init_early(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	if (xe_gt_is_media_type(gt))
		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
	else
		guc->notify_reg = GUC_HOST_INTERRUPT;
}

static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
{
	struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
	struct xe_device *xe = guc_to_xe(guc);
	int ret;

	if (!IS_DGFX(guc_to_xe(guc)))
		return 0;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
	if (ret)
		return ret;

	return 0;
}

static int vf_guc_init_noalloc(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int err;

	err = xe_gt_sriov_vf_bootstrap(gt);
	if (err)
		return err;

	err = xe_gt_sriov_vf_query_config(gt);
	if (err)
		return err;

	return 0;
}

int xe_guc_init_noalloc(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	xe_guc_comm_init_early(guc);

	ret = xe_guc_ct_init_noalloc(&guc->ct);
	if (ret)
		goto out;

	ret = xe_guc_relay_init(&guc->relay);
	if (ret)
		goto out;

	if (IS_SRIOV_VF(xe)) {
		ret = vf_guc_init_noalloc(guc);
		if (ret)
			goto out;
	}

	return 0;

out:
	xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
	return ret;
}

int xe_guc_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	guc->fw.type = XE_UC_FW_TYPE_GUC;
	ret = xe_uc_fw_init(&guc->fw);
	if (ret)
		return ret;

	if (!xe_uc_fw_is_enabled(&guc->fw))
		return 0;

	/* Disable page reclaim if the GuC firmware does not support it */
	if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 14, 0))
		xe->info.has_page_reclaim_hw_assist = false;

	if (IS_SRIOV_VF(xe)) {
		ret = xe_guc_ct_init(&guc->ct);
		if (ret)
			goto out;
		return 0;
	}

	ret = xe_guc_log_init(&guc->log);
	if (ret)
		goto out;

	ret = xe_guc_capture_init(guc);
	if (ret)
		goto out;

	ret = xe_guc_ads_init(&guc->ads);
	if (ret)
		goto out;

	ret = xe_guc_ct_init(&guc->ct);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);

	ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
	if (ret)
		goto out;

	guc_init_params(guc);

	return 0;

out:
	xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
	return ret;
}

static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int err;

	err = xe_guc_submit_init(guc, xe_gt_sriov_vf_guc_ids(guc_to_gt(guc)));
	if (err)
		return err;

	err = xe_guc_buf_cache_init(&guc->buf);
	if (err)
		return err;

	/* XXX xe_guc_db_mgr_init not needed for now */

	return 0;
}

static u32 guc_additional_cache_size(struct xe_device *xe)
{
	if (IS_SRIOV_PF(xe) && xe_sriov_pf_migration_supported(xe))
		return XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE;
	else
		return 0; /* Fall back to the default size */
}

/**
 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int ret;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return vf_guc_init_post_hwconfig(guc);

	ret = xe_guc_realloc_post_hwconfig(guc);
	if (ret)
		return ret;

	ret = xe_guc_ct_init_post_hwconfig(&guc->ct);
	if (ret)
		return ret;

	guc_init_params_post_hwconfig(guc);

	ret = xe_guc_submit_init(guc, ~0);
	if (ret)
		return ret;

	ret = xe_guc_db_mgr_init(&guc->dbm, ~0);
	if (ret)
		return ret;

	ret = xe_guc_pc_init(&guc->pc);
	if (ret)
		return ret;

	ret = xe_guc_engine_activity_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_buf_cache_init_with_size(&guc->buf,
					      guc_additional_cache_size(guc_to_xe(guc)));
	if (ret)
		return ret;

	return xe_guc_ads_init_post_hwconfig(&guc->ads);
}

int xe_guc_post_load_init(struct xe_guc *guc)
{
	int ret;

	xe_guc_ads_populate_post_load(&guc->ads);

	ret = xe_guc_opt_in_features_enable(guc);
	if (ret)
		return ret;

	if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
		ret = guc_g2g_start(guc);
		if (ret)
			return ret;
	}

	return xe_guc_submit_enable(guc);
}

int xe_guc_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	u32 guc_status, gdrst;
	int ret;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_bootstrap(gt);

	xe_mmio_write32(mmio, GDRST, GRDOM_GUC);

	ret = xe_mmio_wait32(mmio, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
	if (ret) {
		xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
		goto err_out;
	}

	guc_status = xe_mmio_read32(mmio, GUC_STATUS);
	if (!(guc_status & GS_MIA_IN_RESET)) {
		xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
			  guc_status);
		ret = -EIO;
		goto err_out;
	}

	return 0;

err_out:
	return ret;
}

static void guc_prepare_xfer(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	struct xe_device *xe = guc_to_xe(guc);
	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
		GUC_ENABLE_MIA_CLOCK_GATING;

	if (GRAPHICS_VERx100(xe) < 1250)
		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
			      GUC_ENABLE_MIA_CACHING;

	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);

	/* Must program this register before loading the ucode with DMA */
	xe_mmio_write32(mmio, GUC_SHIM_CONTROL, shim_flags);

	xe_mmio_write32(mmio, GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	/* Make sure GuC receives ARAT interrupts */
	xe_mmio_rmw32(mmio, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
}

/*
 * Support both MMIO and in-memory RSA signature transfer.
 */
static int guc_xfer_rsa(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	size_t copied;
	int i;

	if (guc->fw.rsa_size > 256) {
		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
				    xe_uc_fw_rsa_offset(&guc->fw);

		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
		return 0;
	}

	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
	if (copied < sizeof(rsa))
		return -ENOMEM;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}

/*
 * Wait for the GuC to start up.
 *
 * Measurements indicate this should take no more than 20ms (assuming the GT
 * clock is at maximum frequency). However, thermal throttling and other issues
 * can prevent the clock from hitting max and thus make the load take
 * significantly longer. Allow up to 3s as a safety margin in normal builds.
 * For CONFIG_DRM_XE_DEBUG, allow up to 20s to account for slower execution,
 * issues in PCODE, the driver, the fan, etc.
 *
 * Keep checking GUC_STATUS every 10ms, with a debug message every 100
 * attempts as an "I'm slow, but alive" message. Regardless, if it takes more
 * than 200ms, emit a warning.
 */

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define GUC_LOAD_TIMEOUT_SEC	20
#else
#define GUC_LOAD_TIMEOUT_SEC	3
#endif
#define GUC_LOAD_TIME_WARN_MSEC	200

static void print_load_status_err(struct xe_gt *gt, u32 status)
{
	struct xe_mmio *mmio = &gt->mmio;
	u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
	u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);

	xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
		  REG_FIELD_GET(GS_MIA_IN_RESET, status),
		  bootrom, ukernel,
		  REG_FIELD_GET(GS_MIA_MASK, status),
		  REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

	switch (bootrom) {
	case XE_BOOTROM_STATUS_NO_KEY_FOUND:
		xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
			  xe_mmio_read32(mmio, GUC_HEADER_INFO));
		break;
	case XE_BOOTROM_STATUS_RSA_FAILED:
		xe_gt_err(gt, "firmware signature verification failed\n");
		break;
	case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
		xe_gt_err(gt, "firmware production part check failure\n");
		break;
	}

	switch (ukernel) {
	case XE_GUC_LOAD_STATUS_HWCONFIG_START:
		xe_gt_err(gt, "still extracting hwconfig table.\n");
		break;
	case XE_GUC_LOAD_STATUS_EXCEPTION:
		xe_gt_err(gt, "firmware exception. EIP: %#x\n",
			  xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
		break;
	case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
		xe_gt_err(gt, "illegal init/ADS data\n");
		break;
	case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
		xe_gt_err(gt, "illegal register in save/restore workaround list\n");
		break;
	case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
		xe_gt_err(gt, "illegal workaround KLV data\n");
		break;
	case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
		xe_gt_err(gt, "illegal feature flag specified\n");
		break;
	}
}

/*
 * Check GUC_STATUS looking for known terminal states (either completion or
 * failure) of either the microkernel status field or the boot ROM status field.
 *
 * Returns 1 for successful completion, -1 for failure and 0 for any
 * intermediate state.
 */
static int guc_load_done(struct xe_gt *gt, u32 *status, u32 *tries)
{
	u32 ukernel, bootrom;

	*status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
	ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, *status);
	bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, *status);

	switch (ukernel) {
	case XE_GUC_LOAD_STATUS_READY:
		return 1;
	case XE_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH:
	case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
	case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
	case XE_GUC_LOAD_STATUS_HWCONFIG_ERROR:
	case XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH:
	case XE_GUC_LOAD_STATUS_DPC_ERROR:
	case XE_GUC_LOAD_STATUS_EXCEPTION:
	case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
	case XE_GUC_LOAD_STATUS_MPU_DATA_INVALID:
	case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
	case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
	case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
		return -1;
	}

	switch (bootrom) {
	case XE_BOOTROM_STATUS_NO_KEY_FOUND:
	case XE_BOOTROM_STATUS_RSA_FAILED:
	case XE_BOOTROM_STATUS_PAVPC_FAILED:
	case XE_BOOTROM_STATUS_WOPCM_FAILED:
	case XE_BOOTROM_STATUS_LOADLOC_FAILED:
	case XE_BOOTROM_STATUS_JUMP_FAILED:
	case XE_BOOTROM_STATUS_RC6CTXCONFIG_FAILED:
	case XE_BOOTROM_STATUS_MPUMAP_INCORRECT:
	case XE_BOOTROM_STATUS_EXCEPTION:
	case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
		return -1;
	}

	if (++*tries >= 100) {
		struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;

		*tries = 0;
		xe_gt_dbg(gt, "GuC load still in progress, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n",
			  xe_guc_pc_get_act_freq(guc_pc),
			  xe_guc_pc_get_cur_freq_fw(guc_pc),
			  *status, ukernel, bootrom);
	}

	return 0;
}

static int guc_wait_ucode(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
	u32 before_freq, act_freq, cur_freq;
	u32 status = 0, tries = 0;
	ktime_t before;
	u64 delta_ms;
	int ret;

	before_freq = xe_guc_pc_get_act_freq(guc_pc);
	before = ktime_get();

	ret = poll_timeout_us(ret = guc_load_done(gt, &status, &tries), ret,
			      10 * USEC_PER_MSEC,
			      GUC_LOAD_TIMEOUT_SEC * USEC_PER_SEC, false);

	delta_ms = ktime_to_ms(ktime_sub(ktime_get(), before));
	act_freq = xe_guc_pc_get_act_freq(guc_pc);
	cur_freq = xe_guc_pc_get_cur_freq_fw(guc_pc);

	if (ret) {
		xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz)\n",
			  status, delta_ms, act_freq, cur_freq);
		print_load_status_err(gt, status);

		return -EPROTO;
	}

	if (delta_ms > GUC_LOAD_TIME_WARN_MSEC) {
		xe_gt_warn(gt, "GuC load: excessive init time: %lldms! [status = 0x%08X]\n",
			   delta_ms, status);
		xe_gt_warn(gt, "GuC load: excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n",
			   act_freq, cur_freq, before_freq,
			   xe_gt_throttle_get_limit_reasons(gt));
	} else {
		xe_gt_dbg(gt, "GuC load: init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X\n",
			  delta_ms, act_freq, cur_freq, before_freq, status);
	}

	return 0;
}
ALLOW_ERROR_INJECTION(guc_wait_ucode, ERRNO);

static int __xe_guc_upload(struct xe_guc *guc)
{
	int ret;

	/* Raise GT freq to speed up HuC/GuC load */
	xe_guc_pc_raise_unslice(&guc->pc);

	guc_write_params(guc);
	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
	 * size > 256). The RSA size and therefore the way we provide it to the
	 * HW is fixed for each platform and hard-coded in the bootrom.
	 */
	ret = guc_xfer_rsa(guc);
	if (ret)
		goto out;

	/*
	 * Current uCode expects the code to be loaded at 8k; locations below
	 * this are used for the stack.
	 */
	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
	if (ret)
		goto out;

	/* Wait for authentication */
	ret = guc_wait_ucode(guc);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
	return 0;

out:
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
	return ret;
}

static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	ret = xe_gt_sriov_vf_connect(gt);
	if (ret)
		goto err_out;

	ret = xe_gt_sriov_vf_query_runtime(gt);
	if (ret)
		goto err_out;

	return 0;

err_out:
	xe_guc_sanitize(guc);
	return ret;
}

/**
 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
 * @guc: The GuC object
 *
 * This function uploads a minimal GuC that does not support submissions but
 * is in a state where the hwconfig table can be read. Next, it reads and
 * parses the hwconfig table so it can be used for subsequent steps in the
 * driver load. Lastly, it enables CT communication (XXX: this is needed for
 * PFs/VFs only).
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	int ret;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return vf_guc_min_load_for_hwconfig(guc);

	xe_guc_ads_populate_minimal(&guc->ads);

	xe_guc_pc_init_early(&guc->pc);

	ret = __xe_guc_upload(guc);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_upload(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	xe_guc_ads_populate(&guc->ads);

	if (xe_guc_using_main_gamctrl_queues(guc))
		xe_mmio_write32(&gt->mmio, MAIN_GAMCTRL_MODE, MAIN_GAMCTRL_QUEUE_SELECT);

	return __xe_guc_upload(guc);
}

static void guc_handle_mmio_msg(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 msg;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15));
	msg &= XE_GUC_RECV_MSG_EXCEPTION |
	       XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0);

	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		xe_gt_err(gt, "Received early GuC crash dump notification!\n");

	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
		xe_gt_err(gt, "Received early GuC exception notification!\n");
}

static void guc_enable_irq(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 events = xe_gt_is_media_type(gt) ?
		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	/* Primary GuC and media GuC share a single enable bit */
	xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE,
			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));

	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
	 */
	xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0);
}

int xe_guc_enable_communication(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	int err;

	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
		struct xe_gt *gt = guc_to_gt(guc);
		struct xe_tile *tile = gt_to_tile(gt);

		err = xe_memirq_init_guc(&tile->memirq, guc);
		if (err)
			return err;
	} else {
		guc_enable_irq(guc);
	}

	err = xe_guc_ct_enable(&guc->ct);
	if (err)
		return err;

	guc_handle_mmio_msg(guc);

	return 0;
}

int xe_guc_suspend(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 action[] = {
		XE_GUC_ACTION_CLIENT_SOFT_RESET,
	};
	int ret;

	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
	if (ret) {
		xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret));
		return ret;
	}

	xe_guc_sanitize(guc);
	return 0;
}

void xe_guc_notify(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	const u32 default_notify_data = 0;

	/*
	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
	 * additional payload data to the GuC but this capability is not
	 * used by the firmware yet. Use default value in the meantime.
	 */
	xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data);
}

int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
{
	u32 action[] = {
		XE_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_addr
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
			  u32 len, u32 *response_buf)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	u32 header, reply;
	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
	bool lost = false;
	int ret;
	int i;

	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);

	xe_assert(xe, len);
	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
		  GUC_HXG_ORIGIN_HOST);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
		  GUC_HXG_TYPE_REQUEST);

retry:
	/* Not in a critical data path, so just use if/else for the GT type */
	if (xe_gt_is_media_type(gt)) {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(mmio, MED_VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(mmio, MED_VF_SW_FLAG(LAST_INDEX));
	} else {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(mmio, VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(mmio, VF_SW_FLAG(LAST_INDEX));
	}

	xe_guc_notify(guc);

	ret = xe_mmio_wait32(mmio, reply_reg, GUC_HXG_MSG_0_ORIGIN,
			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
			     50000, &reply, false);
	if (ret) {
		/* scratch registers might be cleared during FLR, try once more */
		if (!reply && !lost) {
			xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]);
			lost = true;
			goto retry;
		}
timeout:
		xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
			  request[0], reply);
		return ret;
	}

	header = xe_mmio_read32(mmio, reply_reg);
	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
		/*
		 * Once we get a BUSY reply, we must wait again for the final
		 * response, but this time we can't use the ORIGIN mask anymore.
		 * To spot the right change in the reply, we take advantage of
		 * the fact that the SUCCESS and FAILURE responses differ only
		 * by a single bit, while all other bits are set and can be
		 * used as a new mask.
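		 * For example, with a 3-bit TYPE field, SUCCESS would be 0b111
		 * and FAILURE 0b110 (the BUILD_BUG_ONs below verify exactly
		 * this relationship), so resp_bits would be 0b110 and waiting
		 * on those bits catches both final response types.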
		 */
		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);

		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);

		ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
				     2000000, &header, false);

		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
		if (unlikely(ret)) {
			if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
			    GUC_HXG_TYPE_NO_RESPONSE_BUSY)
				goto proto;
			goto timeout;
		}
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		xe_gt_dbg(gt, "GuC mmio request %#x: retrying, reason %#x\n",
			  request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		if (unlikely(error == XE_GUC_RESPONSE_VF_MIGRATED)) {
			xe_gt_dbg(gt, "GuC mmio request %#x rejected due to MIGRATION (hint %#x)\n",
				  request[0], hint);
			return -EREMCHG;
		}

		xe_gt_err(gt, "GuC mmio request %#x: failure %#x hint %#x\n",
			  request[0], error, hint);
		return -ENXIO;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		xe_gt_err(gt, "GuC mmio request %#x: unexpected reply %#x\n",
			  request[0], header);
		return -EPROTO;
	}

	/* Just copy the entire possible message response */
	if (response_buf) {
		response_buf[0] = header;

		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
			reply_reg.addr += sizeof(u32);
			response_buf[i] = xe_mmio_read32(mmio, reply_reg);
		}
	}

	/* Use data from the GuC response as our return value */
	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO);

int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
	return xe_guc_mmio_send_recv(guc, request, len, NULL);
}

static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
			   lower_32_bits(val)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
			   upper_32_bits(val)),
	};
	int ret;

	xe_assert(xe, len <= 2);
	xe_assert(xe, len != 1 || !upper_32_bits(val));

	/* Self config must go over MMIO */
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
{
	return guc_self_cfg(guc, key, 1, val);
}

int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
{
	return guc_self_cfg(guc, key, 2, val);
}

static void xe_guc_sw_0_irq_handler(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		xe_gt_sriov_vf_migrated_event_handler(gt);
}

void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		xe_guc_ct_irq_handler(&guc->ct);

	if (iir & GUC_INTR_SW_INT_0)
		xe_guc_sw_0_irq_handler(guc);
}

void xe_guc_sanitize(struct xe_guc *guc)
{
	xe_uc_fw_sanitize(&guc->fw);
	xe_guc_ct_disable(&guc->ct);
	xe_guc_submit_disable(guc);
}

int xe_guc_reset_prepare(struct xe_guc *guc)
{
	return xe_guc_submit_reset_prepare(guc);
}

void xe_guc_reset_wait(struct xe_guc *guc)
{
	xe_guc_submit_reset_wait(guc);
}

void xe_guc_stop_prepare(struct xe_guc *guc)
{
	if (!IS_SRIOV_VF(guc_to_xe(guc))) {
		int err;

		err = xe_guc_pc_stop(&guc->pc);
		xe_gt_WARN(guc_to_gt(guc), err, "Failed to stop GuC PC: %pe\n",
			   ERR_PTR(err));
	}
}

void xe_guc_stop(struct xe_guc *guc)
{
	xe_guc_ct_stop(&guc->ct);

	xe_guc_submit_stop(guc);
}

int xe_guc_start(struct xe_guc *guc)
{
	return xe_guc_submit_start(guc);
}

/**
 * xe_guc_runtime_suspend() - GuC runtime suspend
 * @guc: The GuC object
 *
 * Stop further runs of submission tasks on the given GuC and runtime suspend
 * the GuC CT.
 */
void xe_guc_runtime_suspend(struct xe_guc *guc)
{
	xe_guc_submit_pause(guc);
	xe_guc_submit_disable(guc);
	xe_guc_ct_runtime_suspend(&guc->ct);
}

/**
 * xe_guc_runtime_resume() - GuC runtime resume
 * @guc: The GuC object
 *
 * Runtime resume the GuC CT and allow further runs of submission tasks on
 * the given GuC.
 */
void xe_guc_runtime_resume(struct xe_guc *guc)
{
	/*
	 * Runtime PM flows are not applicable for VFs, so it's safe to
	 * directly enable the IRQ.
	 */
	guc_enable_irq(guc);

	xe_guc_ct_runtime_resume(&guc->ct);
	xe_guc_submit_enable(guc);
	xe_guc_submit_unpause(guc);
}

int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int i;

	xe_uc_fw_print(&guc->fw, p);

	if (!IS_SRIOV_VF(gt_to_xe(gt))) {
		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref.domains)
			return -EIO;

		status = xe_mmio_read32(&gt->mmio, GUC_STATUS);

		drm_printf(p, "\nGuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   REG_FIELD_GET(GS_BOOTROM_MASK, status));
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   REG_FIELD_GET(GS_UKERNEL_MASK, status));
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   REG_FIELD_GET(GS_MIA_MASK, status));
		drm_printf(p, "\tLog level = %d\n",
			   xe_guc_log_get_level(&guc->log));

		drm_puts(p, "\nScratch registers:\n");
		for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
		}
	}

	drm_puts(p, "\n");
	xe_guc_ct_print(&guc->ct, p, false);

	drm_puts(p, "\n");
	xe_guc_submit_print(guc, p);

	return 0;
}

/**
 * xe_guc_declare_wedged() - Declare GuC wedged
 * @guc: the GuC object
 *
 * Wedge the GuC, which stops all submission, saves desired debug state, and
 * cleans up anything which could time out.
 */
void xe_guc_declare_wedged(struct xe_guc *guc)
{
	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);

	xe_guc_reset_prepare(guc);
	xe_guc_ct_stop(&guc->ct);
	xe_guc_submit_wedge(guc);
}

/**
 * xe_guc_using_main_gamctrl_queues() - Detect which reporting queues to use.
 * @guc: The GuC object
 *
 * For Xe3p and beyond, we want to program the hardware to use the
 * "Main GAMCTRL queue" rather than the legacy queue before we upload
 * the GuC firmware. This will allow the GuC to use a new set of
 * registers for pagefault handling and avoid some unnecessary
 * complications with MCR register range handling.
 *
 * Return: true if the new Main GAMCTRL queues can be used.
 */
bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	/*
	 * For an Xe3p media GT (version 35), the GuC and the CS subunits may
	 * still be Xe3, which lacks Main GAMCTRL support. Reserved bits in
	 * the GMD_ID register indicate the IP version of the subunits.
	 */
	if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) == 35) {
		u32 val = xe_mmio_read32(&gt->mmio, GMD_ID);
		u32 subip = REG_FIELD_GET(GMD_ID_SUBIP_FLAG_MASK, val);

		if (!subip)
			return true;

		xe_gt_WARN(gt, subip != 1,
			   "GMD_ID has unknown value in the SUBIP_FLAG field - 0x%x\n",
			   subip);

		return false;
	}

	return GT_VER(gt) >= 35;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_guc_g2g_test.c"
#endif