// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc.h"

#include <linux/iopoll.h>
#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_throttle.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_guc_ads.h"
#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_rc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_sleep.h"
#include "xe_sriov.h"
#include "xe_sriov_pf_migration.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
			    struct xe_bo *bo)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 addr;

	/*
	 * For most BOs, the address on the allocating tile is fine. However for
	 * some, e.g. G2G CTB, the address on a specific tile is required as it
	 * might be different for each tile. So, just always ask for the address
	 * on the target GuC.
	 */
	addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id);

	/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
	xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
	xe_assert(xe, addr < GUC_GGTT_TOP);
	xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr);

	return addr;
}
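
/*
 * For example, assuming a (hypothetical) 2M WOPCM, the checks above confine
 * every GuC-visible BO to the window [2M, GUC_GGTT_TOP), and the final size
 * check rejects a BO that starts inside the window but straddles its top.
 */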

static u32 guc_ctl_debug_flags(struct xe_guc *guc)
{
	u32 level = xe_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= FIELD_PREP(GUC_LOG_VERBOSITY, GUC_LOG_LEVEL_TO_VERBOSITY(level));

	return flags;
}

static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;

	if (!xe->info.skip_guc_pc)
		flags |= GUC_CTL_ENABLE_SLPC;

	if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)))
		flags |= GUC_CTL_ENABLE_PSMI_LOGGING;

	if (xe_guc_using_main_gamctrl_queues(guc))
		flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
{
	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
	u32 flags;

	#if (((XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE) % SZ_1M) == 0)
	#define LOG_UNIT SZ_1M
	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
	#else
	#define LOG_UNIT SZ_4K
	#define LOG_FLAG 0
	#endif

	#if (((XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
	#define CAPTURE_UNIT SZ_1M
	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
	#else
	#define CAPTURE_UNIT SZ_4K
	#define CAPTURE_FLAG 0
	#endif

	BUILD_BUG_ON(!XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		FIELD_PREP(GUC_LOG_CRASH_DUMP, XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE / LOG_UNIT - 1) |
		FIELD_PREP(GUC_LOG_EVENT_DATA, XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE / LOG_UNIT - 1) |
		FIELD_PREP(GUC_LOG_STATE_CAPTURE, XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE /
			   CAPTURE_UNIT - 1) |
		FIELD_PREP(GUC_LOG_BUF_ADDR, offset);

	#undef LOG_UNIT
	#undef LOG_FLAG
	#undef CAPTURE_UNIT
	#undef CAPTURE_FLAG

	return flags;
}
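
/*
 * A worked example of the encoding above, for a hypothetical 8M crash-dump
 * buffer: 8M is 1M-aligned, so LOG_UNIT becomes SZ_1M, GUC_LOG_LOG_ALLOC_UNITS
 * is set, and GUC_LOG_CRASH_DUMP carries 8M / 1M - 1 = 7. A 64K buffer would
 * instead use 4K units, leave the alloc-units flag clear, and encode
 * 64K / 4K - 1 = 15.
 */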

static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
	u32 flags = FIELD_PREP(GUC_ADS_ADDR, ads);

	return flags;
}

static bool needs_wa_dual_queue(struct xe_gt *gt)
{
	/*
	 * The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions
	 * on RCS and CCSes with different address spaces, which on DG2 is
	 * required as a WA for an HW bug.
	 */
	if (XE_GT_WA(gt, 22011391025))
		return true;

	/*
	 * On newer platforms, the HW has been updated to not allow parallel
	 * execution of different address spaces, so the RCS/CCS will stall the
	 * context switch if one of the other RCS/CCSes is busy with a different
	 * address space. While functionally correct, having a submission
	 * stalled on the HW limits the GuC's ability to shuffle things around
	 * and can cause complications if the non-stalled submission runs for a
	 * long time, because the GuC doesn't know that the stalled submission
	 * isn't actually running and might declare it as hung. Therefore, we
	 * enable the DUAL_QUEUE_WA on all newer platforms on GTs that have CCS
	 * engines to move management back to the GuC.
	 */
	if (CCS_INSTANCES(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
		return true;

	return false;
}

static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	if (XE_GT_WA(gt, 22012773006))
		flags |= GUC_WA_POLLCS;

	if (XE_GT_WA(gt, 14014475959))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	if (needs_wa_dual_queue(gt))
		flags |= GUC_WA_DUAL_QUEUE;

	/*
	 * Wa_22011802037: FIXME - there's more to be done than simply setting
	 * this flag: make sure each CS is stopped when preparing for GT reset
	 * and wait for pending MI_FW.
	 */
	if (GRAPHICS_VERx100(xe) < 1270)
		flags |= GUC_WA_PRE_PARSER;

	if (XE_GT_WA(gt, 22012727170) || XE_GT_WA(gt, 22012727685))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	if (XE_GT_WA(gt, 18020744125) &&
	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	if (XE_GT_WA(gt, 14018913170))
		flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;

	if (XE_GT_WA(gt, 16023683509))
		flags |= GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6;

	return flags;
}

static u32 guc_ctl_devid(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

	return (((u32)xe->info.devid) << 16) | xe->info.revid;
}

static void guc_print_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_gt_dbg(gt, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

static void guc_init_params(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = 0;
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = 0;
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

/*
 * Initialize the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_write_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int i;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
}
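
/*
 * The resulting layout: SOFT_SCRATCH(0) is zeroed for early GuC-to-host
 * status reporting, the GUC_CTL_* dwords occupy SOFT_SCRATCH(1) through
 * SOFT_SCRATCH(GUC_CTL_MAX_DWORDS), and the BUILD_BUG_ON in
 * guc_print_params() guarantees exactly one register is left over for the
 * early MMIO messages that guc_handle_mmio_msg() reads from SOFT_SCRATCH(15).
 */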

static int guc_action_register_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev,
					  u32 desc_addr, u32 buff_addr, u32 size)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 action[] = {
		XE_GUC_ACTION_REGISTER_G2G,
		FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) |
		FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
		FIELD_PREP(XE_G2G_REGISTER_TILE, dst_tile) |
		FIELD_PREP(XE_G2G_REGISTER_DEVICE, dst_dev),
		desc_addr,
		buff_addr,
	};

	xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
	xe_assert(xe, !(size % SZ_4K));

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

static int guc_action_deregister_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 action[] = {
		XE_GUC_ACTION_DEREGISTER_G2G,
		FIELD_PREP(XE_G2G_DEREGISTER_TYPE, type) |
		FIELD_PREP(XE_G2G_DEREGISTER_TILE, dst_tile) |
		FIELD_PREP(XE_G2G_DEREGISTER_DEVICE, dst_dev),
	};

	xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

#define G2G_DEV(gt)	(((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)

#define G2G_BUFFER_SIZE (SZ_4K)
#define G2G_DESC_SIZE (64)
#define G2G_DESC_AREA_SIZE (SZ_4K)

/*
 * Generate a unique id for each bi-directional CTB for each pair of
 * near and far tiles/devices. The id can then be used as an index into
 * a single allocation that is sub-divided into multiple CTBs.
 *
 * For example, with two devices per tile and two tiles, the table should
 * look like:
 *           Far <tile>.<dev>
 *         0.0   0.1   1.0   1.1
 * N 0.0  --/-- 00/01 02/03 04/05
 * e 0.1  01/00 --/-- 06/07 08/09
 * a 1.0  03/02 07/06 --/-- 10/11
 * r 1.1  05/04 09/08 11/10 --/--
 *
 * Where each entry is Rx/Tx channel id.
 *
 * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
 * be reading from channel #11 and writing to channel #10. Whereas,
 * GuC #2 talking to GuC #3 would be read on #10 and write to #11.
 */
static int g2g_slot(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
		    u32 type, u32 max_inst, bool have_dev)
{
	u32 near = near_tile, far = far_tile;
	u32 idx = 0, x, y, direction;
	int i;

	if (have_dev) {
		near = (near << 1) | near_dev;
		far = (far << 1) | far_dev;
	}

	/* No need to send to oneself */
	if (far == near)
		return -1;

	if (far > near) {
		/* Top right table half */
		x = far;
		y = near;

		/* T/R is 'forwards' direction */
		direction = type;
	} else {
		/* Bottom left table half */
		x = near;
		y = far;

		/* B/L is 'backwards' direction */
		direction = (1 - type);
	}

	/* Count the rows prior to the target */
	for (i = y; i > 0; i--)
		idx += max_inst - i;

	/* Count this row up to the target */
	idx += (x - 1 - y);

	/* Slots are in Rx/Tx pairs */
	idx *= 2;

	/* Pick Rx/Tx direction */
	idx += direction;

	return idx;
}
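
/*
 * A minimal standalone sketch of the slot math above, checked against the
 * table in the preceding comment. Ids arrive already folded as
 * (tile << 1) | dev, and it assumes XE_G2G_TYPE_IN == 0 and
 * XE_G2G_TYPE_OUT == 1 with Rx using the IN direction; those values are
 * illustrative, not taken from the ABI headers. Build it on its own, e.g.
 * cc -o g2g_slot_demo demo.c && ./g2g_slot_demo.
 */
#if 0 /* example only, not built as part of the driver */
#include <assert.h>

static int slot(unsigned int near, unsigned int far, unsigned int type,
		unsigned int max_inst)
{
	unsigned int x, y, direction, idx = 0;
	int i;

	if (far == near)
		return -1;

	if (far > near) {
		x = far;
		y = near;
		direction = type;		/* top right: 'forwards' */
	} else {
		x = near;
		y = far;
		direction = 1 - type;		/* bottom left: 'backwards' */
	}

	for (i = y; i > 0; i--)			/* rows above the target */
		idx += max_inst - i;
	idx += x - 1 - y;			/* columns before the target */

	return idx * 2 + direction;		/* Rx/Tx pair + direction */
}

int main(void)
{
	/* Two tiles with two GTs each: linear GuC ids 0..3, max_inst = 4 */
	assert(slot(3, 2, 0, 4) == 11);	/* GuC #3 reads from #2 on ch 11 */
	assert(slot(3, 2, 1, 4) == 10);	/* ... and writes to it on ch 10 */
	assert(slot(2, 3, 0, 4) == 10);	/* GuC #2 reads from #3 on ch 10 */
	assert(slot(0, 1, 1, 4) == 1);	/* near 0.0 -> far 0.1 Tx is ch 01 */
	return 0;
}
#endif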

static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 type, bool have_dev)
{
	struct xe_gt *near_gt = guc_to_gt(near_guc);
	struct xe_device *xe = gt_to_xe(near_gt);
	struct xe_bo *g2g_bo;
	u32 near_tile = gt_to_tile(near_gt)->id;
	u32 near_dev = G2G_DEV(near_gt);
	u32 far_tile = gt_to_tile(far_gt)->id;
	u32 far_dev = G2G_DEV(far_gt);
	u32 max = xe->info.gt_count;
	u32 base, desc, buf;
	int slot;

	/* G2G is not allowed between different cards */
	xe_assert(xe, xe == gt_to_xe(far_gt));

	g2g_bo = near_guc->g2g.bo;
	xe_assert(xe, g2g_bo);

	slot = g2g_slot(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
	xe_assert(xe, slot >= 0);

	base = guc_bo_ggtt_addr(near_guc, g2g_bo);
	desc = base + slot * G2G_DESC_SIZE;
	buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;

	xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
	xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo));

	return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
					      desc, buf, G2G_BUFFER_SIZE);
}
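
/*
 * With the defines above, slot n's descriptor lives at base + n * 64 inside
 * the 4K descriptor area and its buffer at base + 4K + n * 4K. For example,
 * slot 11 (the highest used with four GuCs) sits at descriptor offset 704
 * and buffer offset 48K, which both asserts accept: 704 + 64 <= 4K and
 * 48K + 4K <= the BO size computed by guc_g2g_size().
 */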

static void guc_g2g_deregister(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type)
{
	guc_action_deregister_g2g_buffer(guc, type, far_tile, far_dev);
}

static u32 guc_g2g_size(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int count = xe->info.gt_count;
	u32 num_channels = (count * (count - 1)) / 2;

	xe_assert(xe, num_channels * XE_G2G_TYPE_LIMIT * G2G_DESC_SIZE <= G2G_DESC_AREA_SIZE);

	return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
}
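
/*
 * For example, with xe->info.gt_count == 4 this works out to 4 * 3 / 2 = 6
 * channel pairs, i.e. 12 CTBs: 12 * 4K of buffer space plus the 4K
 * descriptor area, 52K in total, of which the descriptors consume
 * only 12 * 64 = 768 bytes.
 */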

static bool xe_guc_g2g_wanted(struct xe_device *xe)
{
	/* Can't do GuC to GuC communication if there is only one GuC */
	if (xe->info.gt_count <= 1)
		return false;

	/* No current user */
	return false;
}

static int guc_g2g_alloc(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	u32 g2g_size;

	if (guc->g2g.bo)
		return 0;

	if (gt->info.id != 0) {
		struct xe_gt *root_gt = xe_device_get_gt(xe, 0);
		struct xe_guc *root_guc = &root_gt->uc.guc;
		struct xe_bo *bo;

		bo = xe_bo_get(root_guc->g2g.bo);
		if (!bo)
			return -ENODEV;

		guc->g2g.bo = bo;
		guc->g2g.owned = false;
		return 0;
	}

	g2g_size = guc_g2g_size(guc);
	bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_ALL |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
	guc->g2g.bo = bo;
	guc->g2g.owned = true;

	return 0;
}

static void guc_g2g_fini(struct xe_guc *guc)
{
	if (!guc->g2g.bo)
		return;

	/* Unpinning the owned object is handled by generic shutdown */
	if (!guc->g2g.owned)
		xe_bo_put(guc->g2g.bo);

	guc->g2g.bo = NULL;
}

static int guc_g2g_start(struct xe_guc *guc)
{
	struct xe_gt *far_gt, *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int i, j;
	int t, err;
	bool have_dev;

	if (!guc->g2g.bo) {
		int ret;

		ret = guc_g2g_alloc(guc);
		if (ret)
			return ret;
	}

	/* GuC interface will need extending if more GT device types are ever created. */
	xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA));

	/* Channel numbering depends on whether there are multiple GTs per tile */
	have_dev = xe->info.gt_count > xe->info.tile_count;

	for_each_gt(far_gt, xe, i) {
		u32 far_tile, far_dev;

		if (far_gt->info.id == gt->info.id)
			continue;

		far_tile = gt_to_tile(far_gt)->id;
		far_dev = G2G_DEV(far_gt);

		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
			err = guc_g2g_register(guc, far_gt, t, have_dev);
			if (err) {
				while (--t >= 0)
					guc_g2g_deregister(guc, far_tile, far_dev, t);
				goto err_deregister;
			}
		}
	}

	return 0;

err_deregister:
	for_each_gt(far_gt, xe, j) {
		u32 tile, dev;

		if (far_gt->info.id == gt->info.id)
			continue;

		if (j >= i)
			break;

		tile = gt_to_tile(far_gt)->id;
		dev = G2G_DEV(far_gt);

		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
			guc_g2g_deregister(guc, tile, dev, t);
	}

	return err;
}

static int __guc_opt_in_features_enable(struct xe_guc *guc, u64 addr, u32 num_dwords)
{
	u32 action[] = {
		XE_GUC_ACTION_OPT_IN_FEATURE_KLV,
		lower_32_bits(addr),
		upper_32_bits(addr),
		num_dwords
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

static bool supports_dynamic_ics(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);

	/* Dynamic ICS is available on PVC and on Xe2 and newer platforms. */
	if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20)
		return false;

	/*
	 * The feature is currently not compatible with multi-lrc, so the GuC
	 * does not support it at all on the media engines (which are the main
	 * users of mlrc). On the primary GT side, to avoid it being used in
	 * conjunction with mlrc, we only enable it if we are in single CCS
	 * mode.
	 */
	if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1)
		return false;

	/*
	 * Dynamic ICS requires GuC v70.40.1, which maps to compatibility
	 * version v1.18.4.
	 */
	return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 18, 4);
}

#define OPT_IN_MAX_DWORDS 16
int xe_guc_opt_in_features_enable(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS);
	u32 count = 0;
	u32 *klvs;
	int ret;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	klvs = xe_guc_buf_cpu_ptr(buf);

	/*
	 * The extra CAT error type opt-in was added in GuC v70.17.0, which maps
	 * to compatibility version v1.7.0.
	 * Note that the GuC allows enabling this KLV even on platforms that do
	 * not support the extra type; in such case the returned type variable
	 * will be set to a known invalid value which we can check against.
	 */
	if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0))
		klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_EXT_CAT_ERR_TYPE);

	if (supports_dynamic_ics(guc))
		klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH);

	if (count) {
		xe_assert(xe, count <= OPT_IN_MAX_DWORDS);

		ret = __guc_opt_in_features_enable(guc, xe_guc_buf_flush(buf), count);
		if (ret < 0) {
			xe_gt_err(guc_to_gt(guc),
				  "failed to enable GuC opt-in features: %pe\n",
				  ERR_PTR(ret));
			return ret;
		}
	}

	return 0;
}
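
/*
 * Each opt-in above is a bare tag: a single dword holding just the KLV key
 * with a zero length (assuming the usual PREP_GUC_KLV_TAG definition in
 * xe_guc_klv_helpers.h), so the OPT_IN_MAX_DWORDS buffer of 16 dwords leaves
 * headroom for future KLVs that carry payload data.
 */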

static void guc_fini_hw(void *arg)
{
	struct xe_guc *guc = arg;
	struct xe_gt *gt = guc_to_gt(guc);

	xe_with_force_wake(fw_ref, gt_to_fw(gt), XE_FORCEWAKE_ALL)
		xe_uc_sanitize_reset(&guc_to_gt(guc)->uc);

	guc_g2g_fini(guc);
}

static void vf_guc_fini_hw(void *arg)
{
	struct xe_guc *guc = arg;

	xe_gt_sriov_vf_reset(guc_to_gt(guc));
}

/**
 * xe_guc_comm_init_early - early initialization of GuC communication
 * @guc: the &xe_guc to initialize
 *
 * Must be called prior to first MMIO communication with GuC firmware.
 */
void xe_guc_comm_init_early(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	if (xe_gt_is_media_type(gt))
		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
	else
		guc->notify_reg = GUC_HOST_INTERRUPT;
}

static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
{
	struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
	struct xe_device *xe = guc_to_xe(guc);
	int ret;

	if (!IS_DGFX(xe))
		return 0;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
	if (ret)
		return ret;

	return 0;
}

static int vf_guc_init_noalloc(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int err;

	err = xe_gt_sriov_vf_bootstrap(gt);
	if (err)
		return err;

	err = xe_gt_sriov_vf_query_config(gt);
	if (err)
		return err;

	return 0;
}

int xe_guc_init_noalloc(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	xe_guc_comm_init_early(guc);

	ret = xe_guc_ct_init_noalloc(&guc->ct);
	if (ret)
		goto out;

	ret = xe_guc_relay_init(&guc->relay);
	if (ret)
		goto out;

	if (IS_SRIOV_VF(xe)) {
		ret = vf_guc_init_noalloc(guc);
		if (ret)
			goto out;
	}

	return 0;

out:
	xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
	return ret;
}

int xe_guc_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	guc->fw.type = XE_UC_FW_TYPE_GUC;
	ret = xe_uc_fw_init(&guc->fw);
	if (ret)
		return ret;

	if (!xe_uc_fw_is_enabled(&guc->fw))
		return 0;

	/* Disable page reclaim if the GuC firmware does not support it */
	if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 14, 0))
		xe->info.has_page_reclaim_hw_assist = false;

	if (IS_SRIOV_VF(xe)) {
		ret = devm_add_action_or_reset(xe->drm.dev, vf_guc_fini_hw, guc);
		if (ret)
			goto out;

		ret = xe_guc_ct_init(&guc->ct);
		if (ret)
			goto out;
		return 0;
	}

	ret = xe_guc_log_init(&guc->log);
	if (ret)
		goto out;

	ret = xe_guc_capture_init(guc);
	if (ret)
		goto out;

	ret = xe_guc_ads_init(&guc->ads);
	if (ret)
		goto out;

	ret = xe_guc_ct_init(&guc->ct);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);

	ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
	if (ret)
		goto out;

	guc_init_params(guc);

	return 0;

out:
	xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
	return ret;
}

static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int err;

	err = xe_guc_submit_init(guc, xe_gt_sriov_vf_guc_ids(guc_to_gt(guc)));
	if (err)
		return err;

	err = xe_guc_buf_cache_init(&guc->buf);
	if (err)
		return err;

	/* XXX xe_guc_db_mgr_init not needed for now */

	return 0;
}

static u32 guc_additional_cache_size(struct xe_device *xe)
{
	if (IS_SRIOV_PF(xe) && xe_sriov_pf_migration_supported(xe))
		return XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE;
	else
		return 0; /* Fallback to default size */
}

/**
 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int ret;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return vf_guc_init_post_hwconfig(guc);

	ret = xe_guc_realloc_post_hwconfig(guc);
	if (ret)
		return ret;

	ret = xe_guc_ct_init_post_hwconfig(&guc->ct);
	if (ret)
		return ret;

	guc_init_params_post_hwconfig(guc);

	ret = xe_guc_submit_init(guc, ~0);
	if (ret)
		return ret;

	ret = xe_guc_db_mgr_init(&guc->dbm, ~0);
	if (ret)
		return ret;

	ret = xe_guc_pc_init(&guc->pc);
	if (ret)
		return ret;

	ret = xe_guc_rc_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_engine_activity_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_buf_cache_init_with_size(&guc->buf,
					      guc_additional_cache_size(guc_to_xe(guc)));
	if (ret)
		return ret;

	return xe_guc_ads_init_post_hwconfig(&guc->ads);
}

int xe_guc_post_load_init(struct xe_guc *guc)
{
	int ret;

	xe_guc_ads_populate_post_load(&guc->ads);

	ret = xe_guc_opt_in_features_enable(guc);
	if (ret)
		return ret;

	if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
		ret = guc_g2g_start(guc);
		if (ret)
			return ret;
	}

	return xe_guc_submit_enable(guc);
}

/*
 * Wa_14025883347: Prevent GuC firmware DMA failures during GuC-only reset by
 * ensuring SRAM save/restore operations are complete before reset.
 */
static void guc_prevent_fw_dma_failure_on_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 boot_hash_chk, guc_status, sram_status;
	int ret;

	guc_status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
	if (guc_status & GS_MIA_IN_RESET)
		return;

	boot_hash_chk = xe_mmio_read32(&gt->mmio, BOOT_HASH_CHK);
	if (!(boot_hash_chk & GUC_BOOT_UKERNEL_VALID))
		return;

	/* Disable idle flow during reset (GuC reset re-enables it automatically) */
	xe_mmio_rmw32(&gt->mmio, GUC_MAX_IDLE_COUNT, 0, GUC_IDLE_FLOW_DISABLE);

	ret = xe_mmio_wait32(&gt->mmio, GUC_STATUS, GS_UKERNEL_MASK,
			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
			     100000, &guc_status, false);
	if (ret)
		xe_gt_warn(gt, "GuC not ready after disabling idle flow (GUC_STATUS: 0x%x)\n",
			   guc_status);

	ret = xe_mmio_wait32(&gt->mmio, GUC_SRAM_STATUS, GUC_SRAM_HANDLING_MASK,
			     0, 5000, &sram_status, false);
	if (ret)
		xe_gt_warn(gt, "SRAM handling not complete (GUC_SRAM_STATUS: 0x%x)\n",
			   sram_status);
}

int xe_guc_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	u32 guc_status, gdrst;
	int ret;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_bootstrap(gt);

	if (XE_GT_WA(gt, 14025883347))
		guc_prevent_fw_dma_failure_on_reset(guc);

	xe_mmio_write32(mmio, GDRST, GRDOM_GUC);

	ret = xe_mmio_wait32(mmio, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
	if (ret) {
		xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
		goto err_out;
	}

	guc_status = xe_mmio_read32(mmio, GUC_STATUS);
	if (!(guc_status & GS_MIA_IN_RESET)) {
		xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
			  guc_status);
		ret = -EIO;
		goto err_out;
	}

	return 0;

err_out:
	return ret;
}

static void guc_prepare_xfer(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	struct xe_device *xe = guc_to_xe(guc);
	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
		GUC_ENABLE_MIA_CLOCK_GATING;

	if (GRAPHICS_VERx100(xe) < 1250)
		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				GUC_ENABLE_MIA_CACHING;

	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);

	/* Must program this register before loading the ucode with DMA */
	xe_mmio_write32(mmio, GUC_SHIM_CONTROL, shim_flags);

	xe_mmio_write32(mmio, GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	/* Make sure GuC receives ARAT interrupts */
	xe_mmio_rmw32(mmio, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
}

/*
 * Supporting MMIO & in memory RSA
 */
static int guc_xfer_rsa(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	size_t copied;
	int i;

	if (guc->fw.rsa_size > 256) {
		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
				    xe_uc_fw_rsa_offset(&guc->fw);
		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
		return 0;
	}

	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
	if (copied < sizeof(rsa))
		return -ENOMEM;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}
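
/*
 * The 256-byte boundary above matches the register file: assuming
 * UOS_RSA_SCRATCH_COUNT is 64, the rsa[] staging array is exactly
 * 64 * 4 = 256 bytes, so any key that fits is written dword by dword and
 * anything larger is handed to the bootrom by GGTT address instead.
 */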

/*
 * Wait for the GuC to start up.
 *
 * Measurements indicate this should take no more than 20ms (assuming the GT
 * clock is at maximum frequency). However, thermal throttling and other issues
 * can prevent the clock from hitting max and thus make the load take
 * significantly longer. Allow up to 3s as a safety margin in normal builds.
 * For CONFIG_DRM_XE_DEBUG allow up to 20s to account for slower execution,
 * issues in PCODE, driver, fan, etc.
 *
 * Keep checking the GUC_STATUS every 10ms with a debug message every 100
 * attempts as an "I'm slow, but alive" message. Regardless, if it takes more
 * than 200ms, emit a warning.
 */

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define GUC_LOAD_TIMEOUT_SEC	20
#else
#define GUC_LOAD_TIMEOUT_SEC	3
#endif
#define GUC_LOAD_TIME_WARN_MSEC	200
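
/*
 * With the values above, guc_wait_ucode() polls every 10ms, so the "still
 * in progress" debug message in guc_load_done() fires roughly once per
 * second (100 tries * 10ms), and a normal 3s timeout allows about 300
 * polls before giving up.
 */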

static void print_load_status_err(struct xe_gt *gt, u32 status)
{
	struct xe_mmio *mmio = &gt->mmio;
	u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
	u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);

	xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
		  REG_FIELD_GET(GS_MIA_IN_RESET, status),
		  bootrom, ukernel,
		  REG_FIELD_GET(GS_MIA_MASK, status),
		  REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

	switch (bootrom) {
	case XE_BOOTROM_STATUS_NO_KEY_FOUND:
		xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
			  xe_mmio_read32(mmio, GUC_HEADER_INFO));
		break;
	case XE_BOOTROM_STATUS_RSA_FAILED:
		xe_gt_err(gt, "firmware signature verification failed\n");
		break;
	case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
		xe_gt_err(gt, "firmware production part check failure\n");
		break;
	}

	switch (ukernel) {
	case XE_GUC_LOAD_STATUS_HWCONFIG_START:
		xe_gt_err(gt, "still extracting hwconfig table.\n");
		break;
	case XE_GUC_LOAD_STATUS_EXCEPTION:
		xe_gt_err(gt, "firmware exception. EIP: %#x\n",
			  xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
		break;
	case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
		xe_gt_err(gt, "illegal init/ADS data\n");
		break;
	case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
		xe_gt_err(gt, "illegal register in save/restore workaround list\n");
		break;
	case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
		xe_gt_err(gt, "illegal workaround KLV data\n");
		break;
	case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
		xe_gt_err(gt, "illegal feature flag specified\n");
		break;
	}
}

/*
 * Check GUC_STATUS looking for known terminal states (either completion or
 * failure) of either the microkernel status field or the boot ROM status field.
 *
 * Returns 1 for successful completion, -1 for failure and 0 for any
 * intermediate state.
 */
static int guc_load_done(struct xe_gt *gt, u32 *status, u32 *tries)
{
	u32 ukernel, bootrom;

	*status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
	ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, *status);
	bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, *status);

	switch (ukernel) {
	case XE_GUC_LOAD_STATUS_READY:
		return 1;
	case XE_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH:
	case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
	case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
	case XE_GUC_LOAD_STATUS_HWCONFIG_ERROR:
	case XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH:
	case XE_GUC_LOAD_STATUS_DPC_ERROR:
	case XE_GUC_LOAD_STATUS_EXCEPTION:
	case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
	case XE_GUC_LOAD_STATUS_MPU_DATA_INVALID:
	case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
	case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
	case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
		return -1;
	}

	switch (bootrom) {
	case XE_BOOTROM_STATUS_NO_KEY_FOUND:
	case XE_BOOTROM_STATUS_RSA_FAILED:
	case XE_BOOTROM_STATUS_PAVPC_FAILED:
	case XE_BOOTROM_STATUS_WOPCM_FAILED:
	case XE_BOOTROM_STATUS_LOADLOC_FAILED:
	case XE_BOOTROM_STATUS_JUMP_FAILED:
	case XE_BOOTROM_STATUS_RC6CTXCONFIG_FAILED:
	case XE_BOOTROM_STATUS_MPUMAP_INCORRECT:
	case XE_BOOTROM_STATUS_EXCEPTION:
	case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
		return -1;
	}

	if (++*tries >= 100) {
		struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;

		*tries = 0;
		xe_gt_dbg(gt, "GuC load still in progress, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n",
			  xe_guc_pc_get_act_freq(guc_pc),
			  xe_guc_pc_get_cur_freq_fw(guc_pc),
			  *status, ukernel, bootrom);
	}

	return 0;
}

static int guc_wait_ucode(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
	u32 before_freq, act_freq, cur_freq;
	u32 status = 0, tries = 0;
	ktime_t before;
	u64 delta_ms;
	int ret;

	before_freq = xe_guc_pc_get_act_freq(guc_pc);
	before = ktime_get();

	ret = poll_timeout_us(ret = guc_load_done(gt, &status, &tries), ret,
			      10 * USEC_PER_MSEC,
			      GUC_LOAD_TIMEOUT_SEC * USEC_PER_SEC, false);

	delta_ms = ktime_to_ms(ktime_sub(ktime_get(), before));
	act_freq = xe_guc_pc_get_act_freq(guc_pc);
	cur_freq = xe_guc_pc_get_cur_freq_fw(guc_pc);

	if (ret) {
		xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz)\n",
			  status, delta_ms, xe_guc_pc_get_act_freq(guc_pc),
			  xe_guc_pc_get_cur_freq_fw(guc_pc));
		print_load_status_err(gt, status);

		return -EPROTO;
	}

	if (delta_ms > GUC_LOAD_TIME_WARN_MSEC) {
		xe_gt_warn(gt, "GuC load: excessive init time: %lldms! [status = 0x%08X]\n",
			   delta_ms, status);
		xe_gt_warn(gt, "GuC load: excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n",
			   act_freq, cur_freq, before_freq,
			   xe_gt_throttle_get_limit_reasons(gt));
	} else {
		xe_gt_dbg(gt, "GuC load: init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X\n",
			  delta_ms, act_freq, cur_freq, before_freq, status);
	}

	return 0;
}
ALLOW_ERROR_INJECTION(guc_wait_ucode, ERRNO);

static int __xe_guc_upload(struct xe_guc *guc)
{
	int ret;

	/* Raise GT freq to speed up HuC/GuC load */
	xe_guc_pc_raise_unslice(&guc->pc);

	guc_write_params(guc);
	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
	 * size > 256). The RSA size and therefore the way we provide it to the
	 * HW is fixed for each platform and hard-coded in the bootrom.
	 */
	ret = guc_xfer_rsa(guc);
	if (ret)
		goto out;
	/*
	 * Current uCode expects the code to be loaded at 8k; locations below
	 * this are used for the stack.
	 */
	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
	if (ret)
		goto out;

	/* Wait for authentication */
	ret = guc_wait_ucode(guc);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
	return 0;

out:
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
	return ret;
}

static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	ret = xe_gt_sriov_vf_connect(gt);
	if (ret)
		goto err_out;

	ret = xe_gt_sriov_vf_query_runtime(gt);
	if (ret)
		goto err_out;

	return 0;

err_out:
	xe_guc_sanitize(guc);
	return ret;
}

/**
 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
 * @guc: The GuC object
 *
 * This function uploads a minimal GuC that does not support submissions but
 * is in a state where the hwconfig table can be read. Next, it reads and
 * parses the hwconfig table so it can be used for subsequent steps in the
 * driver load. Lastly, it enables CT communication (XXX: this is needed for
 * PFs/VFs only).
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	int ret;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return vf_guc_min_load_for_hwconfig(guc);

	xe_guc_ads_populate_minimal(&guc->ads);

	xe_guc_pc_init_early(&guc->pc);

	ret = __xe_guc_upload(guc);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_upload(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	xe_guc_ads_populate(&guc->ads);

	if (xe_guc_using_main_gamctrl_queues(guc))
		xe_mmio_write32(&gt->mmio, MAIN_GAMCTRL_MODE, MAIN_GAMCTRL_QUEUE_SELECT);

	return __xe_guc_upload(guc);
}

static void guc_handle_mmio_msg(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 msg;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15));
	msg &= XE_GUC_RECV_MSG_EXCEPTION |
		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0);

	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		xe_gt_err(gt, "Received early GuC crash dump notification!\n");

	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
		xe_gt_err(gt, "Received early GuC exception notification!\n");
}

static void guc_enable_irq(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 events = xe_gt_is_media_type(gt) ?
		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	/* Primary GuC and media GuC share a single enable bit */
	xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE,
			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));

	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
	 */
	xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0);
}

int xe_guc_enable_communication(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	int err;

	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
		struct xe_gt *gt = guc_to_gt(guc);
		struct xe_tile *tile = gt_to_tile(gt);

		err = xe_memirq_init_guc(&tile->memirq, guc);
		if (err)
			return err;
	} else {
		guc_enable_irq(guc);
	}

	err = xe_guc_ct_enable(&guc->ct);
	if (err)
		return err;

	guc_handle_mmio_msg(guc);

	return 0;
}

int xe_guc_suspend(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 action[] = {
		XE_GUC_ACTION_CLIENT_SOFT_RESET,
	};
	int ret;

	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
	if (ret) {
		xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret));
		return ret;
	}

	xe_guc_sanitize(guc);
	return 0;
}

void xe_guc_notify(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	const u32 default_notify_data = 0;

	/*
	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
	 * additional payload data to the GuC, but this capability is not
	 * used by the firmware yet. Use the default value in the meantime.
	 */
	xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data);
}

int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
{
	u32 action[] = {
		XE_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_addr
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

#define MAX_RETRIES_ON_FLR	2
#define MIN_SLEEP_MS_ON_FLR	256

int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
			  u32 len, u32 *response_buf)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
	unsigned int sleep_period_ms = 1;
	unsigned int lost = 0;
	u32 header;
	int ret;
	int i;

	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);

	xe_assert(xe, len);
	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
		  GUC_HXG_ORIGIN_HOST);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
		  GUC_HXG_TYPE_REQUEST);

retry:
	/* Not in the critical data path, so an if/else per GT type is fine */
	if (xe_gt_is_media_type(gt)) {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(mmio, MED_VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(mmio, MED_VF_SW_FLAG(LAST_INDEX));
	} else {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(mmio, VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(mmio, VF_SW_FLAG(LAST_INDEX));
	}

	xe_guc_notify(guc);

	ret = xe_mmio_wait32(mmio, reply_reg, GUC_HXG_MSG_0_ORIGIN,
			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
			     50000, &header, false);
	if (ret) {
		/* scratch registers might be cleared during FLR, try once more */
		if (!header) {
			if (++lost > MAX_RETRIES_ON_FLR) {
				xe_gt_err(gt, "GuC mmio request %#x: lost, too many retries %u\n",
					  request[0], lost);
				return -ENOLINK;
			}
			xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]);
			xe_sleep_relaxed_ms(MIN_SLEEP_MS_ON_FLR);
			goto retry;
		}
timeout:
		xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
			  request[0], header);
		return ret;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
		/*
		 * Once we get a BUSY reply we must wait again for the final
		 * response, but this time we can't use the ORIGIN mask anymore.
		 * To spot the right change in the reply, we take advantage of
		 * the fact that the SUCCESS and FAILURE responses differ only
		 * by a single bit while all of their other bits are set, so
		 * those shared bits can be used as the new mask.
		 */
		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);

		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);

		ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
				     2000000, &header, false);

		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
		if (unlikely(ret)) {
			if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
			    GUC_HXG_TYPE_NO_RESPONSE_BUSY)
				goto proto;
			goto timeout;
		}
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		xe_gt_dbg(gt, "GuC mmio request %#x: retrying, reason %#x\n",
			  request[0], reason);

		xe_sleep_exponential_ms(&sleep_period_ms, 256);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		if (unlikely(error == XE_GUC_RESPONSE_VF_MIGRATED)) {
			xe_gt_dbg(gt, "GuC mmio request %#x rejected due to MIGRATION (hint %#x)\n",
				  request[0], hint);
			return -EREMCHG;
		}

		xe_gt_err(gt, "GuC mmio request %#x: failure %#x hint %#x\n",
			  request[0], error, hint);
		return -ENXIO;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		xe_gt_err(gt, "GuC mmio request %#x: unexpected reply %#x\n",
			  request[0], header);
		return -EPROTO;
	}

	/* Just copy the entire possible message response */
	if (response_buf) {
		response_buf[0] = header;

		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
			reply_reg.addr += sizeof(u32);
			response_buf[i] = xe_mmio_read32(mmio, reply_reg);
		}
	}

	/* Use data from the GuC response as our return value */
	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO);
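
/*
 * A standalone sketch of the BUSY-reply mask trick used above, assuming the
 * usual 3-bit HXG type encoding where RESPONSE_SUCCESS is the all-ones value
 * (7), RESPONSE_FAILURE differs from it only in bit 0 (6) and
 * NO_RESPONSE_BUSY is some other value (3 here); the two BUILD_BUG_ONs in
 * xe_guc_mmio_send_recv() are exactly what make this work.
 */
#if 0 /* example only, not built as part of the driver */
#include <assert.h>

int main(void)
{
	unsigned int success = 7, failure = 6, busy = 3;
	unsigned int resp_bits = success & failure;	/* 6: shared bits */

	/* Waiting for resp_bits to be set matches either terminal reply... */
	assert((success & resp_bits) == resp_bits);
	assert((failure & resp_bits) == resp_bits);
	/* ...while a lingering BUSY keeps the wait polling. */
	assert((busy & resp_bits) != resp_bits);
	return 0;
}
#endif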

int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
	return xe_guc_mmio_send_recv(guc, request, len, NULL);
}

static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
			   lower_32_bits(val)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
			   upper_32_bits(val)),
	};
	int ret;

	xe_assert(xe, len <= 2);
	xe_assert(xe, len != 1 || !upper_32_bits(val));

	/* Self config must go over MMIO */
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
{
	return guc_self_cfg(guc, key, 1, val);
}

int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
{
	return guc_self_cfg(guc, key, 2, val);
}
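
/*
 * For example, xe_guc_self_cfg64(guc, key, 0x123456789ull) sends a KLV of
 * length 2 with VALUE32 = 0x23456789 and VALUE64 = 0x1, while
 * xe_guc_self_cfg32() asserts that the upper 32 bits of the value are zero
 * and sends a single-dword value.
 */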

static void xe_guc_sw_0_irq_handler(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		xe_gt_sriov_vf_migrated_event_handler(gt);
}

void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		xe_guc_ct_irq_handler(&guc->ct);

	if (iir & GUC_INTR_SW_INT_0)
		xe_guc_sw_0_irq_handler(guc);
}

void xe_guc_sanitize(struct xe_guc *guc)
{
	xe_uc_fw_sanitize(&guc->fw);
	xe_guc_ct_disable(&guc->ct);
	xe_guc_submit_disable(guc);
}

int xe_guc_reset_prepare(struct xe_guc *guc)
{
	return xe_guc_submit_reset_prepare(guc);
}

void xe_guc_reset_wait(struct xe_guc *guc)
{
	xe_guc_submit_reset_wait(guc);
}

void xe_guc_stop_prepare(struct xe_guc *guc)
{
	if (!IS_SRIOV_VF(guc_to_xe(guc))) {
		int err;

		xe_guc_rc_disable(guc);
		err = xe_guc_pc_stop(&guc->pc);
		xe_gt_WARN(guc_to_gt(guc), err, "Failed to stop GuC PC: %pe\n",
			   ERR_PTR(err));
	}
}

void xe_guc_stop(struct xe_guc *guc)
{
	xe_guc_ct_stop(&guc->ct);

	xe_guc_submit_stop(guc);
}

int xe_guc_start(struct xe_guc *guc)
{
	return xe_guc_submit_start(guc);
}

/**
 * xe_guc_runtime_suspend() - GuC runtime suspend
 * @guc: The GuC object
 *
 * Stop further runs of submission tasks on the given GuC and runtime suspend
 * the GuC CT.
 */
void xe_guc_runtime_suspend(struct xe_guc *guc)
{
	xe_guc_submit_pause(guc);
	xe_guc_submit_disable(guc);
	xe_guc_ct_runtime_suspend(&guc->ct);
}

/**
 * xe_guc_runtime_resume() - GuC runtime resume
 * @guc: The GuC object
 *
 * Runtime resume the GuC CT and allow further runs of submission tasks on
 * the given GuC.
 */
void xe_guc_runtime_resume(struct xe_guc *guc)
{
	/*
	 * Runtime PM flows are not applicable for VFs, so it's safe to
	 * directly enable the IRQ.
	 */
	guc_enable_irq(guc);

	xe_guc_ct_runtime_resume(&guc->ct);
	xe_guc_submit_enable(guc);
	xe_guc_submit_unpause(guc);
}

int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int i;

	xe_uc_fw_print(&guc->fw, p);

	if (!IS_SRIOV_VF(gt_to_xe(gt))) {
		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref.domains)
			return -EIO;

		status = xe_mmio_read32(&gt->mmio, GUC_STATUS);

		drm_printf(p, "\nGuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   REG_FIELD_GET(GS_BOOTROM_MASK, status));
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   REG_FIELD_GET(GS_UKERNEL_MASK, status));
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   REG_FIELD_GET(GS_MIA_MASK, status));
		drm_printf(p, "\tLog level = %d\n",
			   xe_guc_log_get_level(&guc->log));

		drm_puts(p, "\nScratch registers:\n");
		for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
		}
	}

	drm_puts(p, "\n");
	xe_guc_ct_print(&guc->ct, p, false);

	drm_puts(p, "\n");
	xe_guc_submit_print(guc, p);

	return 0;
}

/**
 * xe_guc_declare_wedged() - Declare GuC wedged
 * @guc: the GuC object
 *
 * Wedge the GuC, which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_guc_declare_wedged(struct xe_guc *guc)
{
	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);

	xe_guc_reset_prepare(guc);
	xe_guc_ct_stop(&guc->ct);
	xe_guc_submit_wedge(guc);
}

/**
 * xe_guc_using_main_gamctrl_queues() - Detect which reporting queues to use.
 * @guc: The GuC object
 *
 * For Xe3p and beyond, we want to program the hardware to use the
 * "Main GAMCTRL queue" rather than the legacy queue before we upload
 * the GuC firmware. This will allow the GuC to use a new set of
 * registers for pagefault handling and avoid some unnecessary
 * complications with MCR register range handling.
 *
 * Return: true if the new Main GAMCTRL queues can be used.
 */
bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	/*
	 * On an Xe3p media GT (35), the GuC and the CS subunits may still be
	 * Xe3, which lacks Main GAMCTRL support. Reserved bits in GMD_ID
	 * indicate the IP version of the subunits.
	 */
	if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) == 35) {
		u32 val = xe_mmio_read32(&gt->mmio, GMD_ID);
		u32 subip = REG_FIELD_GET(GMD_ID_SUBIP_FLAG_MASK, val);

		if (!subip)
			return true;

		xe_gt_WARN(gt, subip != 1,
			   "GMD_ID has unknown value in the SUBIP_FLAG field - 0x%x\n",
			   subip);

		return false;
	}

	return GT_VER(gt) >= 35;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_guc_g2g_test.c"
#endif