// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc.h"

#include <linux/iopoll.h>
#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_throttle.h"
#include "xe_guc_ads.h"
#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_rc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_sleep.h"
#include "xe_sriov.h"
#include "xe_sriov_pf_migration.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
			    struct xe_bo *bo)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 addr;

	/*
	 * For most BOs, the address on the allocating tile is fine. However for
	 * some, e.g. G2G CTB, the address on a specific tile is required as it
	 * might be different for each tile. So, just always ask for the address
	 * on the target GuC.
	 */
	addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id);

	/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
	xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
	xe_assert(xe, addr < GUC_GGTT_TOP);
	xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr);

	return addr;
}

static u32 guc_ctl_debug_flags(struct xe_guc *guc)
{
	u32 level = xe_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= FIELD_PREP(GUC_LOG_VERBOSITY, GUC_LOG_LEVEL_TO_VERBOSITY(level));

	return flags;
}

static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;

	if (!xe->info.skip_guc_pc)
		flags |= GUC_CTL_ENABLE_SLPC;

	if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)))
		flags |= GUC_CTL_ENABLE_PSMI_LOGGING;

	if (xe_guc_using_main_gamctrl_queues(guc))
		flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES;

	if (GRAPHICS_VER(xe) >= 35 && !IS_DGFX(xe) && xe_gt_is_media_type(guc_to_gt(guc)))
		flags |= GUC_CTL_ENABLE_L2FLUSH_OPT;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
{
	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
	u32 flags;

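	/*
	 * Buffer sizes are advertised to the GuC in allocation units: use 1M
	 * units whenever a buffer size is an exact multiple of 1M (and set the
	 * matching *_ALLOC_UNITS flag), otherwise fall back to 4K units.
	 */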
	#if (((XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE) % SZ_1M) == 0)
	#define LOG_UNIT SZ_1M
	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
	#else
	#define LOG_UNIT SZ_4K
	#define LOG_FLAG 0
	#endif

	#if (((XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
	#define CAPTURE_UNIT SZ_1M
	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
	#else
	#define CAPTURE_UNIT SZ_4K
	#define CAPTURE_FLAG 0
	#endif

	BUILD_BUG_ON(!XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		FIELD_PREP(GUC_LOG_CRASH_DUMP, XE_GUC_LOG_CRASH_DUMP_BUFFER_SIZE / LOG_UNIT - 1) |
		FIELD_PREP(GUC_LOG_EVENT_DATA, XE_GUC_LOG_EVENT_DATA_BUFFER_SIZE / LOG_UNIT - 1) |
		FIELD_PREP(GUC_LOG_STATE_CAPTURE, XE_GUC_LOG_STATE_CAPTURE_BUFFER_SIZE /
			   CAPTURE_UNIT - 1) |
		FIELD_PREP(GUC_LOG_BUF_ADDR, offset);

	#undef LOG_UNIT
	#undef LOG_FLAG
	#undef CAPTURE_UNIT
	#undef CAPTURE_FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
	u32 flags = FIELD_PREP(GUC_ADS_ADDR, ads);

	return flags;
}

static bool needs_wa_dual_queue(struct xe_gt *gt)
{
	/*
	 * The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions
	 * on RCS and CCSes with different address spaces, which on DG2 is
	 * required as a WA for an HW bug.
	 */
	if (XE_GT_WA(gt, 22011391025))
		return true;

	/*
	 * On newer platforms, the HW has been updated to not allow parallel
	 * execution of different address spaces, so the RCS/CCS will stall the
	 * context switch if one of the other RCS/CCSes is busy with a different
	 * address space. While functionally correct, having a submission
	 * stalled on the HW limits the GuC ability to shuffle things around and
	 * can cause complications if the non-stalled submission runs for a long
	 * time, because the GuC doesn't know that the stalled submission isn't
	 * actually running and might declare it as hung. Therefore, we enable
	 * the DUAL_QUEUE_WA on all newer platforms on GTs that have CCS engines
	 * to move management back to the GuC.
	 */
	if (CCS_INSTANCES(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
		return true;

	return false;
}

static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	if (XE_GT_WA(gt, 22012773006))
		flags |= GUC_WA_POLLCS;

	if (XE_GT_WA(gt, 14014475959))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	if (needs_wa_dual_queue(gt))
		flags |= GUC_WA_DUAL_QUEUE;

	/*
	 * Wa_22011802037: FIXME - there's more to be done than simply setting
	 * this flag: make sure each CS is stopped when preparing for GT reset
	 * and wait for pending MI_FW.
	 */
	if (GRAPHICS_VERx100(xe) < 1270)
		flags |= GUC_WA_PRE_PARSER;

	if (XE_GT_WA(gt, 22012727170) || XE_GT_WA(gt, 22012727685))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	if (XE_GT_WA(gt, 18020744125) &&
	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	if (XE_GT_WA(gt, 14018913170))
		flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;

	if (XE_GT_WA(gt, 16023683509))
		flags |= GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6;

	return flags;
}

static u32 guc_ctl_devid(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

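	/* Pack the PCI device ID into the upper 16 bits and the revision ID into the lower 16 */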
	return (((u32)xe->info.devid) << 16) | xe->info.revid;
}

static void guc_print_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_gt_dbg(gt, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

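/*
 * For the initial, minimal firmware load the feature and workaround DWORDs
 * are left zero; guc_init_params_post_hwconfig() fills in the real values
 * once the hwconfig is available.
 */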
static void guc_init_params(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = 0;
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = 0;
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

/*
 * Initialize the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_write_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int i;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
}

static int guc_action_register_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev,
					  u32 desc_addr, u32 buff_addr, u32 size)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 action[] = {
		XE_GUC_ACTION_REGISTER_G2G,
		FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) |
		FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
		FIELD_PREP(XE_G2G_REGISTER_TILE, dst_tile) |
		FIELD_PREP(XE_G2G_REGISTER_DEVICE, dst_dev),
		desc_addr,
		buff_addr,
	};

	xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
	xe_assert(xe, !(size % SZ_4K));

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

static int guc_action_deregister_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 action[] = {
		XE_GUC_ACTION_DEREGISTER_G2G,
		FIELD_PREP(XE_G2G_DEREGISTER_TYPE, type) |
		FIELD_PREP(XE_G2G_DEREGISTER_TILE, dst_tile) |
		FIELD_PREP(XE_G2G_DEREGISTER_DEVICE, dst_dev),
	};

	xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

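/* Within a tile, the primary GT is GuC device 0 and the media GT is device 1 */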
#define G2G_DEV(gt)	(((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)

#define G2G_BUFFER_SIZE (SZ_4K)
#define G2G_DESC_SIZE (64)
#define G2G_DESC_AREA_SIZE (SZ_4K)

/*
 * Generate a unique id for each bi-directional CTB for each pair of
 * near and far tiles/devices. The id can then be used as an index into
 * a single allocation that is sub-divided into multiple CTBs.
 *
 * For example, with two devices per tile and two tiles, the table should
 * look like:
 *           Far <tile>.<dev>
 *         0.0   0.1   1.0   1.1
 * N 0.0  --/-- 00/01 02/03 04/05
 * e 0.1  01/00 --/-- 06/07 08/09
 * a 1.0  03/02 07/06 --/-- 10/11
 * r 1.1  05/04 09/08 11/10 --/--
 *
 * Where each entry is the Rx/Tx channel id.
 *
 * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
 * be reading from channel #11 and writing to channel #10. Whereas,
 * GuC #2 talking to GuC #3 would read from #10 and write to #11.
 */
static unsigned int g2g_slot(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
			     u32 type, u32 max_inst, bool have_dev)
{
	u32 near = near_tile, far = far_tile;
	u32 idx = 0, x, y, direction;
	int i;

	if (have_dev) {
		near = (near << 1) | near_dev;
		far = (far << 1) | far_dev;
	}

	/* No need to send to oneself */
	if (far == near)
		return -1;

	if (far > near) {
		/* Top right table half */
		x = far;
		y = near;

		/* T/R is 'forwards' direction */
		direction = type;
	} else {
		/* Bottom left table half */
		x = near;
		y = far;

		/* B/L is 'backwards' direction */
		direction = (1 - type);
	}

	/* Count the rows prior to the target */
	for (i = y; i > 0; i--)
		idx += max_inst - i;

	/* Count this row up to the target */
	idx += (x - 1 - y);

	/* Slots are in Rx/Tx pairs */
	idx *= 2;

	/* Pick Rx/Tx direction */
	idx += direction;

	return idx;
}

static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 type, bool have_dev)
{
	struct xe_gt *near_gt = guc_to_gt(near_guc);
	struct xe_device *xe = gt_to_xe(near_gt);
	struct xe_bo *g2g_bo;
	u32 near_tile = gt_to_tile(near_gt)->id;
	u32 near_dev = G2G_DEV(near_gt);
	u32 far_tile = gt_to_tile(far_gt)->id;
	u32 far_dev = G2G_DEV(far_gt);
	u32 max = xe->info.gt_count;
	u32 base, desc, buf;
	int slot;

	/* G2G is not allowed between different cards */
	xe_assert(xe, xe == gt_to_xe(far_gt));

	g2g_bo = near_guc->g2g.bo;
	xe_assert(xe, g2g_bo);

	slot = g2g_slot(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
	xe_assert(xe, slot >= 0);

	base = guc_bo_ggtt_addr(near_guc, g2g_bo);
	desc = base + slot * G2G_DESC_SIZE;
	buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;

	xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
	xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo));

	return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
					      desc, buf, G2G_BUFFER_SIZE);
}

static void guc_g2g_deregister(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type)
{
	guc_action_deregister_g2g_buffer(guc, type, far_tile, far_dev);
}

static u32 guc_g2g_size(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int count = xe->info.gt_count;
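	/* One bi-directional channel per unordered pair of GuCs: n * (n - 1) / 2 */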
	u32 num_channels = (count * (count - 1)) / 2;

	xe_assert(xe, num_channels * XE_G2G_TYPE_LIMIT * G2G_DESC_SIZE <= G2G_DESC_AREA_SIZE);

	return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
}

static bool xe_guc_g2g_wanted(struct xe_device *xe)
{
	/* Can't do GuC to GuC communication if there is only one GuC */
	if (xe->info.gt_count <= 1)
		return false;

	/* No current user */
	return false;
}

static int guc_g2g_alloc(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	u32 g2g_size;

	if (guc->g2g.bo)
		return 0;

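	/* Non-root GTs share the single G2G buffer owned by the root GT's GuC */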
	if (gt->info.id != 0) {
		struct xe_gt *root_gt = xe_device_get_gt(xe, 0);
		struct xe_guc *root_guc = &root_gt->uc.guc;
		struct xe_bo *bo;

		bo = xe_bo_get(root_guc->g2g.bo);
		if (!bo)
			return -ENODEV;

		guc->g2g.bo = bo;
		guc->g2g.owned = false;
		return 0;
	}

	g2g_size = guc_g2g_size(guc);
	bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_ALL |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
	guc->g2g.bo = bo;
	guc->g2g.owned = true;

	return 0;
}

static void guc_g2g_fini(struct xe_guc *guc)
{
	if (!guc->g2g.bo)
		return;

	/* Unpinning the owned object is handled by generic shutdown */
	if (!guc->g2g.owned)
		xe_bo_put(guc->g2g.bo);

	guc->g2g.bo = NULL;
}

static int guc_g2g_start(struct xe_guc *guc)
{
	struct xe_gt *far_gt, *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int i, j;
	int t, err;
	bool have_dev;

	if (!guc->g2g.bo) {
		int ret;

		ret = guc_g2g_alloc(guc);
		if (ret)
			return ret;
	}

	/* GuC interface will need extending if more GT device types are ever created. */
	xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA));

	/* Channel numbering depends on whether there are multiple GTs per tile */
	have_dev = xe->info.gt_count > xe->info.tile_count;

	for_each_gt(far_gt, xe, i) {
		u32 far_tile, far_dev;

		if (far_gt->info.id == gt->info.id)
			continue;

		far_tile = gt_to_tile(far_gt)->id;
		far_dev = G2G_DEV(far_gt);

		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
			err = guc_g2g_register(guc, far_gt, t, have_dev);
			if (err) {
				while (--t >= 0)
					guc_g2g_deregister(guc, far_tile, far_dev, t);
				goto err_deregister;
			}
		}
	}

	return 0;

err_deregister:
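	/* Unwind the registrations made for GTs processed before the failure */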
	for_each_gt(far_gt, xe, j) {
		u32 tile, dev;

		if (far_gt->info.id == gt->info.id)
			continue;

		if (j >= i)
			break;

		tile = gt_to_tile(far_gt)->id;
		dev = G2G_DEV(far_gt);

		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
			guc_g2g_deregister(guc, tile, dev, t);
	}

	return err;
}

static int __guc_opt_in_features_enable(struct xe_guc *guc, u64 addr, u32 num_dwords)
{
	u32 action[] = {
		XE_GUC_ACTION_OPT_IN_FEATURE_KLV,
		lower_32_bits(addr),
		upper_32_bits(addr),
		num_dwords
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

static bool supports_dynamic_ics(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);

	/* Dynamic ICS is available for PVC and Xe2 and newer platforms. */
	if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20)
		return false;

	/*
	 * The feature is currently not compatible with multi-lrc, so the GuC
	 * does not support it at all on the media engines (which are the main
	 * users of mlrc). On the primary GT side, to avoid it being used in
	 * conjunction with mlrc, we only enable it if we are in single CCS
	 * mode.
	 */
	if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1)
		return false;

	/*
	 * Dynamic ICS requires GuC v70.40.1, which maps to compatibility
	 * version v1.18.4.
	 */
	return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 18, 4);
}

#define OPT_IN_MAX_DWORDS 16
int xe_guc_opt_in_features_enable(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS);
	u32 count = 0;
	u32 *klvs;
	int ret;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	klvs = xe_guc_buf_cpu_ptr(buf);

	/*
	 * The extra CAT error type opt-in was added in GuC v70.17.0, which maps
	 * to compatibility version v1.7.0.
	 * Note that the GuC allows enabling this KLV even on platforms that do
	 * not support the extra type; in such case the returned type variable
	 * will be set to a known invalid value which we can check against.
	 */
	if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0))
		klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_EXT_CAT_ERR_TYPE);

	if (supports_dynamic_ics(guc))
		klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH);

	if (count) {
		xe_assert(xe, count <= OPT_IN_MAX_DWORDS);

		ret = __guc_opt_in_features_enable(guc, xe_guc_buf_flush(buf), count);
		if (ret < 0) {
			xe_gt_err(guc_to_gt(guc),
				  "failed to enable GuC opt-in features: %pe\n",
				  ERR_PTR(ret));
			return ret;
		}
	}

	return 0;
}

static void guc_fini_hw(void *arg)
{
	struct xe_guc *guc = arg;
	struct xe_gt *gt = guc_to_gt(guc);

	xe_with_force_wake(fw_ref, gt_to_fw(gt), XE_FORCEWAKE_ALL)
		xe_uc_sanitize_reset(&guc_to_gt(guc)->uc);

	guc_g2g_fini(guc);
}

static void vf_guc_fini_hw(void *arg)
{
	struct xe_guc *guc = arg;

	xe_gt_sriov_vf_reset(guc_to_gt(guc));
}

/**
 * xe_guc_comm_init_early - early initialization of GuC communication
 * @guc: the &xe_guc to initialize
 *
 * Must be called prior to first MMIO communication with GuC firmware.
 */
void xe_guc_comm_init_early(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	if (xe_gt_is_media_type(gt))
		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
	else
		guc->notify_reg = GUC_HOST_INTERRUPT;
}

static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
{
	struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
	struct xe_device *xe = guc_to_xe(guc);
	int ret;

	if (!IS_DGFX(guc_to_xe(guc)))
		return 0;

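	/* On discrete devices, recreate the firmware, log and ADS objects in VRAM */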
	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
	if (ret)
		return ret;

	return 0;
}

static int vf_guc_init_noalloc(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int err;

	err = xe_gt_sriov_vf_bootstrap(gt);
	if (err)
		return err;

	err = xe_gt_sriov_vf_query_config(gt);
	if (err)
		return err;

	return 0;
}

int xe_guc_init_noalloc(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	xe_guc_comm_init_early(guc);

	ret = xe_guc_ct_init_noalloc(&guc->ct);
	if (ret)
		goto out;

	ret = xe_guc_relay_init(&guc->relay);
	if (ret)
		goto out;

	if (IS_SRIOV_VF(xe)) {
		ret = vf_guc_init_noalloc(guc);
		if (ret)
			goto out;
	}

	return 0;

out:
	xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
	return ret;
}

int xe_guc_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	guc->fw.type = XE_UC_FW_TYPE_GUC;
	ret = xe_uc_fw_init(&guc->fw);
	if (ret)
		return ret;

	if (!xe_uc_fw_is_enabled(&guc->fw))
		return 0;

	/* Disable page reclaim if the GuC firmware does not support it */
	if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 14, 0))
		xe->info.has_page_reclaim_hw_assist = false;

	if (IS_SRIOV_VF(xe)) {
		ret = devm_add_action_or_reset(xe->drm.dev, vf_guc_fini_hw, guc);
		if (ret)
			goto out;

		ret = xe_guc_ct_init(&guc->ct);
		if (ret)
			goto out;
		return 0;
	}

	ret = xe_guc_log_init(&guc->log);
	if (ret)
		goto out;

	ret = xe_guc_capture_init(guc);
	if (ret)
		goto out;

	ret = xe_guc_ads_init(&guc->ads);
	if (ret)
		goto out;

	ret = xe_guc_ct_init(&guc->ct);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);

	ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
	if (ret)
		goto out;

	guc_init_params(guc);

	return 0;

out:
	xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
	return ret;
}

static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int err;

	err = xe_guc_submit_init(guc, xe_gt_sriov_vf_guc_ids(guc_to_gt(guc)));
	if (err)
		return err;

	err = xe_guc_buf_cache_init(&guc->buf);
	if (err)
		return err;

	/* XXX xe_guc_db_mgr_init not needed for now */

	return 0;
}

static u32 guc_additional_cache_size(struct xe_device *xe)
{
	if (IS_SRIOV_PF(xe) && xe_sriov_pf_migration_supported(xe))
		return XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE;
	else
		return 0; /* Fall back to the default size */
}

/**
 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int ret;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return vf_guc_init_post_hwconfig(guc);

	ret = xe_guc_realloc_post_hwconfig(guc);
	if (ret)
		return ret;

	ret = xe_guc_ct_init_post_hwconfig(&guc->ct);
	if (ret)
		return ret;

	guc_init_params_post_hwconfig(guc);

	ret = xe_guc_submit_init(guc, ~0);
	if (ret)
		return ret;

	ret = xe_guc_db_mgr_init(&guc->dbm, ~0);
	if (ret)
		return ret;

	ret = xe_guc_pc_init(&guc->pc);
	if (ret)
		return ret;

	ret = xe_guc_rc_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_engine_activity_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_buf_cache_init_with_size(&guc->buf,
					      guc_additional_cache_size(guc_to_xe(guc)));
	if (ret)
		return ret;

	return xe_guc_ads_init_post_hwconfig(&guc->ads);
}

int xe_guc_post_load_init(struct xe_guc *guc)
{
	int ret;

	xe_guc_ads_populate_post_load(&guc->ads);

	ret = xe_guc_opt_in_features_enable(guc);
	if (ret)
		return ret;

	if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
		ret = guc_g2g_start(guc);
		if (ret)
			return ret;
	}

	return xe_guc_submit_enable(guc);
}

/*
 * Wa_14025883347: Prevent GuC firmware DMA failures during GuC-only reset by
 * ensuring SRAM save/restore operations are complete before reset.
 */
static void guc_prevent_fw_dma_failure_on_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 boot_hash_chk, guc_status, sram_status;
	int ret;

	guc_status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
	if (guc_status & GS_MIA_IN_RESET)
		return;

	boot_hash_chk = xe_mmio_read32(&gt->mmio, BOOT_HASH_CHK);
	if (!(boot_hash_chk & GUC_BOOT_UKERNEL_VALID))
		return;

	/* Disable idle flow during reset (GuC reset re-enables it automatically) */
	xe_mmio_rmw32(&gt->mmio, GUC_MAX_IDLE_COUNT, 0, GUC_IDLE_FLOW_DISABLE);

	ret = xe_mmio_wait32(&gt->mmio, GUC_STATUS, GS_UKERNEL_MASK,
			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
			     100000, &guc_status, false);
	if (ret)
		xe_gt_warn(gt, "GuC not ready after disabling idle flow (GUC_STATUS: 0x%x)\n",
			   guc_status);

	ret = xe_mmio_wait32(&gt->mmio, GUC_SRAM_STATUS, GUC_SRAM_HANDLING_MASK,
			     0, 5000, &sram_status, false);
	if (ret)
		xe_gt_warn(gt, "SRAM handling not complete (GUC_SRAM_STATUS: 0x%x)\n",
			   sram_status);
}

int xe_guc_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	u32 guc_status, gdrst;
	int ret;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_bootstrap(gt);

	if (XE_GT_WA(gt, 14025883347))
		guc_prevent_fw_dma_failure_on_reset(guc);

	xe_mmio_write32(mmio, GDRST, GRDOM_GUC);

	ret = xe_mmio_wait32(mmio, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
	if (ret) {
		xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
		goto err_out;
	}

	guc_status = xe_mmio_read32(mmio, GUC_STATUS);
	if (!(guc_status & GS_MIA_IN_RESET)) {
		xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
			  guc_status);
		ret = -EIO;
		goto err_out;
	}

	return 0;

err_out:
	return ret;
}

static void guc_prepare_xfer(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	struct xe_device *xe = guc_to_xe(guc);
	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
		GUC_ENABLE_MIA_CLOCK_GATING;

	if (GRAPHICS_VERx100(xe) < 1250)
		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				GUC_ENABLE_MIA_CACHING;

	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);

	/* Must program this register before loading the ucode with DMA */
	xe_mmio_write32(mmio, GUC_SHIM_CONTROL, shim_flags);

	xe_mmio_write32(mmio, GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	/* Make sure GuC receives ARAT interrupts */
	xe_mmio_rmw32(mmio, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
}

/*
 * Support both MMIO and in-memory RSA: keys larger than 256 bytes are
 * referenced by GGTT address, smaller ones are written directly into the
 * UOS_RSA_SCRATCH registers.
 */
static int guc_xfer_rsa(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	size_t copied;
	int i;

	if (guc->fw.rsa_size > 256) {
		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
				    xe_uc_fw_rsa_offset(&guc->fw);
		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
		return 0;
	}

	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
	if (copied < sizeof(rsa))
		return -ENOMEM;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}

/*
 * Wait for the GuC to start up.
 *
 * Measurements indicate this should take no more than 20ms (assuming the GT
 * clock is at maximum frequency). However, thermal throttling and other issues
 * can prevent the clock hitting max and thus make the load take significantly
 * longer. Allow up to 3s as a safety margin in normal builds. For
 * CONFIG_DRM_XE_DEBUG allow up to 20s to account for slower execution, issues
 * in PCODE, driver, fan, etc.
 *
 * Keep checking the GUC_STATUS every 10ms, with a debug message every 100
 * attempts as an "I'm slow, but alive" message. Regardless, if it takes more
 * than 200ms, emit a warning.
 */

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define GUC_LOAD_TIMEOUT_SEC	20
#else
#define GUC_LOAD_TIMEOUT_SEC	3
#endif
#define GUC_LOAD_TIME_WARN_MSEC	200

static void print_load_status_err(struct xe_gt *gt, u32 status)
{
	struct xe_mmio *mmio = &gt->mmio;
	u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
	u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);

	xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
		  REG_FIELD_GET(GS_MIA_IN_RESET, status),
		  bootrom, ukernel,
		  REG_FIELD_GET(GS_MIA_MASK, status),
		  REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

	switch (bootrom) {
	case XE_BOOTROM_STATUS_NO_KEY_FOUND:
		xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
			  xe_mmio_read32(mmio, GUC_HEADER_INFO));
		break;
	case XE_BOOTROM_STATUS_RSA_FAILED:
		xe_gt_err(gt, "firmware signature verification failed\n");
		break;
	case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
		xe_gt_err(gt, "firmware production part check failure\n");
		break;
	}

	switch (ukernel) {
	case XE_GUC_LOAD_STATUS_HWCONFIG_START:
		xe_gt_err(gt, "still extracting hwconfig table.\n");
		break;
	case XE_GUC_LOAD_STATUS_EXCEPTION:
		xe_gt_err(gt, "firmware exception. EIP: %#x\n",
			  xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
		break;
	case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
		xe_gt_err(gt, "illegal init/ADS data\n");
		break;
	case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
		xe_gt_err(gt, "illegal register in save/restore workaround list\n");
		break;
	case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
		xe_gt_err(gt, "illegal workaround KLV data\n");
		break;
	case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
		xe_gt_err(gt, "illegal feature flag specified\n");
		break;
	}
}

/*
 * Check GUC_STATUS looking for known terminal states (either completion or
 * failure) of either the microkernel status field or the boot ROM status field.
 *
 * Returns 1 for successful completion, -1 for failure and 0 for any
 * intermediate state.
 */
static int guc_load_done(struct xe_gt *gt, u32 *status, u32 *tries)
{
	u32 ukernel, bootrom;

	*status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
	ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, *status);
	bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, *status);

	switch (ukernel) {
	case XE_GUC_LOAD_STATUS_READY:
		return 1;
	case XE_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH:
	case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
	case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
	case XE_GUC_LOAD_STATUS_HWCONFIG_ERROR:
	case XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH:
	case XE_GUC_LOAD_STATUS_DPC_ERROR:
	case XE_GUC_LOAD_STATUS_EXCEPTION:
	case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
	case XE_GUC_LOAD_STATUS_MPU_DATA_INVALID:
	case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
	case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
	case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
		return -1;
	}

	switch (bootrom) {
	case XE_BOOTROM_STATUS_NO_KEY_FOUND:
	case XE_BOOTROM_STATUS_RSA_FAILED:
	case XE_BOOTROM_STATUS_PAVPC_FAILED:
	case XE_BOOTROM_STATUS_WOPCM_FAILED:
	case XE_BOOTROM_STATUS_LOADLOC_FAILED:
	case XE_BOOTROM_STATUS_JUMP_FAILED:
	case XE_BOOTROM_STATUS_RC6CTXCONFIG_FAILED:
	case XE_BOOTROM_STATUS_MPUMAP_INCORRECT:
	case XE_BOOTROM_STATUS_EXCEPTION:
	case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
		return -1;
	}

	if (++*tries >= 100) {
		struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;

		*tries = 0;
		xe_gt_dbg(gt, "GuC load still in progress, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n",
			  xe_guc_pc_get_act_freq(guc_pc),
			  xe_guc_pc_get_cur_freq_fw(guc_pc),
			  *status, ukernel, bootrom);
	}

	return 0;
}

static int guc_wait_ucode(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
	u32 before_freq, act_freq, cur_freq;
	u32 status = 0, tries = 0;
	int load_result, ret;
	ktime_t before;
	u64 delta_ms;

	before_freq = xe_guc_pc_get_act_freq(guc_pc);
	before = ktime_get();

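	/* Poll every 10ms until guc_load_done() reports a terminal state or the timeout expires */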
	ret = poll_timeout_us(load_result = guc_load_done(gt, &status, &tries), load_result,
			      10 * USEC_PER_MSEC,
			      GUC_LOAD_TIMEOUT_SEC * USEC_PER_SEC, false);

	delta_ms = ktime_to_ms(ktime_sub(ktime_get(), before));
	act_freq = xe_guc_pc_get_act_freq(guc_pc);
	cur_freq = xe_guc_pc_get_cur_freq_fw(guc_pc);

	if (ret || load_result <= 0) {
		xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz)\n",
			  status, delta_ms, xe_guc_pc_get_act_freq(guc_pc),
			  xe_guc_pc_get_cur_freq_fw(guc_pc));
		print_load_status_err(gt, status);

		return -EPROTO;
	}

	if (delta_ms > GUC_LOAD_TIME_WARN_MSEC) {
		xe_gt_warn(gt, "GuC load: excessive init time: %lldms! [status = 0x%08X]\n",
			   delta_ms, status);
		xe_gt_warn(gt, "GuC load: excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n",
			   act_freq, cur_freq, before_freq,
			   xe_gt_throttle_get_limit_reasons(gt));
	} else {
		xe_gt_dbg(gt, "GuC load: init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X\n",
			  delta_ms, act_freq, cur_freq, before_freq, status);
	}

	return 0;
}
ALLOW_ERROR_INJECTION(guc_wait_ucode, ERRNO);

static int __xe_guc_upload(struct xe_guc *guc)
{
	int ret;

	/* Raise GT freq to speed up HuC/GuC load */
	xe_guc_pc_raise_unslice(&guc->pc);

	guc_write_params(guc);
	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
	 * size > 256). The RSA size and therefore the way we provide it to the
	 * HW is fixed for each platform and hard-coded in the bootrom.
	 */
	ret = guc_xfer_rsa(guc);
	if (ret)
		goto out;
	/*
	 * Current uCode expects the code to be loaded at 8k; locations below
	 * this are used for the stack.
	 */
	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
	if (ret)
		goto out;

	/* Wait for authentication */
	ret = guc_wait_ucode(guc);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
	return 0;

out:
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
	return ret;
}

static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	ret = xe_gt_sriov_vf_connect(gt);
	if (ret)
		goto err_out;

	ret = xe_gt_sriov_vf_query_runtime(gt);
	if (ret)
		goto err_out;

	return 0;

err_out:
	xe_guc_sanitize(guc);
	return ret;
}

/**
 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
 * @guc: The GuC object
 *
 * This function uploads a minimal GuC that does not support submissions but
 * is in a state where the hwconfig table can be read. Next, it reads and
 * parses the hwconfig table so it can be used for subsequent steps in the
 * driver load. Lastly, it enables CT communication (XXX: this is needed for
 * PFs/VFs only).
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	int ret;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return vf_guc_min_load_for_hwconfig(guc);

	xe_guc_ads_populate_minimal(&guc->ads);

	xe_guc_pc_init_early(&guc->pc);

	ret = __xe_guc_upload(guc);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_upload(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	xe_guc_ads_populate(&guc->ads);

	if (xe_guc_using_main_gamctrl_queues(guc))
		xe_mmio_write32(&gt->mmio, MAIN_GAMCTRL_MODE, MAIN_GAMCTRL_QUEUE_SELECT);

	return __xe_guc_upload(guc);
}

static void guc_handle_mmio_msg(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 msg;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15));
	msg &= XE_GUC_RECV_MSG_EXCEPTION |
		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0);

	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		xe_gt_err(gt, "Received early GuC crash dump notification!\n");

	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
		xe_gt_err(gt, "Received early GuC exception notification!\n");
}

static void guc_enable_irq(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 events = xe_gt_is_media_type(gt) ?
		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	/* Primary GuC and media GuC share a single enable bit */
	xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE,
			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));

	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
	 */
	xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0);
}

int xe_guc_enable_communication(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	int err;

	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
		struct xe_gt *gt = guc_to_gt(guc);
		struct xe_tile *tile = gt_to_tile(gt);

		err = xe_memirq_init_guc(&tile->memirq, guc);
		if (err)
			return err;
	} else {
		guc_enable_irq(guc);
	}

	err = xe_guc_ct_enable(&guc->ct);
	if (err)
		return err;

	guc_handle_mmio_msg(guc);

	return 0;
}

/**
 * xe_guc_softreset() - Soft reset GuC
 * @guc: The GuC object
 *
 * Send a soft reset command to the GuC via an MMIO send.
 *
 * Return: 0 on success, otherwise error code
 */
int xe_guc_softreset(struct xe_guc *guc)
{
	u32 action[] = {
		XE_GUC_ACTION_CLIENT_SOFT_RESET,
	};
	int ret;

	if (!xe_uc_fw_is_running(&guc->fw))
		return 0;

	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	return 0;
}

int xe_guc_suspend(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	ret = xe_guc_softreset(guc);
	if (ret) {
		xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret));
		return ret;
	}

	xe_guc_sanitize(guc);
	return 0;
}

void xe_guc_notify(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	const u32 default_notify_data = 0;

	/*
	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
	 * additional payload data to the GuC but this capability is not
	 * used by the firmware yet. Use default value in the meantime.
	 */
	xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data);
}

int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
{
	u32 action[] = {
		XE_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_addr
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

#define MAX_RETRIES_ON_FLR	2
#define MIN_SLEEP_MS_ON_FLR	256

int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
			  u32 len, u32 *response_buf)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
	unsigned int sleep_period_ms = 1;
	unsigned int lost = 0;
	u32 header;
	int ret;
	int i;

	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);

	xe_assert(xe, len);
	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
		  GUC_HXG_ORIGIN_HOST);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
		  GUC_HXG_TYPE_REQUEST);

retry:
	/* Not in the critical data path, so a plain if/else on the GT type is fine */
	if (xe_gt_is_media_type(gt)) {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(mmio, MED_VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(mmio, MED_VF_SW_FLAG(LAST_INDEX));
	} else {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(mmio, VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(mmio, VF_SW_FLAG(LAST_INDEX));
	}

	xe_guc_notify(guc);

	ret = xe_mmio_wait32(mmio, reply_reg, GUC_HXG_MSG_0_ORIGIN,
			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
			     50000, &header, false);
	if (ret) {
		/* scratch registers might be cleared during FLR, try once more */
		if (!header) {
			if (++lost > MAX_RETRIES_ON_FLR) {
				xe_gt_err(gt, "GuC mmio request %#x: lost, too many retries %u\n",
					  request[0], lost);
				return -ENOLINK;
			}
			xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]);
			xe_sleep_relaxed_ms(MIN_SLEEP_MS_ON_FLR);
			goto retry;
		}
timeout:
		xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
			  request[0], header);
		return ret;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
		/*
		 * Once we get a BUSY reply we must wait again for the final
		 * response, but this time we can't use the ORIGIN mask anymore.
		 * To spot the change in the reply, take advantage of the fact
		 * that the SUCCESS and FAILURE response types differ only by a
		 * single bit: all the other bits are set and can be used as the
		 * new mask.
		 */
		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);

		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);

		ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
				     2000000, &header, false);

		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
		if (unlikely(ret)) {
			if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
			    GUC_HXG_TYPE_NO_RESPONSE_BUSY)
				goto proto;
			goto timeout;
		}
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		xe_gt_dbg(gt, "GuC mmio request %#x: retrying, reason %#x\n",
			  request[0], reason);

		xe_sleep_exponential_ms(&sleep_period_ms, 256);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		if (unlikely(error == XE_GUC_RESPONSE_VF_MIGRATED)) {
			xe_gt_dbg(gt, "GuC mmio request %#x rejected due to MIGRATION (hint %#x)\n",
				  request[0], hint);
			return -EREMCHG;
		}

		xe_gt_err(gt, "GuC mmio request %#x: failure %#x hint %#x\n",
			  request[0], error, hint);
		return -ENXIO;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		xe_gt_err(gt, "GuC mmio request %#x: unexpected reply %#x\n",
			  request[0], header);
		return -EPROTO;
	}

	/* Just copy the entire possible message response */
	if (response_buf) {
		response_buf[0] = header;

		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
			reply_reg.addr += sizeof(u32);
			response_buf[i] = xe_mmio_read32(mmio, reply_reg);
		}
	}

	/* Use data from the GuC response as our return value */
	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO);

int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
	return xe_guc_mmio_send_recv(guc, request, len, NULL);
}

static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
			   lower_32_bits(val)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
			   upper_32_bits(val)),
	};
	int ret;

	xe_assert(xe, len <= 2);
	xe_assert(xe, len != 1 || !upper_32_bits(val));

	/* Self config must go over MMIO */
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
{
	return guc_self_cfg(guc, key, 1, val);
}

int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
{
	return guc_self_cfg(guc, key, 2, val);
}

static void xe_guc_sw_0_irq_handler(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		xe_gt_sriov_vf_migrated_event_handler(gt);
}

void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		xe_guc_ct_irq_handler(&guc->ct);

	if (iir & GUC_INTR_SW_INT_0)
		xe_guc_sw_0_irq_handler(guc);
}

void xe_guc_sanitize(struct xe_guc *guc)
{
	xe_uc_fw_sanitize(&guc->fw);
	xe_guc_ct_disable(&guc->ct);
	xe_guc_submit_disable(guc);
}

int xe_guc_reset_prepare(struct xe_guc *guc)
{
	return xe_guc_submit_reset_prepare(guc);
}

void xe_guc_reset_wait(struct xe_guc *guc)
{
	xe_guc_submit_reset_wait(guc);
}

void xe_guc_stop_prepare(struct xe_guc *guc)
{
	if (!IS_SRIOV_VF(guc_to_xe(guc))) {
		int err;

		err = xe_guc_pc_stop(&guc->pc);
		xe_gt_WARN(guc_to_gt(guc), err, "Failed to stop GuC PC: %pe\n",
			   ERR_PTR(err));
	}
}

void xe_guc_stop(struct xe_guc *guc)
{
	xe_guc_ct_stop(&guc->ct);

	xe_guc_submit_stop(guc);
}

int xe_guc_start(struct xe_guc *guc)
{
	return xe_guc_submit_start(guc);
}

/**
 * xe_guc_runtime_suspend() - GuC runtime suspend
 * @guc: The GuC object
 *
 * Stop further runs of submission tasks on the given GuC and runtime suspend
 * the GuC CT.
 */
void xe_guc_runtime_suspend(struct xe_guc *guc)
{
	xe_guc_submit_pause(guc);
	xe_guc_submit_disable(guc);
	xe_guc_ct_runtime_suspend(&guc->ct);
}

/**
 * xe_guc_runtime_resume() - GuC runtime resume
 * @guc: The GuC object
 *
 * Runtime resume the GuC CT and allow further runs of submission tasks on
 * the given GuC.
 */
void xe_guc_runtime_resume(struct xe_guc *guc)
{
	/*
	 * Runtime PM flows are not applicable for VFs, so it's safe to
	 * directly enable the IRQ.
	 */
	guc_enable_irq(guc);

	xe_guc_ct_runtime_resume(&guc->ct);
	xe_guc_submit_enable(guc);
	xe_guc_submit_unpause(guc);
}

int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int i;

	xe_uc_fw_print(&guc->fw, p);

	if (!IS_SRIOV_VF(gt_to_xe(gt))) {
		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref.domains)
			return -EIO;

		status = xe_mmio_read32(&gt->mmio, GUC_STATUS);

		drm_printf(p, "\nGuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   REG_FIELD_GET(GS_BOOTROM_MASK, status));
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   REG_FIELD_GET(GS_UKERNEL_MASK, status));
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   REG_FIELD_GET(GS_MIA_MASK, status));
		drm_printf(p, "\tLog level = %d\n",
			   xe_guc_log_get_level(&guc->log));

		drm_puts(p, "\nScratch registers:\n");
		for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
		}
	}

	drm_puts(p, "\n");
	xe_guc_ct_print(&guc->ct, p, false);

	drm_puts(p, "\n");
	xe_guc_submit_print(guc, p);

	return 0;
}

/**
 * xe_guc_declare_wedged() - Declare GuC wedged
 * @guc: the GuC object
 *
 * Wedge the GuC which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_guc_declare_wedged(struct xe_guc *guc)
{
	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);

	xe_guc_reset_prepare(guc);
	xe_guc_ct_stop(&guc->ct);
	xe_guc_submit_wedge(guc);
}

/**
 * xe_guc_using_main_gamctrl_queues() - Detect which reporting queues to use.
 * @guc: The GuC object
 *
 * For Xe3p and beyond, we want to program the hardware to use the
 * "Main GAMCTRL queue" rather than the legacy queue before we upload
 * the GuC firmware.  This will allow the GuC to use a new set of
 * registers for pagefault handling and avoid some unnecessary
 * complications with MCR register range handling.
 *
 * Return: true if the new main GAMCTRL queues can be used.
 */
bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	/*
	 * For an Xe3p media GT (35), the GuC and the CS subunits may still be
	 * Xe3, which lacks the Main GAMCTRL support. Reserved bits in GMD_ID
	 * report the IP version of the subunits.
	 */
	if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) == 35) {
		u32 val = xe_mmio_read32(&gt->mmio, GMD_ID);
		u32 subip = REG_FIELD_GET(GMD_ID_SUBIP_FLAG_MASK, val);

		if (!subip)
			return true;

		xe_gt_WARN(gt, subip != 1,
			   "GMD_ID has unknown value in the SUBIP_FLAG field - 0x%x\n",
			   subip);

		return false;
	}

	return GT_VER(gt) >= 35;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_guc_g2g_test.c"
#endif