// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc.h"

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "generated/xe_wa_oob.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_guc_ads.h"
#include "xe_guc_ct.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP    0xFEE00000
static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
			    struct xe_bo *bo)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 addr = xe_bo_ggtt_addr(bo);

	xe_assert(xe, addr >= xe_wopcm_size(xe));
	xe_assert(xe, addr < GUC_GGTT_TOP);
	xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);

	return addr;
}

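/* Encode the requested GuC log level into the GUC_CTL_DEBUG init parameter. */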
static u32 guc_ctl_debug_flags(struct xe_guc *guc)
{
	u32 level = xe_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

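/* SLPC is the only GuC feature currently enabled unconditionally. */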
static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
	return GUC_CTL_ENABLE_SLPC;
}

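/*
 * Pack the GuC log buffer GGTT offset and the crash/debug/capture buffer
 * sizes (in 4K or 1M units, selected at compile time) into the
 * GUC_CTL_LOG_PARAMS init parameter.
 */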
static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
{
	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define LOG_UNIT SZ_1M
	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
	#else
	#define LOG_UNIT SZ_4K
	#define LOG_FLAG 0
	#endif

	#if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
	#define CAPTURE_UNIT SZ_1M
	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
	#else
	#define CAPTURE_UNIT SZ_4K
	#define CAPTURE_FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
	BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
			(GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
		 GUC_LOG_CAPTURE_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef LOG_UNIT
	#undef LOG_FLAG
	#undef CAPTURE_UNIT
	#undef CAPTURE_FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

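/*
 * Translate platform workarounds that require GuC cooperation into the
 * GUC_CTL_WA init parameter bits.
 */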
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	if (XE_WA(gt, 22012773006))
		flags |= GUC_WA_POLLCS;

	if (XE_WA(gt, 16011759253))
		flags |= GUC_WA_GAM_CREDITS;

	if (XE_WA(gt, 14014475959))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	if (XE_WA(gt, 22011391025) || XE_WA(gt, 14012197797))
		flags |= GUC_WA_DUAL_QUEUE;

	/*
	 * Wa_22011802037: FIXME - there's more to be done than simply setting
	 * this flag: make sure each CS is stopped when preparing for GT reset
	 * and wait for pending MI_FW.
	 */
	if (GRAPHICS_VERx100(xe) < 1270)
		flags |= GUC_WA_PRE_PARSER;

	if (XE_WA(gt, 16011777198))
		flags |= GUC_WA_RCS_RESET_BEFORE_RC6;

	if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	if ((XE_WA(gt, 16015675438) || XE_WA(gt, 18020744125)) &&
	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	if (XE_WA(gt, 1509372804))
		flags |= GUC_WA_RENDER_RST_RC6_EXIT;

	return flags;
}

static u32 guc_ctl_devid(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

	return (((u32)xe->info.devid) << 16) | xe->info.revid;
}

static void guc_init_params(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

/*
 * Initialize the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_write_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int i;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
}

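/* drmm release action: tear down GuC PC and uC hardware state on driver removal. */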
static void guc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc *guc = arg;

	xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_fini(&guc->pc);
	xe_uc_fini_hw(&guc_to_gt(guc)->uc);
	xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
}

int xe_guc_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	guc->fw.type = XE_UC_FW_TYPE_GUC;
	ret = xe_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&guc->fw))
		return 0;

	ret = xe_guc_log_init(&guc->log);
	if (ret)
		goto out;

	ret = xe_guc_ads_init(&guc->ads);
	if (ret)
		goto out;

	ret = xe_guc_ct_init(&guc->ct);
	if (ret)
		goto out;

	ret = xe_guc_pc_init(&guc->pc);
	if (ret)
		goto out;

	ret = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, guc_fini, guc);
	if (ret)
		goto out;

	guc_init_params(guc);

	if (xe_gt_is_media_type(gt))
		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
	else
		guc->notify_reg = GUC_HOST_INTERRUPT;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	drm_err(&xe->drm, "GuC init failed with %d\n", ret);
	return ret;
}

/**
 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
	return xe_guc_ads_init_post_hwconfig(&guc->ads);
}

int xe_guc_post_load_init(struct xe_guc *guc)
{
	xe_guc_ads_populate_post_load(&guc->ads);
	guc->submission_state.enabled = true;

	return 0;
}

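/*
 * Soft-reset the GuC by asserting the GuC reset domain in GDRST, then
 * verify that the MIA core went back into reset.
 */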
int xe_guc_reset(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 guc_status, gdrst;
	int ret;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, GDRST, GRDOM_GUC);

	ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
	if (ret) {
		drm_err(&xe->drm, "GuC reset timed out, GDRST=0x%08x\n",
			gdrst);
		goto err_out;
	}

	guc_status = xe_mmio_read32(gt, GUC_STATUS);
	if (!(guc_status & GS_MIA_IN_RESET)) {
		drm_err(&xe->drm,
			"GuC status: 0x%x, MIA core expected to be in reset\n",
			guc_status);
		ret = -EIO;
		goto err_out;
	}

	return 0;

err_out:

	return ret;
}

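/*
 * Program the SHIM control and doorbell registers that must be set up
 * before the firmware image is transferred by DMA.
 */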
static void guc_prepare_xfer(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = guc_to_xe(guc);
	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
		GUC_ENABLE_MIA_CLOCK_GATING;

	if (GRAPHICS_VERx100(xe) < 1250)
		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				GUC_ENABLE_MIA_CACHING;

	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);

	/* Must program this register before loading the ucode with DMA */
	xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);

	xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
}

/*
 * Supporting both MMIO and in-memory RSA
 */
static int guc_xfer_rsa(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	size_t copied;
	int i;

	if (guc->fw.rsa_size > 256) {
		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
				    xe_uc_fw_rsa_offset(&guc->fw);
		xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
		return 0;
	}

	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
	if (copied < sizeof(rsa))
		return -ENOMEM;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}

static int guc_wait_ucode(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 status;
	int ret;

	/*
	 * Wait for the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms
	 * (assuming the GT clock is at maximum frequency). So, a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver may decide to reset the GuC and
	 * attempt the ucode load again if this happens.)
	 *
	 * FIXME: There is a known (but exceedingly unlikely) race condition
	 * where the asynchronous frequency management code could reduce
	 * the GT clock while a GuC reload is in progress (during a full
	 * GT reset). A fix is in progress but there are complex locking
	 * issues to be resolved. In the meantime bump the timeout to
	 * 200ms. Even at the slowest clock, this should be sufficient. And
	 * in the working case, a larger timeout makes no difference.
	 */
	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
			     200000, &status, false);

	if (ret) {
		struct drm_device *drm = &xe->drm;
		struct drm_printer p = drm_info_printer(drm->dev);

		drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
		drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
			 REG_FIELD_GET(GS_MIA_IN_RESET, status),
			 REG_FIELD_GET(GS_BOOTROM_MASK, status),
			 REG_FIELD_GET(GS_UKERNEL_MASK, status),
			 REG_FIELD_GET(GS_MIA_MASK, status),
			 REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

		if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
			drm_info(drm, "GuC firmware signature verification failed\n");
			ret = -ENOEXEC;
		}

		if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
		    XE_GUC_LOAD_STATUS_EXCEPTION) {
			drm_info(drm, "GuC firmware exception. EIP: %#x\n",
				 xe_mmio_read32(guc_to_gt(guc),
						SOFT_SCRATCH(13)));
			ret = -ENXIO;
		}

		xe_guc_log_print(&guc->log, &p);
	} else {
		drm_dbg(&xe->drm, "GuC successfully loaded\n");
	}

	return ret;
}

static int __xe_guc_upload(struct xe_guc *guc)
{
	int ret;

	guc_write_params(guc);
	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
	 * size > 256). The RSA size and therefore the way we provide it to the
	 * HW is fixed for each platform and hard-coded in the bootrom.
	 */
	ret = guc_xfer_rsa(guc);
	if (ret)
		goto out;
	/*
	 * Current uCode expects the code to be loaded at 8k; locations below
	 * this are used for the stack.
	 */
	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
	if (ret)
		goto out;

	/* Wait for authentication */
	ret = guc_wait_ucode(guc);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
	return 0;

out:
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
	return 0	/* FIXME: ret, don't want to stop load currently */;
}

/**
 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
 * @guc: The GuC object
 *
 * This function uploads a minimal GuC that does not support submissions but
 * is in a state where the hwconfig table can be read. Next, it reads and
 * parses the hwconfig table so it can be used for subsequent steps in the
 * driver load. Lastly, it enables CT communication (XXX: this is needed for
 * PFs/VFs only).
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	int ret;

	xe_guc_ads_populate_minimal(&guc->ads);

	ret = __xe_guc_upload(guc);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	return 0;
}

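/* Full GuC load: populate the complete ADS, then upload and authenticate. */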
int xe_guc_upload(struct xe_guc *guc)
{
	xe_guc_ads_populate(&guc->ads);

	return __xe_guc_upload(guc);
}

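/*
 * Check for (and clear) any crash-dump or exception notification the GuC
 * may have posted via SOFT_SCRATCH(15) before CTB communication was up.
 */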
static void guc_handle_mmio_msg(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 msg;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
	msg &= XE_GUC_RECV_MSG_EXCEPTION |
		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
	xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);

	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		drm_err(&guc_to_xe(guc)->drm,
			"Received early GuC crash dump notification!\n");

	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
		drm_err(&guc_to_xe(guc)->drm,
			"Received early GuC exception notification!\n");
}

static void guc_enable_irq(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 events = xe_gt_is_media_type(gt) ?
		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	/* Primary GuC and media GuC share a single enable bit */
	xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));

	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
	 */
	xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
}

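/*
 * Bring up host/GuC communication: unmask the GuC-to-host interrupt,
 * enable the CT buffers and drain any early MMIO notifications.
 */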
int xe_guc_enable_communication(struct xe_guc *guc)
{
	int err;

	guc_enable_irq(guc);

	xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
		      ARAT_EXPIRED_INTRMSK, 0);

	err = xe_guc_ct_enable(&guc->ct);
	if (err)
		return err;

	guc_handle_mmio_msg(guc);

	return 0;
}

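/*
 * Ask the GuC to perform a client soft reset and sanitize the software
 * state so that the firmware can be reloaded on resume.
 */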
int xe_guc_suspend(struct xe_guc *guc)
{
	int ret;
	u32 action[] = {
		XE_GUC_ACTION_CLIENT_SOFT_RESET,
	};

	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
	if (ret) {
		drm_err(&guc_to_xe(guc)->drm,
			"GuC suspend: CLIENT_SOFT_RESET failed: %d\n", ret);
		return ret;
	}

	xe_guc_sanitize(guc);
	return 0;
}

void xe_guc_notify(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	const u32 default_notify_data = 0;

	/*
	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
	 * additional payload data to the GuC but this capability is not
	 * used by the firmware yet. Use the default value in the meantime.
	 */
	xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
}

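/* Ask the GuC to authenticate the HuC firmware via its RSA signature. */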
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
{
	u32 action[] = {
		XE_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_addr
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

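/*
 * Send a GuC HXG request over the VF_SW_FLAG scratch registers and wait
 * for the reply. This path is only valid while the CT channel is down
 * (e.g. during early load): the request dwords are written to the scratch
 * registers, the GuC is notified via the doorbell, and the first reply
 * register is polled until the GuC posts a response. BUSY and RETRY
 * replies are handled internally; on SUCCESS the DATA0 field of the
 * response header is returned.
 */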
int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
			  u32 len, u32 *response_buf)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 header, reply;
	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
	int ret;
	int i;

	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);

	xe_assert(xe, !guc->ct.enabled);
	xe_assert(xe, len);
	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
		  GUC_HXG_ORIGIN_HOST);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
		  GUC_HXG_TYPE_REQUEST);

retry:
	/* Not in a critical data path, so just use if-else for the GT type */
	if (xe_gt_is_media_type(gt)) {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
	} else {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(gt, VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
	}

	xe_guc_notify(guc);

	ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
			     50000, &reply, false);
	if (ret) {
timeout:
		drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
			request[0], reply);
		return ret;
	}

	header = xe_mmio_read32(gt, reply_reg);
	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
		/*
		 * Once we get a BUSY reply we must wait again for the final
		 * response, but this time we can't use the ORIGIN mask any
		 * more. To spot the right change in the reply, we take
		 * advantage of the fact that the SUCCESS and FAILURE response
		 * types differ only by a single bit, while all of their other
		 * bits are set, so those bits can be used as a new mask.
		 */
		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);

		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);

		ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
				     1000000, &header, false);

		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
		if (unlikely(ret))
			goto timeout;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		drm_dbg(&xe->drm, "mmio request %#x: retrying, reason %#x\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		drm_err(&xe->drm, "mmio request %#x: failure %#x/%#x\n",
			request[0], error, hint);
		return -ENXIO;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		drm_err(&xe->drm, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		return -EPROTO;
	}

	/* Just copy the entire possible message response */
	if (response_buf) {
		response_buf[0] = header;

		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
			reply_reg.addr += sizeof(u32);
			response_buf[i] = xe_mmio_read32(gt, reply_reg);
		}
	}

	/* Use data from the GuC response as our return value */
	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}

int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
	return xe_guc_mmio_send_recv(guc, request, len, NULL);
}

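/*
 * Program a single KLV (key/length/value) into the GuC self-config via
 * the HOST2GUC_SELF_CFG action over MMIO. @len is the value size in
 * dwords (1 or 2).
 */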
static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
			   lower_32_bits(val)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
			   upper_32_bits(val)),
	};
	int ret;

	xe_assert(xe, len <= 2);
	xe_assert(xe, len != 1 || !upper_32_bits(val));

	/* Self config must go over MMIO */
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
{
	return guc_self_cfg(guc, key, 1, val);
}

int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
{
	return guc_self_cfg(guc, key, 2, val);
}
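
/*
 * Example usage (illustrative only; the key name below is hypothetical):
 *
 *	ret = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_EXAMPLE_ADDR_KEY, addr);
 *	if (ret)
 *		return ret;
 *
 * sends a SELF_CFG request with a two-dword value over MMIO and returns
 * 0 once the GuC acknowledges that the key was accepted.
 */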

void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		xe_guc_ct_irq_handler(&guc->ct);
}

void xe_guc_sanitize(struct xe_guc *guc)
{
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
	xe_guc_ct_disable(&guc->ct);
	guc->submission_state.enabled = false;
}

int xe_guc_reset_prepare(struct xe_guc *guc)
{
	return xe_guc_submit_reset_prepare(guc);
}

void xe_guc_reset_wait(struct xe_guc *guc)
{
	xe_guc_submit_reset_wait(guc);
}

void xe_guc_stop_prepare(struct xe_guc *guc)
{
	XE_WARN_ON(xe_guc_pc_stop(&guc->pc));
}

int xe_guc_stop(struct xe_guc *guc)
{
	int ret;

	xe_guc_ct_disable(&guc->ct);

	ret = xe_guc_submit_stop(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_start(struct xe_guc *guc)
{
	int ret;

	ret = xe_guc_pc_start(&guc->pc);
	XE_WARN_ON(ret);

	return xe_guc_submit_start(guc);
}

void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int err;
	int i;

	xe_uc_fw_print(&guc->fw, p);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return;

	status = xe_mmio_read32(gt, GUC_STATUS);

	drm_printf(p, "\nGuC status 0x%08x:\n", status);
	drm_printf(p, "\tBootrom status = 0x%x\n",
		   REG_FIELD_GET(GS_BOOTROM_MASK, status));
	drm_printf(p, "\tuKernel status = 0x%x\n",
		   REG_FIELD_GET(GS_UKERNEL_MASK, status));
	drm_printf(p, "\tMIA Core status = 0x%x\n",
		   REG_FIELD_GET(GS_MIA_MASK, status));
	drm_printf(p, "\tLog level = %d\n",
		   xe_guc_log_get_level(&guc->log));

	drm_puts(p, "\nScratch registers:\n");
	for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
		drm_printf(p, "\t%2d: \t0x%x\n",
			   i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
	}

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);

	xe_guc_ct_print(&guc->ct, p, false);
	xe_guc_submit_print(guc, p);
}

/**
 * xe_guc_in_reset() - Detect if GuC MIA is in reset.
 * @guc: The GuC object
 *
 * This function detects a runtime resume from d3cold by leveraging
 * GUC_STATUS; the GuC doesn't get reset during d3hot. It is strictly
 * to be called from the RPM resume handler.
 *
 * Return: true if forcewake could not be taken or the GuC MIA core is in
 * reset, false otherwise.
 */
bool xe_guc_in_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return true;

	status = xe_mmio_read32(gt, GUC_STATUS);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);

	return status & GS_MIA_IN_RESET;
}