xref: /linux/drivers/gpu/drm/xe/xe_guc.c (revision f5c31bcf604db54470868f3118a60dc4a9ba8813)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc.h"
7 
8 #include <drm/drm_managed.h>
9 
10 #include <generated/xe_wa_oob.h>
11 
12 #include "abi/guc_actions_abi.h"
13 #include "abi/guc_errors_abi.h"
14 #include "regs/xe_gt_regs.h"
15 #include "regs/xe_guc_regs.h"
16 #include "xe_bo.h"
17 #include "xe_device.h"
18 #include "xe_force_wake.h"
19 #include "xe_gt.h"
20 #include "xe_guc_ads.h"
21 #include "xe_guc_ct.h"
22 #include "xe_guc_hwconfig.h"
23 #include "xe_guc_log.h"
24 #include "xe_guc_pc.h"
25 #include "xe_guc_relay.h"
26 #include "xe_guc_submit.h"
27 #include "xe_memirq.h"
28 #include "xe_mmio.h"
29 #include "xe_platform_types.h"
30 #include "xe_sriov.h"
31 #include "xe_uc.h"
32 #include "xe_uc_fw.h"
33 #include "xe_wa.h"
34 #include "xe_wopcm.h"
35 
36 /* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
37 #define GUC_GGTT_TOP    0xFEE00000
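/*
 * Addresses handed to the GuC must lie above the WOPCM region and below
 * GUC_GGTT_TOP; guc_bo_ggtt_addr() asserts that a BO satisfies both bounds
 * before its GGTT address is used.
 */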
38 static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
39 			    struct xe_bo *bo)
40 {
41 	struct xe_device *xe = guc_to_xe(guc);
42 	u32 addr = xe_bo_ggtt_addr(bo);
43 
44 	xe_assert(xe, addr >= xe_wopcm_size(xe));
45 	xe_assert(xe, addr < GUC_GGTT_TOP);
46 	xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);
47 
48 	return addr;
49 }
50 
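/*
 * The guc_ctl_*_flags() helpers below assemble the boot-time GuC init
 * parameters; guc_write_params() later copies them into the SOFT_SCRATCH
 * registers, from where the firmware reads them during startup.
 */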
51 static u32 guc_ctl_debug_flags(struct xe_guc *guc)
52 {
53 	u32 level = xe_guc_log_get_level(&guc->log);
54 	u32 flags = 0;
55 
56 	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
57 		flags |= GUC_LOG_DISABLED;
58 	else
59 		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
60 			 GUC_LOG_VERBOSITY_SHIFT;
61 
62 	return flags;
63 }
64 
65 static u32 guc_ctl_feature_flags(struct xe_guc *guc)
66 {
67 	u32 flags = 0;
68 
69 	if (!guc_to_xe(guc)->info.skip_guc_pc)
70 		flags |= GUC_CTL_ENABLE_SLPC;
71 
72 	return flags;
73 }
74 
75 static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
76 {
77 	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
78 	u32 flags;
79 
80 	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
81 	#define LOG_UNIT SZ_1M
82 	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
83 	#else
84 	#define LOG_UNIT SZ_4K
85 	#define LOG_FLAG 0
86 	#endif
87 
88 	#if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
89 	#define CAPTURE_UNIT SZ_1M
90 	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
91 	#else
92 	#define CAPTURE_UNIT SZ_4K
93 	#define CAPTURE_FLAG 0
94 	#endif
95 
96 	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
97 	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
98 	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
99 	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
100 	BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
101 	BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
102 
103 	BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
104 			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
105 	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
106 			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
107 	BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
108 			(GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
109 
110 	flags = GUC_LOG_VALID |
111 		GUC_LOG_NOTIFY_ON_HALF_FULL |
112 		CAPTURE_FLAG |
113 		LOG_FLAG |
114 		((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
115 		((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
116 		((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
117 		 GUC_LOG_CAPTURE_SHIFT) |
118 		(offset << GUC_LOG_BUF_ADDR_SHIFT);
119 
120 	#undef LOG_UNIT
121 	#undef LOG_FLAG
122 	#undef CAPTURE_UNIT
123 	#undef CAPTURE_FLAG
124 
125 	return flags;
126 }
127 
128 static u32 guc_ctl_ads_flags(struct xe_guc *guc)
129 {
130 	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
131 	u32 flags = ads << GUC_ADS_ADDR_SHIFT;
132 
133 	return flags;
134 }
135 
136 #define GUC_VER(maj, min, pat)	(((maj) << 16) | ((min) << 8) | (pat))
137 
138 static u32 guc_ctl_wa_flags(struct xe_guc *guc)
139 {
140 	struct xe_device *xe = guc_to_xe(guc);
141 	struct xe_gt *gt = guc_to_gt(guc);
142 	struct xe_uc_fw *uc_fw = &guc->fw;
143 	struct xe_uc_fw_version *version = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
144 
145 	u32 flags = 0;
146 
147 	if (XE_WA(gt, 22012773006))
148 		flags |= GUC_WA_POLLCS;
149 
150 	if (XE_WA(gt, 14014475959))
151 		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
152 
153 	if (XE_WA(gt, 22011391025))
154 		flags |= GUC_WA_DUAL_QUEUE;
155 
156 	/*
157 	 * Wa_22011802037: FIXME - there's more to be done than simply setting
158 	 * this flag: make sure each CS is stopped when preparing for GT reset
159 	 * and wait for pending MI_FW.
160 	 */
161 	if (GRAPHICS_VERx100(xe) < 1270)
162 		flags |= GUC_WA_PRE_PARSER;
163 
164 	if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
165 		flags |= GUC_WA_CONTEXT_ISOLATION;
166 
167 	if ((XE_WA(gt, 16015675438) || XE_WA(gt, 18020744125)) &&
168 	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
169 		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
170 
171 	if (XE_WA(gt, 1509372804))
172 		flags |= GUC_WA_RENDER_RST_RC6_EXIT;
173 
174 	if (XE_WA(gt, 14018913170)) {
175 		if (GUC_VER(version->major, version->minor, version->patch) >= GUC_VER(70, 7, 0))
176 			flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
177 		else
178 			drm_dbg(&xe->drm, "Skip WA 14018913170: GUC version expected >= 70.7.0, found %u.%u.%u\n",
179 				version->major, version->minor, version->patch);
180 	}
181 
182 	return flags;
183 }
184 
185 static u32 guc_ctl_devid(struct xe_guc *guc)
186 {
187 	struct xe_device *xe = guc_to_xe(guc);
188 
189 	return (((u32)xe->info.devid) << 16) | xe->info.revid;
190 }
191 
192 static void guc_init_params(struct xe_guc *guc)
193 {
194 	struct xe_device *xe = guc_to_xe(guc);
195 	u32 *params = guc->params;
196 	int i;
197 
198 	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
199 	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
200 
201 	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
202 	params[GUC_CTL_FEATURE] = 0;
203 	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
204 	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
205 	params[GUC_CTL_WA] = 0;
206 	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
207 
208 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
209 		drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
210 }
211 
212 static void guc_init_params_post_hwconfig(struct xe_guc *guc)
213 {
214 	struct xe_device *xe = guc_to_xe(guc);
215 	u32 *params = guc->params;
216 	int i;
217 
218 	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
219 	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
220 
221 	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
222 	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
223 	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
224 	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
225 	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
226 	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
227 
228 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
229 		drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
230 }
231 
232 /*
233  * Initialize the GuC parameter block before starting the firmware
234  * transfer. These parameters are read by the firmware on startup
235  * and cannot be changed thereafter.
236  */
237 static void guc_write_params(struct xe_guc *guc)
238 {
239 	struct xe_gt *gt = guc_to_gt(guc);
240 	int i;
241 
242 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
243 
244 	xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);
245 
246 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
247 		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
248 }
249 
250 static void guc_fini(struct drm_device *drm, void *arg)
251 {
252 	struct xe_guc *guc = arg;
253 
254 	xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
255 	xe_uc_fini_hw(&guc_to_gt(guc)->uc);
256 	xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
257 }
258 
259 /**
260  * xe_guc_comm_init_early - early initialization of GuC communication
261  * @guc: the &xe_guc to initialize
262  *
263  * Must be called prior to first MMIO communication with GuC firmware.
264  */
265 void xe_guc_comm_init_early(struct xe_guc *guc)
266 {
267 	struct xe_gt *gt = guc_to_gt(guc);
268 
269 	if (xe_gt_is_media_type(gt))
270 		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
271 	else
272 		guc->notify_reg = GUC_HOST_INTERRUPT;
273 }
274 
275 static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
276 {
277 	struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
278 	struct xe_device *xe = guc_to_xe(guc);
279 	int ret;
280 
281 	if (!IS_DGFX(xe))
282 		return 0;
283 
284 	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
285 	if (ret)
286 		return ret;
287 
288 	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
289 	if (ret)
290 		return ret;
291 
292 	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
293 	if (ret)
294 		return ret;
295 
296 	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
297 	if (ret)
298 		return ret;
299 
300 	return 0;
301 }
302 
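/**
 * xe_guc_init - Initialize GuC software state
 * @guc: The GuC object
 *
 * Initializes the GuC firmware descriptor, log, ADS, CT and relay
 * sub-components, registers a driver-managed cleanup action, computes the
 * boot parameters and marks the firmware as loadable.
 *
 * Return: 0 on success, negative error code on error.
 */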
303 int xe_guc_init(struct xe_guc *guc)
304 {
305 	struct xe_device *xe = guc_to_xe(guc);
306 	struct xe_gt *gt = guc_to_gt(guc);
307 	int ret;
308 
309 	guc->fw.type = XE_UC_FW_TYPE_GUC;
310 	ret = xe_uc_fw_init(&guc->fw);
311 	if (ret)
312 		goto out;
313 
314 	if (!xe_uc_fw_is_enabled(&guc->fw))
315 		return 0;
316 
317 	ret = xe_guc_log_init(&guc->log);
318 	if (ret)
319 		goto out;
320 
321 	ret = xe_guc_ads_init(&guc->ads);
322 	if (ret)
323 		goto out;
324 
325 	ret = xe_guc_ct_init(&guc->ct);
326 	if (ret)
327 		goto out;
328 
329 	ret = xe_guc_relay_init(&guc->relay);
330 	if (ret)
331 		goto out;
332 
333 	ret = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, guc_fini, guc);
334 	if (ret)
335 		goto out;
336 
337 	guc_init_params(guc);
338 
339 	xe_guc_comm_init_early(guc);
340 
341 	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
342 
343 	return 0;
344 
345 out:
346 	drm_err(&xe->drm, "GuC init failed with %d\n", ret);
347 	return ret;
348 }
349 
350 /**
351  * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
352  * @guc: The GuC object
353  *
354  * Return: 0 on success, negative error code on error.
355  */
356 int xe_guc_init_post_hwconfig(struct xe_guc *guc)
357 {
358 	int ret;
359 
360 	ret = xe_guc_realloc_post_hwconfig(guc);
361 	if (ret)
362 		return ret;
363 
364 	guc_init_params_post_hwconfig(guc);
365 
366 	ret = xe_guc_pc_init(&guc->pc);
367 	if (ret)
368 		return ret;
369 
370 	return xe_guc_ads_init_post_hwconfig(&guc->ads);
371 }
372 
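/**
 * xe_guc_post_load_init - Finish GuC initialization after the firmware load
 * @guc: The GuC object
 *
 * Populates the remaining ADS data once the firmware is running and marks
 * GuC submission as enabled.
 *
 * Return: 0 on success.
 */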
373 int xe_guc_post_load_init(struct xe_guc *guc)
374 {
375 	xe_guc_ads_populate_post_load(&guc->ads);
376 	guc->submission_state.enabled = true;
377 
378 	return 0;
379 }
380 
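/**
 * xe_guc_reset - Perform a GuC-only reset
 * @guc: The GuC object
 *
 * Triggers a GuC domain reset through GDRST, waits for the hardware to clear
 * the reset bit and verifies that the GuC MIA core reports being in reset.
 * The caller must hold the GT forcewake domain.
 *
 * Return: 0 on success, negative error code on error.
 */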
381 int xe_guc_reset(struct xe_guc *guc)
382 {
383 	struct xe_device *xe = guc_to_xe(guc);
384 	struct xe_gt *gt = guc_to_gt(guc);
385 	u32 guc_status, gdrst;
386 	int ret;
387 
388 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
389 
390 	xe_mmio_write32(gt, GDRST, GRDOM_GUC);
391 
392 	ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
393 	if (ret) {
394 		drm_err(&xe->drm, "GuC reset timed out, GDRST=0x%08x\n",
395 			gdrst);
396 		goto err_out;
397 	}
398 
399 	guc_status = xe_mmio_read32(gt, GUC_STATUS);
400 	if (!(guc_status & GS_MIA_IN_RESET)) {
401 		drm_err(&xe->drm,
402 			"GuC status: 0x%x, MIA core expected to be in reset\n",
403 			guc_status);
404 		ret = -EIO;
405 		goto err_out;
406 	}
407 
408 	return 0;
409 
410 err_out:
411 
412 	return ret;
413 }
414 
415 static void guc_prepare_xfer(struct xe_guc *guc)
416 {
417 	struct xe_gt *gt = guc_to_gt(guc);
418 	struct xe_device *xe = guc_to_xe(guc);
419 	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
420 		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
421 		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
422 		GUC_ENABLE_MIA_CLOCK_GATING;
423 
424 	if (GRAPHICS_VERx100(xe) < 1250)
425 		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
426 				GUC_ENABLE_MIA_CACHING;
427 
428 	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
429 		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);
430 
431 	/* Must program this register before loading the ucode with DMA */
432 	xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);
433 
434 	xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
435 }
436 
437 /*
438  * Supporting MMIO & in-memory RSA
439  */
440 static int guc_xfer_rsa(struct xe_guc *guc)
441 {
442 	struct xe_gt *gt = guc_to_gt(guc);
443 	u32 rsa[UOS_RSA_SCRATCH_COUNT];
444 	size_t copied;
445 	int i;
446 
447 	if (guc->fw.rsa_size > 256) {
448 		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
449 				    xe_uc_fw_rsa_offset(&guc->fw);
450 		xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
451 		return 0;
452 	}
453 
454 	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
455 	if (copied < sizeof(rsa))
456 		return -ENOMEM;
457 
458 	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
459 		xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);
460 
461 	return 0;
462 }
463 
464 static int guc_wait_ucode(struct xe_guc *guc)
465 {
466 	struct xe_device *xe = guc_to_xe(guc);
467 	u32 status;
468 	int ret;
469 
470 	/*
471 	 * Wait for the GuC to start up.
472 	 * NB: Docs recommend not using the interrupt for completion.
473 	 * Measurements indicate this should take no more than 20ms
474 	 * (assuming the GT clock is at maximum frequency). So, a
475 	 * timeout here indicates that the GuC has failed and is unusable.
476 	 * (Higher levels of the driver may decide to reset the GuC and
477 	 * attempt the ucode load again if this happens.)
478 	 *
479 	 * FIXME: There is a known (but exceedingly unlikely) race condition
480 	 * where the asynchronous frequency management code could reduce
481 	 * the GT clock while a GuC reload is in progress (during a full
482 	 * GT reset). A fix is in progress but there are complex locking
483 	 * issues to be resolved. In the meantime bump the timeout to
484 	 * 200ms. Even at slowest clock, this should be sufficient. And
485 	 * in the working case, a larger timeout makes no difference.
486 	 */
487 	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
488 			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
489 			     200000, &status, false);
490 
491 	if (ret) {
492 		struct drm_device *drm = &xe->drm;
493 
494 		drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
495 		drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
496 			 REG_FIELD_GET(GS_MIA_IN_RESET, status),
497 			 REG_FIELD_GET(GS_BOOTROM_MASK, status),
498 			 REG_FIELD_GET(GS_UKERNEL_MASK, status),
499 			 REG_FIELD_GET(GS_MIA_MASK, status),
500 			 REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
501 
502 		if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
503 			drm_info(drm, "GuC firmware signature verification failed\n");
504 			ret = -ENOEXEC;
505 		}
506 
507 		if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
508 		    XE_GUC_LOAD_STATUS_EXCEPTION) {
509 			drm_info(drm, "GuC firmware exception. EIP: %#x\n",
510 				 xe_mmio_read32(guc_to_gt(guc),
511 						SOFT_SCRATCH(13)));
512 			ret = -ENXIO;
513 		}
514 	} else {
515 		drm_dbg(&xe->drm, "GuC successfully loaded\n");
516 	}
517 
518 	return ret;
519 }
520 
521 static int __xe_guc_upload(struct xe_guc *guc)
522 {
523 	int ret;
524 
525 	guc_write_params(guc);
526 	guc_prepare_xfer(guc);
527 
528 	/*
529 	 * Note that GuC needs the CSS header plus uKernel code to be copied
530 	 * by the DMA engine in one operation, whereas the RSA signature is
531 	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
532 	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
533 	 * size > 256). The RSA size and therefore the way we provide it to the
534 	 * HW is fixed for each platform and hard-coded in the bootrom.
535 	 */
536 	ret = guc_xfer_rsa(guc);
537 	if (ret)
538 		goto out;
539 	/*
540 	 * Current uCode expects the code to be loaded at 8k; locations below
541 	 * this are used for the stack.
542 	 */
543 	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
544 	if (ret)
545 		goto out;
546 
547 	/* Wait for authentication */
548 	ret = guc_wait_ucode(guc);
549 	if (ret)
550 		goto out;
551 
552 	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
553 	return 0;
554 
555 out:
556 	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
557 	return 0	/* FIXME: ret, don't want to stop load currently */;
558 }
559 
560 /**
561  * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
562  * @guc: The GuC object
563  *
564  * This function uploads a minimal GuC that does not support submissions but
565  * is in a state where the hwconfig table can be read. Next, it reads and parses
566  * the hwconfig table so it can be used for subsequent steps in the driver load.
567  * Lastly, it enables CT communication (XXX: this is needed for PFs/VFs only).
568  *
569  * Return: 0 on success, negative error code on error.
570  */
571 int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
572 {
573 	int ret;
574 
575 	xe_guc_ads_populate_minimal(&guc->ads);
576 
577 	/* Raise GT freq to speed up HuC/GuC load */
578 	xe_guc_pc_init_early(&guc->pc);
579 
580 	ret = __xe_guc_upload(guc);
581 	if (ret)
582 		return ret;
583 
584 	ret = xe_guc_hwconfig_init(guc);
585 	if (ret)
586 		return ret;
587 
588 	ret = xe_guc_enable_communication(guc);
589 	if (ret)
590 		return ret;
591 
592 	return 0;
593 }
594 
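/**
 * xe_guc_upload - Load the full GuC firmware
 * @guc: The GuC object
 *
 * Populates the complete ADS and uploads the GuC firmware to the hardware.
 *
 * Return: 0 on success, negative error code on error.
 */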
595 int xe_guc_upload(struct xe_guc *guc)
596 {
597 	xe_guc_ads_populate(&guc->ads);
598 
599 	return __xe_guc_upload(guc);
600 }
601 
602 static void guc_handle_mmio_msg(struct xe_guc *guc)
603 {
604 	struct xe_gt *gt = guc_to_gt(guc);
605 	u32 msg;
606 
607 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
608 
609 	msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
610 	msg &= XE_GUC_RECV_MSG_EXCEPTION |
611 		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
612 	xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);
613 
614 	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
615 		drm_err(&guc_to_xe(guc)->drm,
616 			"Received early GuC crash dump notification!\n");
617 
618 	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
619 		drm_err(&guc_to_xe(guc)->drm,
620 			"Received early GuC exception notification!\n");
621 }
622 
623 static void guc_enable_irq(struct xe_guc *guc)
624 {
625 	struct xe_gt *gt = guc_to_gt(guc);
626 	u32 events = xe_gt_is_media_type(gt) ?
627 		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
628 		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
629 
630 	/* Primary GuC and media GuC share a single enable bit */
631 	xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
632 			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));
633 
634 	/*
635 	 * There are separate mask bits for primary and media GuCs, so use
636 	 * a RMW operation to avoid clobbering the other GuC's setting.
637 	 */
638 	xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
639 }
640 
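/**
 * xe_guc_enable_communication - Enable host/GuC communication
 * @guc: The GuC object
 *
 * Enables the GuC-to-host interrupt, sets up the memory-based interrupt for
 * SR-IOV VFs where applicable, unmasks the ARAT expired interrupt, brings up
 * the CT channel and processes any early messages the GuC left in
 * SOFT_SCRATCH(15).
 *
 * Return: 0 on success, negative error code on error.
 */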
641 int xe_guc_enable_communication(struct xe_guc *guc)
642 {
643 	struct xe_device *xe = guc_to_xe(guc);
644 	int err;
645 
646 	guc_enable_irq(guc);
647 
648 	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
649 		struct xe_gt *gt = guc_to_gt(guc);
650 		struct xe_tile *tile = gt_to_tile(gt);
651 
652 		err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
653 		if (err)
654 			return err;
655 	}
656 
657 	xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
658 		      ARAT_EXPIRED_INTRMSK, 0);
659 
660 	err = xe_guc_ct_enable(&guc->ct);
661 	if (err)
662 		return err;
663 
664 	guc_handle_mmio_msg(guc);
665 
666 	return 0;
667 }
668 
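/**
 * xe_guc_suspend - Prepare the GuC for suspend
 * @guc: The GuC object
 *
 * Sends a CLIENT_SOFT_RESET request over MMIO and sanitizes the GuC software
 * state so the firmware can be reloaded later.
 *
 * Return: 0 on success, negative error code on error.
 */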
669 int xe_guc_suspend(struct xe_guc *guc)
670 {
671 	int ret;
672 	u32 action[] = {
673 		XE_GUC_ACTION_CLIENT_SOFT_RESET,
674 	};
675 
676 	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
677 	if (ret) {
678 		drm_err(&guc_to_xe(guc)->drm,
679 			"GuC suspend: CLIENT_SOFT_RESET fail: %d!\n", ret);
680 		return ret;
681 	}
682 
683 	xe_guc_sanitize(guc);
684 	return 0;
685 }
686 
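/**
 * xe_guc_notify - Notify the GuC that new data is available
 * @guc: The GuC object
 *
 * Rings the GuC by writing to the per-GT host interrupt register selected in
 * xe_guc_comm_init_early().
 */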
687 void xe_guc_notify(struct xe_guc *guc)
688 {
689 	struct xe_gt *gt = guc_to_gt(guc);
690 	const u32 default_notify_data = 0;
691 
692 	/*
693 	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
694 	 * additional payload data to the GuC but this capability is not
695 	 * used by the firmware yet. Use default value in the meantime.
696 	 */
697 	xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
698 }
699 
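/**
 * xe_guc_auth_huc - Request HuC authentication from the GuC
 * @guc: The GuC object
 * @rsa_addr: GGTT address of the HuC RSA signature
 *
 * Return: 0 on success, negative error code on error.
 */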
700 int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
701 {
702 	u32 action[] = {
703 		XE_GUC_ACTION_AUTHENTICATE_HUC,
704 		rsa_addr
705 	};
706 
707 	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
708 }
709 
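/**
 * xe_guc_mmio_send_recv - Send an HXG request to the GuC over MMIO and wait for a reply
 * @guc: The GuC object
 * @request: the HXG request message; dword 0 must be a host-originated REQUEST
 * @len: length of @request in dwords
 * @response_buf: optional buffer for the full response message, may be NULL
 *
 * Only usable while CT communication is disabled; BUSY and RETRY replies from
 * the GuC are handled internally.
 *
 * Return: DATA0 from the GuC response header on success, negative error code
 * on failure.
 */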
710 int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
711 			  u32 len, u32 *response_buf)
712 {
713 	struct xe_device *xe = guc_to_xe(guc);
714 	struct xe_gt *gt = guc_to_gt(guc);
715 	u32 header, reply;
716 	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
717 		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
718 	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
719 	int ret;
720 	int i;
721 
722 	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
723 
724 	xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
725 	xe_assert(xe, len);
726 	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
727 	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
728 	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
729 		  GUC_HXG_ORIGIN_HOST);
730 	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
731 		  GUC_HXG_TYPE_REQUEST);
732 
733 retry:
734 	/* Not in critical data-path, just do if-else for GT type */
735 	if (xe_gt_is_media_type(gt)) {
736 		for (i = 0; i < len; ++i)
737 			xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
738 					request[i]);
739 		xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
740 	} else {
741 		for (i = 0; i < len; ++i)
742 			xe_mmio_write32(gt, VF_SW_FLAG(i),
743 					request[i]);
744 		xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
745 	}
746 
747 	xe_guc_notify(guc);
748 
749 	ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
750 			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
751 			     50000, &reply, false);
752 	if (ret) {
753 timeout:
754 		drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
755 			request[0], reply);
756 		return ret;
757 	}
758 
759 	header = xe_mmio_read32(gt, reply_reg);
760 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
761 	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
762 		/*
763 		 * Once we get a BUSY reply we must wait again for the final
764 		 * response, but this time we can't use the ORIGIN mask anymore.
765 		 * To spot the right change in the reply, we exploit the fact that
766 		 * the SUCCESS and FAILURE responses differ only by a single bit,
767 		 * while all other bits are set and can be used as a new mask.
768 		 */
769 		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
770 		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);
771 
772 		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
773 		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
774 
775 		ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
776 				     1000000, &header, false);
777 
778 		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
779 			     GUC_HXG_ORIGIN_GUC))
780 			goto proto;
781 		if (unlikely(ret)) {
782 			if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
783 			    GUC_HXG_TYPE_NO_RESPONSE_BUSY)
784 				goto proto;
785 			goto timeout;
786 		}
787 	}
788 
789 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
790 	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
791 		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
792 
793 		drm_dbg(&xe->drm, "mmio request %#x: retrying, reason %#x\n",
794 			request[0], reason);
795 		goto retry;
796 	}
797 
798 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
799 	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
800 		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
801 		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
802 
803 		drm_err(&xe->drm, "mmio request %#x: failure %#x/%#x\n",
804 			request[0], error, hint);
805 		return -ENXIO;
806 	}
807 
808 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
809 	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
810 proto:
811 		drm_err(&xe->drm, "mmio request %#x: unexpected reply %#x\n",
812 			request[0], header);
813 		return -EPROTO;
814 	}
815 
816 	/* Just copy entire possible message response */
817 	/* Just copy the entire possible message response */
818 		response_buf[0] = header;
819 
820 		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
821 			reply_reg.addr += sizeof(u32);
822 			response_buf[i] = xe_mmio_read32(gt, reply_reg);
823 		}
824 	}
825 
826 	/* Use data from the GuC response as our return value */
827 	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
828 }
829 
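/**
 * xe_guc_mmio_send - Send an HXG request to the GuC over MMIO
 * @guc: The GuC object
 * @request: the HXG request message
 * @len: length of @request in dwords
 *
 * Same as xe_guc_mmio_send_recv() but without capturing the response payload.
 *
 * Return: DATA0 from the GuC response header on success, negative error code
 * on failure.
 */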
830 int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
831 {
832 	return xe_guc_mmio_send_recv(guc, request, len, NULL);
833 }
834 
835 static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
836 {
837 	struct xe_device *xe = guc_to_xe(guc);
838 	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
839 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
840 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
841 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
842 			   GUC_ACTION_HOST2GUC_SELF_CFG),
843 		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
844 		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
845 		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
846 			   lower_32_bits(val)),
847 		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
848 			   upper_32_bits(val)),
849 	};
850 	int ret;
851 
852 	xe_assert(xe, len <= 2);
853 	xe_assert(xe, len != 1 || !upper_32_bits(val));
854 
855 	/* Self config must go over MMIO */
856 	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
857 
858 	if (unlikely(ret < 0))
859 		return ret;
860 	if (unlikely(ret > 1))
861 		return -EPROTO;
862 	if (unlikely(!ret))
863 		return -ENOKEY;
864 
865 	return 0;
866 }
867 
868 int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
869 {
870 	return guc_self_cfg(guc, key, 1, val);
871 }
872 
873 int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
874 {
875 	return guc_self_cfg(guc, key, 2, val);
876 }
877 
878 void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
879 {
880 	if (iir & GUC_INTR_GUC2HOST)
881 		xe_guc_ct_irq_handler(&guc->ct);
882 }
883 
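/**
 * xe_guc_sanitize - Reset the driver's GuC state
 * @guc: The GuC object
 *
 * Marks the firmware as loadable again, disables the CT channel and clears
 * the submission-enabled flag.
 */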
884 void xe_guc_sanitize(struct xe_guc *guc)
885 {
886 	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
887 	xe_guc_ct_disable(&guc->ct);
888 	guc->submission_state.enabled = false;
889 }
890 
891 int xe_guc_reset_prepare(struct xe_guc *guc)
892 {
893 	return xe_guc_submit_reset_prepare(guc);
894 }
895 
896 void xe_guc_reset_wait(struct xe_guc *guc)
897 {
898 	xe_guc_submit_reset_wait(guc);
899 }
900 
901 void xe_guc_stop_prepare(struct xe_guc *guc)
902 {
903 	XE_WARN_ON(xe_guc_pc_stop(&guc->pc));
904 }
905 
906 int xe_guc_stop(struct xe_guc *guc)
907 {
908 	int ret;
909 
910 	xe_guc_ct_stop(&guc->ct);
911 
912 	ret = xe_guc_submit_stop(guc);
913 	if (ret)
914 		return ret;
915 
916 	return 0;
917 }
918 
919 int xe_guc_start(struct xe_guc *guc)
920 {
921 	int ret;
922 
923 	ret = xe_guc_pc_start(&guc->pc);
924 	XE_WARN_ON(ret);
925 
926 	return xe_guc_submit_start(guc);
927 }
928 
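/**
 * xe_guc_print_info - Print GuC state for debugging
 * @guc: The GuC object
 * @p: the &drm_printer to print into
 *
 * Dumps the firmware state, GUC_STATUS fields, scratch registers, CT state
 * and submission state.
 */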
929 void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
930 {
931 	struct xe_gt *gt = guc_to_gt(guc);
932 	u32 status;
933 	int err;
934 	int i;
935 
936 	xe_uc_fw_print(&guc->fw, p);
937 
938 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
939 	if (err)
940 		return;
941 
942 	status = xe_mmio_read32(gt, GUC_STATUS);
943 
944 	drm_printf(p, "\nGuC status 0x%08x:\n", status);
945 	drm_printf(p, "\tBootrom status = 0x%x\n",
946 		   REG_FIELD_GET(GS_BOOTROM_MASK, status));
947 	drm_printf(p, "\tuKernel status = 0x%x\n",
948 		   REG_FIELD_GET(GS_UKERNEL_MASK, status));
949 	drm_printf(p, "\tMIA Core status = 0x%x\n",
950 		   REG_FIELD_GET(GS_MIA_MASK, status));
951 	drm_printf(p, "\tLog level = %d\n",
952 		   xe_guc_log_get_level(&guc->log));
953 
954 	drm_puts(p, "\nScratch registers:\n");
955 	for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
956 		drm_printf(p, "\t%2d: \t0x%x\n",
957 			   i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
958 	}
959 
960 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
961 
962 	xe_guc_ct_print(&guc->ct, p, false);
963 	xe_guc_submit_print(guc, p);
964 }
965 
966 /**
967  * xe_guc_in_reset() - Detect if GuC MIA is in reset.
968  * @guc: The GuC object
969  *
970  * This function detects runtime resume from d3cold by leveraging
971  * GUC_STATUS. The GuC doesn't get reset during d3hot; this is strictly
972  * meant to be called from the RPM resume handler.
973  *
974  * Return: true if forcewake could not be acquired or the GuC MIA is in
975  * reset, false otherwise.
976  */
977 bool xe_guc_in_reset(struct xe_guc *guc)
978 {
979 	struct xe_gt *gt = guc_to_gt(guc);
980 	u32 status;
981 	int err;
982 
983 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
984 	if (err)
985 		return true;
986 
987 	status = xe_mmio_read32(gt, GUC_STATUS);
988 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
989 
990 	return status & GS_MIA_IN_RESET;
991 }
992