// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */

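/*
 * For example (an illustrative sketch only; see i915_params.h for the
 * authoritative bit definitions), loading the module with both GuC
 * submission and HuC loading requested is typically done with:
 *
 *	modprobe i915 enable_guc=3
 *
 * where bit 0 requests GuC submission and bit 1 requests HuC loading.
 */
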
void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(gt->irq_lock);
	guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			 gt->pm_guc_events);
	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	spin_unlock_irq(gt->irq_lock);

	guc->interrupts.enabled = true;
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
	guc->interrupts.enabled = false;

	spin_lock_irq(gt->irq_lock);

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static bool __gen11_reset_guc_interrupts(struct intel_gt *gt)
{
	u32 irq = gt->type == GT_MEDIA ? MTL_MGUC : GEN11_GUC;

	lockdep_assert_held(gt->irq_lock);
	return gen11_gt_reset_one_iir(gt, 0, irq);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(gt->irq_lock);
	__gen11_reset_guc_interrupts(gt);
	spin_unlock_irq(gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(gt->irq_lock);
	__gen11_reset_guc_interrupts(gt);
	spin_unlock_irq(gt->irq_lock);

	guc->interrupts.enabled = true;
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	guc->interrupts.enabled = false;
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

static void guc_dead_worker_func(struct work_struct *w)
{
	struct intel_guc *guc = container_of(w, struct intel_guc, dead_guc_worker);
	struct intel_gt *gt = guc_to_gt(guc);
	unsigned long last = guc->last_dead_guc_jiffies;
	unsigned long delta = jiffies_to_msecs(jiffies - last);

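	/*
	 * A dead-GuC event arriving within 500ms of the previous one
	 * suggests the GuC is stuck in a crash/reset loop, so wedge the GT
	 * rather than resetting it yet again; otherwise attempt a full GT
	 * reset with error capture.
	 */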
	if (delta < 500) {
		intel_gt_set_wedged(gt);
	} else {
		intel_gt_handle_error(gt, ALL_ENGINES, I915_ERROR_CAPTURE, "dead GuC");
		guc->last_dead_guc_jiffies = jiffies;
	}
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;

	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, true);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);
	intel_guc_slpc_init_early(&guc->slpc);
	intel_guc_rc_init_early(guc);

	INIT_WORK(&guc->dead_guc_worker, guc_dead_worker_func);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (GRAPHICS_VER(i915) >= 11) {
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
		if (gt->type == GT_MEDIA) {
			guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
			guc->send_regs.base = i915_mmio_reg_offset(MEDIA_SOFT_SCRATCH(0));
		} else {
			guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
			guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		}

		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;

	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
				  INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

void intel_guc_init_late(struct intel_guc *guc)
{
	intel_guc_ads_init_late(guc);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_submission_is_used(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	if (intel_guc_slpc_is_used(guc))
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	struct intel_guc_log *log = &guc->log;
	u32 offset, flags;

	GEM_BUG_ON(!log->sizes_initialised);

	offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		log->sizes[GUC_LOG_SECTIONS_DEBUG].flag |
		log->sizes[GUC_LOG_SECTIONS_CAPTURE].flag |
		(log->sizes[GUC_LOG_SECTIONS_CRASH].count << GUC_LOG_CRASH_SHIFT) |
		(log->sizes[GUC_LOG_SECTIONS_DEBUG].count << GUC_LOG_DEBUG_SHIFT) |
		(log->sizes[GUC_LOG_SECTIONS_CAPTURE].count << GUC_LOG_CAPTURE_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

static u32 guc_ctl_wa_flags(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	/* Wa_22012773006:gen11,gen12 < XeHP */
	if (GRAPHICS_VER(gt->i915) >= 11 &&
	    GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
		flags |= GUC_WA_POLLCS;

	/* Wa_14014475959 */
	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_DG2(gt->i915))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	/* Wa_16019325821 */
	/* Wa_14019159160 */
	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
		flags |= GUC_WA_RCS_CCS_SWITCHOUT;

	/*
	 * Wa_14012197797
	 * Wa_22011391025
	 *
	 * The same WA bit is used for both and 22011391025 is applicable to
	 * all DG2.
	 */
	if (IS_DG2(gt->i915))
		flags |= GUC_WA_DUAL_QUEUE;

	/* Wa_22011802037: graphics version 11/12 */
	if (intel_engine_reset_needs_wa_22011802037(gt))
		flags |= GUC_WA_PRE_PARSER;

	/*
	 * Wa_22012727170
	 * Wa_22012727685
	 */
	if (IS_DG2_G11(gt->i915))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	/*
	 * Wa_14018913170: Applicable to all platforms supported by i915 so
	 * don't bother testing for all X/Y/Z platforms explicitly.
	 */
	if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
		flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;

	return flags;
}

static u32 guc_ctl_devid(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

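	/*
	 * Pack the PCI device and revision IDs into a single dword for the
	 * GuC; e.g. (sketch) devid 0x9a49 with revid 0x01 packs to
	 * 0x9a490001.
	 */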
	return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block into the SOFT_SCRATCH registers before
 * starting the firmware transfer, where the firmware will read it on
 * startup (see guc_init_params() above for how the block is built).
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}

void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	intel_wakeref_t wakeref;
	u32 stamp = 0;
	u64 ktime;

	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
	ktime = ktime_get_boottime_ns();

	drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
	drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
	drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
}

int intel_guc_init(struct intel_guc *guc)
{
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_capture_init(guc);
	if (ret)
		goto err_log;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_capture;

	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	if (intel_guc_slpc_is_used(guc)) {
		ret = intel_guc_slpc_init(&guc->slpc);
		if (ret)
			goto err_submission;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

err_submission:
	intel_guc_submission_fini(guc);
err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_capture:
	intel_guc_capture_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
out:
	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	if (!intel_uc_fw_is_loadable(&guc->fw))
		return;

	flush_work(&guc->dead_guc_worker);

	if (intel_guc_slpc_is_used(guc))
		intel_guc_slpc_fini(&guc->slpc);

	if (intel_guc_submission_is_used(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_capture_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
}

/*
 * This function implements the MMIO based host to GuC interface: the request
 * is written to the send (scratch) registers, an interrupt notifies the GuC,
 * and the first register is then polled for a GuC-originated reply, retrying
 * on BUSY/RETRY status before decoding the final SUCCESS/FAILURE header.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 header;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

retry:
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   GUC_HXG_MSG_0_ORIGIN,
					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
						      GUC_HXG_ORIGIN_GUC),
					   10, 10, &header);
	if (unlikely(ret)) {
timeout:
		guc_err(guc, "mmio request %#x: no reply %x\n",
			request[0], header);
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })

		ret = wait_for(done, 1000);
		if (unlikely(ret))
			goto timeout;
		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
				       GUC_HXG_ORIGIN_GUC))
			goto proto;
#undef done
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		guc_dbg(guc, "mmio request %#x: retrying, reason %u\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		guc_err(guc, "mmio request %#x: failure %x/%u\n",
			request[0], error, hint);
		ret = -ENXIO;
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		guc_err(guc, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		ret = -EPROTO;
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count);

		GEM_BUG_ON(!count);

		response_buf[0] = header;

		for (i = 1; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i));

		/* Use number of copied dwords as our return value */
		ret = count;
	} else {
		/* Use data from the GuC response as our return value */
		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
	}

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
{
	if (action == INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
		guc_err(guc, "Crash dump notification\n");
	else if (action == INTEL_GUC_ACTION_NOTIFY_EXCEPTION)
		guc_err(guc, "Exception notification\n");
	else
		guc_err(guc, "Unknown crash notification: 0x%04X\n", action);

	queue_work(system_unbound_wq, &guc->dead_guc_worker);

	return 0;
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		guc_err(guc, "Received early crash dump notification!\n");
	if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
		guc_err(guc, "Received early exception notification!\n");

	if (msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | INTEL_GUC_RECV_MSG_EXCEPTION))
		queue_work(system_unbound_wq, &guc->dead_guc_worker);

	return 0;
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA offset w.r.t. the GGTT base of the HuC vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface of intel_guc_send(). This
 * function is invoked by intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	int ret;
	u32 action[] = {
		INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
	};

	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		flush_work(&guc->dead_guc_worker);

		/*
		 * This H2G MMIO command tears down the GuC in two steps. First it will
		 * generate a G2H CTB for every active context indicating a reset. In
		 * practice the i915 shouldn't ever get a G2H as suspend should only be
		 * called when the GPU is idle. Next, it tears down the CTBs and this
		 * H2G MMIO command completes.
		 *
		 * Don't abort on a failure code from the GuC. Keep going and do the
		 * clean up in sanitize() and re-initialisation on resume; hopefully
		 * the error here won't be problematic.
		 */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
		if (ret)
			guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
				ERR_PTR(ret));
	}

	/* Signal that the GuC isn't running. */
	intel_guc_sanitize(guc);

	return 0;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	/*
	 * NB: This function can still be called even if GuC submission is
	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
	 * if any code is later added here, it must support doing nothing
	 * if submission is disabled (as per intel_guc_suspend).
	 */
	return 0;
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is
 * mapped to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

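/*
 * A minimal sketch of the resulting rule (mirroring the asserts in
 * intel_guc_ggtt_offset(); illustrative only, not driver code): any GGTT
 * offset handed to the GuC must satisfy
 *
 *	offset >= i915_ggtt_pin_bias(vma) &&
 *	offset + vma->size <= GUC_GGTT_TOP
 */
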
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	if (HAS_LMEM(gt->i915))
		obj = i915_gem_object_create_lmem(gt->i915, size,
						  I915_BO_ALLOC_CPU_CLEAR |
						  I915_BO_ALLOC_CONTIGUOUS |
						  I915_BO_ALLOC_PM_EARLY);
	else
		obj = i915_gem_object_create_shmem(gt->i915, size);

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Wa_22016122933: For Media version 13.0, all Media GT shared
	 * memory needs to be mapped as WC on CPU side and UC (PAT
	 * index 2) on GPU side.
	 */
	if (intel_gt_needs_wa_22016122933(gt))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_ggtt_pin(vma, NULL, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 intel_gt_coherent_map_type(guc_to_gt(guc),
									    vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}

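/*
 * Illustrative usage (a sketch only; the SZ_4K size and the release flag
 * are examples, not requirements):
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
 *	if (err)
 *		return err;
 *
 *	... write GuC-visible data through vaddr ...
 *
 *	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */
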
static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
	};
	int ret;

	GEM_BUG_ON(len > 2);
	GEM_BUG_ON(len == 1 && upper_32_bits(value));

	/* Self config must go over MMIO */
	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
	int err = __guc_action_self_cfg(guc, key, len, value);

	if (unlikely(err))
		guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
				ERR_PTR(err), key, value);
	return err;
}

int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
{
	return __guc_self_cfg(guc, key, 1, value);
}

int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
{
	return __guc_self_cfg(guc, key, 2, value);
}

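/*
 * A typical caller (sketch, assuming the KLV key names from
 * abi/guc_klvs_abi.h) registers a 64-bit value such as a CTB descriptor
 * address during CTB setup:
 *
 *	err = intel_guc_self_cfg64(guc,
 *				   GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
 *				   ggtt_addr);
 */
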
/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 *
 * Pretty printer for GuC load status.
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_supported(guc)) {
		drm_printf(p, "GuC not supported\n");
		return;
	}

	if (!intel_guc_is_wanted(guc)) {
		drm_printf(p, "GuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&guc->fw, p);

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		u32 status = intel_uncore_read(uncore, GUC_STATUS);
		u32 i;

		drm_printf(p, "GuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
		drm_puts(p, "Scratch registers:\n");
		for (i = 0; i < 16; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
		}
	}
}

void intel_guc_write_barrier(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
		/*
		 * Ensure intel_uncore_write_fw can be used rather than
		 * intel_uncore_write.
		 */
		GEM_BUG_ON(guc->send_regs.fw_domains);

		/*
		 * This register is used by the i915 and GuC for MMIO based
		 * communication. Once we are in this code CTBs are the only
		 * method the i915 uses to communicate with the GuC so it is
		 * safe to write to this register (a value of 0 is NOP for MMIO
		 * communication). If we ever start mixing CTBs and MMIOs a new
		 * register will have to be chosen. This function is also used
		 * to enforce ordering of a work queue item write and an update
		 * to the process descriptor. When a work queue is being used,
		 * CTBs are also the only mechanism of communication.
		 */
		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
	} else {
		/* wmb() sufficient for a barrier if in smem */
		wmb();
	}
}
964