xref: /linux/drivers/gpu/drm/i915/gt/uc/intel_guc.c (revision b9d7eb6a31be296ca0af95641a23c4c758703c0a)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2014-2019 Intel Corporation
4  */
5 
6 #include "gem/i915_gem_lmem.h"
7 #include "gt/intel_gt.h"
8 #include "gt/intel_gt_irq.h"
9 #include "gt/intel_gt_pm_irq.h"
10 #include "gt/intel_gt_regs.h"
11 #include "intel_guc.h"
12 #include "intel_guc_slpc.h"
13 #include "intel_guc_ads.h"
14 #include "intel_guc_submission.h"
15 #include "i915_drv.h"
16 #include "i915_irq.h"
17 
18 /**
19  * DOC: GuC
20  *
21  * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
22  * designed to offload some of the functionality usually performed by the host
23  * driver; currently the main operations it can take care of are:
24  *
25  * - Authentication of the HuC, which is required to fully enable HuC usage.
26  * - Low latency graphics context scheduling (a.k.a. GuC submission).
27  * - GT Power management.
28  *
29  * The enable_guc module parameter can be used to select which of those
30  * operations to enable within GuC. Note that not all the operations are
31  * supported on all gen9+ platforms.
32  *
33  * Enabling the GuC is not mandatory and therefore the firmware is only loaded
34  * if at least one of the operations is selected. However, not loading the GuC
35  * might result in the loss of some features that do require the GuC (currently
36  * just the HuC, but more are expected to land in the future).
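 *
 * As an illustration only (the authoritative list of accepted values lives
 * with the enable_guc modparam definition, not in this file), the parameter
 * is a bitmask where bit 0 selects GuC submission and bit 1 selects HuC
 * firmware loading, e.g. on the kernel command line::
 *
 *	i915.enable_guc=2	(load the GuC only to authenticate the HuC)
 *	i915.enable_guc=3	(enable both GuC submission and HuC loading)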
37  */
38 
39 void intel_guc_notify(struct intel_guc *guc)
40 {
41 	struct intel_gt *gt = guc_to_gt(guc);
42 
43 	/*
44 	 * On Gen11+, the value written to the register is passed as a payload
45 	 * to the FW. However, the FW currently treats all values the same way
46 	 * (H2G interrupt), so we can just write the value that the HW expects
47 	 * on older gens.
48 	 */
49 	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
50 }
51 
52 static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
53 {
54 	GEM_BUG_ON(!guc->send_regs.base);
55 	GEM_BUG_ON(!guc->send_regs.count);
56 	GEM_BUG_ON(i >= guc->send_regs.count);
57 
58 	return _MMIO(guc->send_regs.base + 4 * i);
59 }
60 
61 void intel_guc_init_send_regs(struct intel_guc *guc)
62 {
63 	struct intel_gt *gt = guc_to_gt(guc);
64 	enum forcewake_domains fw_domains = 0;
65 	unsigned int i;
66 
67 	GEM_BUG_ON(!guc->send_regs.base);
68 	GEM_BUG_ON(!guc->send_regs.count);
69 
70 	for (i = 0; i < guc->send_regs.count; i++) {
71 		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
72 					guc_send_reg(guc, i),
73 					FW_REG_READ | FW_REG_WRITE);
74 	}
75 	guc->send_regs.fw_domains = fw_domains;
76 }
77 
78 static void gen9_reset_guc_interrupts(struct intel_guc *guc)
79 {
80 	struct intel_gt *gt = guc_to_gt(guc);
81 
82 	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
83 
84 	spin_lock_irq(&gt->irq_lock);
85 	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
86 	spin_unlock_irq(&gt->irq_lock);
87 }
88 
89 static void gen9_enable_guc_interrupts(struct intel_guc *guc)
90 {
91 	struct intel_gt *gt = guc_to_gt(guc);
92 
93 	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
94 
95 	spin_lock_irq(&gt->irq_lock);
96 	WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
97 		     gt->pm_guc_events);
98 	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
99 	spin_unlock_irq(&gt->irq_lock);
100 }
101 
102 static void gen9_disable_guc_interrupts(struct intel_guc *guc)
103 {
104 	struct intel_gt *gt = guc_to_gt(guc);
105 
106 	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
107 
108 	spin_lock_irq(&gt->irq_lock);
109 
110 	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
111 
112 	spin_unlock_irq(&gt->irq_lock);
113 	intel_synchronize_irq(gt->i915);
114 
115 	gen9_reset_guc_interrupts(guc);
116 }
117 
118 static void gen11_reset_guc_interrupts(struct intel_guc *guc)
119 {
120 	struct intel_gt *gt = guc_to_gt(guc);
121 
122 	spin_lock_irq(&gt->irq_lock);
123 	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
124 	spin_unlock_irq(&gt->irq_lock);
125 }
126 
127 static void gen11_enable_guc_interrupts(struct intel_guc *guc)
128 {
129 	struct intel_gt *gt = guc_to_gt(guc);
130 	u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
131 
132 	spin_lock_irq(&gt->irq_lock);
133 	WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
134 	intel_uncore_write(gt->uncore,
135 			   GEN11_GUC_SG_INTR_ENABLE, events);
136 	intel_uncore_write(gt->uncore,
137 			   GEN11_GUC_SG_INTR_MASK, ~events);
138 	spin_unlock_irq(&gt->irq_lock);
139 }
140 
141 static void gen11_disable_guc_interrupts(struct intel_guc *guc)
142 {
143 	struct intel_gt *gt = guc_to_gt(guc);
144 
145 	spin_lock_irq(&gt->irq_lock);
146 
147 	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
148 	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
149 
150 	spin_unlock_irq(&gt->irq_lock);
151 	intel_synchronize_irq(gt->i915);
152 
153 	gen11_reset_guc_interrupts(guc);
154 }
155 
156 void intel_guc_init_early(struct intel_guc *guc)
157 {
158 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
159 
160 	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
161 	intel_guc_ct_init_early(&guc->ct);
162 	intel_guc_log_init_early(&guc->log);
163 	intel_guc_submission_init_early(guc);
164 	intel_guc_slpc_init_early(&guc->slpc);
165 	intel_guc_rc_init_early(guc);
166 
167 	mutex_init(&guc->send_mutex);
168 	spin_lock_init(&guc->irq_lock);
169 	if (GRAPHICS_VER(i915) >= 11) {
170 		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
171 		guc->interrupts.reset = gen11_reset_guc_interrupts;
172 		guc->interrupts.enable = gen11_enable_guc_interrupts;
173 		guc->interrupts.disable = gen11_disable_guc_interrupts;
174 		guc->send_regs.base =
175 			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
176 		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
177 
178 	} else {
179 		guc->notify_reg = GUC_SEND_INTERRUPT;
180 		guc->interrupts.reset = gen9_reset_guc_interrupts;
181 		guc->interrupts.enable = gen9_enable_guc_interrupts;
182 		guc->interrupts.disable = gen9_disable_guc_interrupts;
183 		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
184 		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
185 		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
186 	}
187 }
188 
189 void intel_guc_init_late(struct intel_guc *guc)
190 {
191 	intel_guc_ads_init_late(guc);
192 }
193 
194 static u32 guc_ctl_debug_flags(struct intel_guc *guc)
195 {
196 	u32 level = intel_guc_log_get_level(&guc->log);
197 	u32 flags = 0;
198 
199 	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
200 		flags |= GUC_LOG_DISABLED;
201 	else
202 		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
203 			 GUC_LOG_VERBOSITY_SHIFT;
204 
205 	return flags;
206 }
207 
208 static u32 guc_ctl_feature_flags(struct intel_guc *guc)
209 {
210 	u32 flags = 0;
211 
212 	if (!intel_guc_submission_is_used(guc))
213 		flags |= GUC_CTL_DISABLE_SCHEDULER;
214 
215 	if (intel_guc_slpc_is_used(guc))
216 		flags |= GUC_CTL_ENABLE_SLPC;
217 
218 	return flags;
219 }
220 
221 static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
222 {
223 	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
224 	u32 flags;
225 
226 	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
227 	#define UNIT SZ_1M
228 	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
229 	#else
230 	#define UNIT SZ_4K
231 	#define FLAG 0
232 	#endif
233 
234 	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
235 	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
236 	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
237 	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));
238 
239 	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
240 			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
241 	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
242 			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
243 
244 	flags = GUC_LOG_VALID |
245 		GUC_LOG_NOTIFY_ON_HALF_FULL |
246 		FLAG |
247 		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
248 		((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
249 		(offset << GUC_LOG_BUF_ADDR_SHIFT);
250 
251 	#undef UNIT
252 	#undef FLAG
253 
254 	return flags;
255 }
256 
257 static u32 guc_ctl_ads_flags(struct intel_guc *guc)
258 {
259 	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
260 	u32 flags = ads << GUC_ADS_ADDR_SHIFT;
261 
262 	return flags;
263 }
264 
265 /*
266  * Initialise the GuC parameter block before starting the firmware
267  * transfer. These parameters are read by the firmware on startup
268  * and cannot be changed thereafter.
269  */
270 static void guc_init_params(struct intel_guc *guc)
271 {
272 	u32 *params = guc->params;
273 	int i;
274 
275 	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
276 
277 	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
278 	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
279 	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
280 	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
281 
282 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
283 		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
284 }
285 
286 /*
287  * Write the previously initialised GuC parameter block to the SOFT_SCRATCH
288  * registers before starting the firmware transfer. The firmware reads these
289  * parameters on startup and they cannot be changed thereafter.
290  */
291 void intel_guc_write_params(struct intel_guc *guc)
292 {
293 	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
294 	int i;
295 
296 	/*
297 	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
298 	 * they are power context saved so it's ok to release forcewake
299 	 * when we are done here and take it again at xfer time.
300 	 */
301 	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);
302 
303 	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
304 
305 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
306 		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
307 
308 	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
309 }
310 
311 int intel_guc_init(struct intel_guc *guc)
312 {
313 	struct intel_gt *gt = guc_to_gt(guc);
314 	int ret;
315 
316 	ret = intel_uc_fw_init(&guc->fw);
317 	if (ret)
318 		goto out;
319 
320 	ret = intel_guc_log_create(&guc->log);
321 	if (ret)
322 		goto err_fw;
323 
324 	ret = intel_guc_ads_create(guc);
325 	if (ret)
326 		goto err_log;
327 	GEM_BUG_ON(!guc->ads_vma);
328 
329 	ret = intel_guc_ct_init(&guc->ct);
330 	if (ret)
331 		goto err_ads;
332 
333 	if (intel_guc_submission_is_used(guc)) {
334 		/*
335 		 * This is stuff we need to have available at fw load time
336 		 * if we are planning to enable submission later
337 		 */
338 		ret = intel_guc_submission_init(guc);
339 		if (ret)
340 			goto err_ct;
341 	}
342 
343 	if (intel_guc_slpc_is_used(guc)) {
344 		ret = intel_guc_slpc_init(&guc->slpc);
345 		if (ret)
346 			goto err_submission;
347 	}
348 
349 	/* now that everything is perma-pinned, initialize the parameters */
350 	guc_init_params(guc);
351 
352 	/* We need to notify the guc whenever we change the GGTT */
353 	i915_ggtt_enable_guc(gt->ggtt);
354 
355 	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
356 
357 	return 0;
358 
359 err_submission:
360 	intel_guc_submission_fini(guc);
361 err_ct:
362 	intel_guc_ct_fini(&guc->ct);
363 err_ads:
364 	intel_guc_ads_destroy(guc);
365 err_log:
366 	intel_guc_log_destroy(&guc->log);
367 err_fw:
368 	intel_uc_fw_fini(&guc->fw);
369 out:
370 	i915_probe_error(gt->i915, "failed with %d\n", ret);
371 	return ret;
372 }
373 
374 void intel_guc_fini(struct intel_guc *guc)
375 {
376 	struct intel_gt *gt = guc_to_gt(guc);
377 
378 	if (!intel_uc_fw_is_loadable(&guc->fw))
379 		return;
380 
381 	i915_ggtt_disable_guc(gt->ggtt);
382 
383 	if (intel_guc_slpc_is_used(guc))
384 		intel_guc_slpc_fini(&guc->slpc);
385 
386 	if (intel_guc_submission_is_used(guc))
387 		intel_guc_submission_fini(guc);
388 
389 	intel_guc_ct_fini(&guc->ct);
390 
391 	intel_guc_ads_destroy(guc);
392 	intel_guc_log_destroy(&guc->log);
393 	intel_uc_fw_fini(&guc->fw);
394 }
395 
396 /*
397  * This function implements the MMIO based host to GuC interface.
398  */
399 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
400 			u32 *response_buf, u32 response_buf_size)
401 {
402 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
403 	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
404 	u32 header;
405 	int i;
406 	int ret;
407 
408 	GEM_BUG_ON(!len);
409 	GEM_BUG_ON(len > guc->send_regs.count);
410 
411 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
412 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);
413 
414 	mutex_lock(&guc->send_mutex);
415 	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
416 
417 retry:
418 	for (i = 0; i < len; i++)
419 		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
420 
421 	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
422 
423 	intel_guc_notify(guc);
424 
425 	/*
426 	 * No GuC command should ever take longer than 10ms.
427 	 * Fast commands should still complete in 10us.
428 	 */
429 	ret = __intel_wait_for_register_fw(uncore,
430 					   guc_send_reg(guc, 0),
431 					   GUC_HXG_MSG_0_ORIGIN,
432 					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
433 						      GUC_HXG_ORIGIN_GUC),
434 					   10, 10, &header);
435 	if (unlikely(ret)) {
436 timeout:
437 		drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
438 			request[0], header);
439 		goto out;
440 	}
441 
442 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
443 #define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
444 		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
445 		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })
446 
447 		ret = wait_for(done, 1000);
448 		if (unlikely(ret))
449 			goto timeout;
450 		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
451 				       GUC_HXG_ORIGIN_GUC))
452 			goto proto;
453 #undef done
454 	}
455 
456 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
457 		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
458 
459 		drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
460 			request[0], reason);
461 		goto retry;
462 	}
463 
464 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
465 		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
466 		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
467 
468 		drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
469 			request[0], error, hint);
470 		ret = -ENXIO;
471 		goto out;
472 	}
473 
474 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
475 proto:
476 		drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
477 			request[0], header);
478 		ret = -EPROTO;
479 		goto out;
480 	}
481 
482 	if (response_buf) {
483 		int count = min(response_buf_size, guc->send_regs.count);
484 
485 		GEM_BUG_ON(!count);
486 
487 		response_buf[0] = header;
488 
489 		for (i = 1; i < count; i++)
490 			response_buf[i] = intel_uncore_read(uncore,
491 							    guc_send_reg(guc, i));
492 
493 		/* Use number of copied dwords as our return value */
494 		ret = count;
495 	} else {
496 		/* Use data from the GuC response as our return value */
497 		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
498 	}
499 
500 out:
501 	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
502 	mutex_unlock(&guc->send_mutex);
503 
504 	return ret;
505 }
506 
507 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
508 				       const u32 *payload, u32 len)
509 {
510 	u32 msg;
511 
512 	if (unlikely(!len))
513 		return -EPROTO;
514 
515 	/* Make sure to handle only enabled messages */
516 	msg = payload[0] & guc->msg_enabled_mask;
517 
518 	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
519 		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
520 		intel_guc_log_handle_flush_event(&guc->log);
521 
522 	return 0;
523 }
524 
525 /**
526  * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
527  * @guc: intel_guc structure
528  * @rsa_offset: RSA offset w.r.t. GGTT base of the HuC vma
529  *
530  * Triggers a HuC firmware authentication request to the GuC via the
531  * intel_guc_send() INTEL_GUC_ACTION_AUTHENTICATE_HUC action. This function is
532  * invoked by intel_huc_auth().
533  *
534  * Return:	non-zero code on error
535  */
536 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
537 {
538 	u32 action[] = {
539 		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
540 		rsa_offset
541 	};
542 
543 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
544 }
545 
546 /**
547  * intel_guc_suspend() - notify GuC entering suspend state
548  * @guc:	the guc
549  */
550 int intel_guc_suspend(struct intel_guc *guc)
551 {
552 	int ret;
553 	u32 action[] = {
554 		INTEL_GUC_ACTION_RESET_CLIENT,
555 	};
556 
557 	if (!intel_guc_is_ready(guc))
558 		return 0;
559 
560 	if (intel_guc_submission_is_used(guc)) {
561 		/*
562 		 * This H2G MMIO command tears down the GuC in two steps. First it will
563 		 * generate a G2H CTB for every active context indicating a reset. In
564 		 * practice the i915 shouldn't ever get a G2H as suspend should only be
565 		 * called when the GPU is idle. Next, it tears down the CTBs and this
566 		 * H2G MMIO command completes.
567 		 *
568 		 * Don't abort on a failure code from the GuC. Keep going and do the
569 	 * clean up in sanitize() and re-initialisation on resume and hopefully
570 		 * the error here won't be problematic.
571 		 */
572 		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
573 		if (ret)
574 			DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
575 	}
576 
577 	/* Signal that the GuC isn't running. */
578 	intel_guc_sanitize(guc);
579 
580 	return 0;
581 }
582 
583 /**
584  * intel_guc_resume() - notify GuC resuming from suspend state
585  * @guc:	the guc
586  */
587 int intel_guc_resume(struct intel_guc *guc)
588 {
589 	/*
590 	 * NB: This function can still be called even if GuC submission is
591 	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
592  * if any code is later added here, it must support doing nothing
593 	 * if submission is disabled (as per intel_guc_suspend).
594 	 */
595 	return 0;
596 }
597 
598 /**
599  * DOC: GuC Memory Management
600  *
601  * GuC can't allocate any memory for its own usage, so all the allocations must
602  * be handled by the host driver. GuC accesses the memory via the GGTT, with the
603  * exception of the top and bottom parts of the 4GB address space, which are
604  * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
605  * or other parts of the HW. The driver must take care not to place objects that
606  * the GuC is going to access in these reserved ranges. The layout of the GuC
607  * address space is shown below:
608  *
609  * ::
610  *
611  *     +===========> +====================+ <== FFFF_FFFF
612  *     ^             |      Reserved      |
613  *     |             +====================+ <== GUC_GGTT_TOP
614  *     |             |                    |
615  *     |             |        DRAM        |
616  *    GuC            |                    |
617  *  Address    +===> +====================+ <== GuC ggtt_pin_bias
618  *   Space     ^     |                    |
619  *     |       |     |                    |
620  *     |      GuC    |        GuC         |
621  *     |     WOPCM   |       WOPCM        |
622  *     |      Size   |                    |
623  *     |       |     |                    |
624  *     v       v     |                    |
625  *     +=======+===> +====================+ <== 0000_0000
626  *
627  * The lower part of the GuC Address Space [0, ggtt_pin_bias) is mapped to the
628  * GuC WOPCM, while the upper part [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
629  * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
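 *
 * A minimal usage sketch (error handling trimmed, a valid &struct intel_guc
 * pointer assumed): allocations for GuC usage go through the helpers below,
 * which apply the ggtt_pin_bias automatically, and intel_guc_ggtt_offset()
 * returns the GuC-visible address of the result::
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	u32 guc_offset;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
 *	if (err)
 *		return err;
 *
 *	guc_offset = intel_guc_ggtt_offset(guc, vma);
 *
 *	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);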
630  */
631 
632 /**
633  * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
634  * @guc:	the guc
635  * @size:	size of area to allocate (both virtual space and memory)
636  *
637  * This is a wrapper to create an object for use with the GuC. In order to
638  * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
639  * both some backing storage and a range inside the Global GTT. We must pin
640  * it in the GGTT somewhere other than than [0, GUC ggtt_pin_bias) because that
641  * range is reserved inside GuC.
642  *
643  * Return:	A i915_vma if successful, otherwise an ERR_PTR.
644  */
645 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
646 {
647 	struct intel_gt *gt = guc_to_gt(guc);
648 	struct drm_i915_gem_object *obj;
649 	struct i915_vma *vma;
650 	u64 flags;
651 	int ret;
652 
653 	if (HAS_LMEM(gt->i915))
654 		obj = i915_gem_object_create_lmem(gt->i915, size,
655 						  I915_BO_ALLOC_CPU_CLEAR |
656 						  I915_BO_ALLOC_CONTIGUOUS |
657 						  I915_BO_ALLOC_PM_EARLY);
658 	else
659 		obj = i915_gem_object_create_shmem(gt->i915, size);
660 
661 	if (IS_ERR(obj))
662 		return ERR_CAST(obj);
663 
664 	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
665 	if (IS_ERR(vma))
666 		goto err;
667 
668 	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
669 	ret = i915_ggtt_pin(vma, NULL, 0, flags);
670 	if (ret) {
671 		vma = ERR_PTR(ret);
672 		goto err;
673 	}
674 
675 	return i915_vma_make_unshrinkable(vma);
676 
677 err:
678 	i915_gem_object_put(obj);
679 	return vma;
680 }
681 
682 /**
683  * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
684  * @guc:	the guc
685  * @size:	size of area to allocate (both virtual space and memory)
686  * @out_vma:	return variable for the allocated vma pointer
687  * @out_vaddr:	return variable for the obj mapping
688  *
689  * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
690  * object with I915_MAP_WB.
691  *
692  * Return:	0 if successful, a negative errno code otherwise.
693  */
694 int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
695 				   struct i915_vma **out_vma, void **out_vaddr)
696 {
697 	struct i915_vma *vma;
698 	void *vaddr;
699 
700 	vma = intel_guc_allocate_vma(guc, size);
701 	if (IS_ERR(vma))
702 		return PTR_ERR(vma);
703 
704 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
705 						 i915_coherent_map_type(guc_to_gt(guc)->i915,
706 									vma->obj, true));
707 	if (IS_ERR(vaddr)) {
708 		i915_vma_unpin_and_release(&vma, 0);
709 		return PTR_ERR(vaddr);
710 	}
711 
712 	*out_vma = vma;
713 	*out_vaddr = vaddr;
714 
715 	return 0;
716 }
717 
718 /**
719  * intel_guc_load_status() - dump information about GuC load status
720  * @guc: the GuC
721  * @p: the &drm_printer
722  *
723  * Pretty printer for GuC load status.
724  */
725 void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
726 {
727 	struct intel_gt *gt = guc_to_gt(guc);
728 	struct intel_uncore *uncore = gt->uncore;
729 	intel_wakeref_t wakeref;
730 
731 	if (!intel_guc_is_supported(guc)) {
732 		drm_printf(p, "GuC not supported\n");
733 		return;
734 	}
735 
736 	if (!intel_guc_is_wanted(guc)) {
737 		drm_printf(p, "GuC disabled\n");
738 		return;
739 	}
740 
741 	intel_uc_fw_dump(&guc->fw, p);
742 
743 	with_intel_runtime_pm(uncore->rpm, wakeref) {
744 		u32 status = intel_uncore_read(uncore, GUC_STATUS);
745 		u32 i;
746 
747 		drm_printf(p, "\nGuC status 0x%08x:\n", status);
748 		drm_printf(p, "\tBootrom status = 0x%x\n",
749 			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
750 		drm_printf(p, "\tuKernel status = 0x%x\n",
751 			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
752 		drm_printf(p, "\tMIA Core status = 0x%x\n",
753 			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
754 		drm_puts(p, "\nScratch registers:\n");
755 		for (i = 0; i < 16; i++) {
756 			drm_printf(p, "\t%2d: \t0x%x\n",
757 				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
758 		}
759 	}
760 }
761 
762 void intel_guc_write_barrier(struct intel_guc *guc)
763 {
764 	struct intel_gt *gt = guc_to_gt(guc);
765 
766 	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
767 		/*
768 		 * Ensure intel_uncore_write_fw can be used rather than
769 		 * intel_uncore_write.
770 		 */
771 		GEM_BUG_ON(guc->send_regs.fw_domains);
772 
773 		/*
774 		 * This register is used by the i915 and GuC for MMIO based
775 		 * communication. Once we are in this code CTBs are the only
776 		 * method the i915 uses to communicate with the GuC so it is
777 		 * safe to write to this register (a value of 0 is NOP for MMIO
778 		 * communication). If we ever start mixing CTBs and MMIOs a new
779 		 * register will have to be chosen. This function is also used
780 		 * to enforce ordering of a work queue item write and an update
781 		 * to the process descriptor. When a work queue is being used,
782 		 * CTBs are also the only mechanism of communication.
783 		 */
784 		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
785 	} else {
786 		/* wmb() sufficient for a barrier if in smem */
787 		wmb();
788 	}
789 }
790