xref: /linux/drivers/gpu/drm/i915/gt/uc/intel_uc.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/string_helpers.h>
7 
8 #include "gt/intel_gt.h"
9 #include "gt/intel_gt_print.h"
10 #include "gt/intel_reset.h"
11 #include "intel_gsc_fw.h"
12 #include "intel_gsc_uc.h"
13 #include "intel_guc.h"
14 #include "intel_guc_ads.h"
15 #include "intel_guc_print.h"
16 #include "intel_guc_submission.h"
17 #include "gt/intel_rps.h"
18 #include "intel_uc.h"
19 
20 #include "i915_drv.h"
21 #include "i915_hwmon.h"
22 
23 static const struct intel_uc_ops uc_ops_off;
24 static const struct intel_uc_ops uc_ops_on;
25 
26 static void uc_expand_default_options(struct intel_uc *uc)
27 {
28 	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
29 
30 	if (i915->params.enable_guc != -1)
31 		return;
32 
33 	/* Don't enable GuC/HuC on pre-Gen12 */
34 	if (GRAPHICS_VER(i915) < 12) {
35 		i915->params.enable_guc = 0;
36 		return;
37 	}
38 
39 	/* Don't enable GuC/HuC on older Gen12 platforms */
40 	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
41 		i915->params.enable_guc = 0;
42 		return;
43 	}
44 
45 	/* Intermediate platforms are HuC authentication only */
46 	if (IS_ALDERLAKE_S(i915) && !IS_RAPTORLAKE_S(i915)) {
47 		i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
48 		return;
49 	}
50 
51 	/* Default: enable HuC authentication and GuC submission */
52 	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
53 }
54 
55 /* Reset GuC providing us with fresh state for both GuC and HuC.
56  */
57 static int __intel_uc_reset_hw(struct intel_uc *uc)
58 {
59 	struct intel_gt *gt = uc_to_gt(uc);
60 	int ret;
61 	u32 guc_status;
62 
63 	ret = intel_reset_guc(gt);
64 	if (ret) {
65 		gt_err(gt, "Failed to reset GuC, ret = %d\n", ret);
66 		return ret;
67 	}
68 
69 	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
70 	gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET),
71 		"GuC status: 0x%x, MIA core expected to be in reset\n",
72 		guc_status);
73 
74 	return ret;
75 }
76 
77 static void __confirm_options(struct intel_uc *uc)
78 {
79 	struct intel_gt *gt = uc_to_gt(uc);
80 	struct drm_i915_private *i915 = gt->i915;
81 
82 	gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
83 	       i915->params.enable_guc,
84 	       str_yes_no(intel_uc_wants_guc(uc)),
85 	       str_yes_no(intel_uc_wants_guc_submission(uc)),
86 	       str_yes_no(intel_uc_wants_huc(uc)),
87 	       str_yes_no(intel_uc_wants_guc_slpc(uc)));
88 
89 	if (i915->params.enable_guc == 0) {
90 		GEM_BUG_ON(intel_uc_wants_guc(uc));
91 		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
92 		GEM_BUG_ON(intel_uc_wants_huc(uc));
93 		GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
94 		return;
95 	}
96 
97 	if (!intel_uc_supports_guc(uc))
98 		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
99 			i915->params.enable_guc, "GuC is not supported!");
100 
101 	if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
102 	    !intel_uc_supports_guc_submission(uc))
103 		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
104 			i915->params.enable_guc, "GuC submission is N/A");
105 
106 	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
107 		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
108 			i915->params.enable_guc, "undocumented flag");
109 }
110 
111 void intel_uc_init_early(struct intel_uc *uc)
112 {
113 	uc_expand_default_options(uc);
114 
115 	intel_guc_init_early(&uc->guc);
116 	intel_huc_init_early(&uc->huc);
117 	intel_gsc_uc_init_early(&uc->gsc);
118 
119 	__confirm_options(uc);
120 
121 	if (intel_uc_wants_guc(uc))
122 		uc->ops = &uc_ops_on;
123 	else
124 		uc->ops = &uc_ops_off;
125 }
126 
127 void intel_uc_init_late(struct intel_uc *uc)
128 {
129 	intel_guc_init_late(&uc->guc);
130 	intel_gsc_uc_load_start(&uc->gsc);
131 }
132 
133 void intel_uc_driver_late_release(struct intel_uc *uc)
134 {
135 	intel_huc_fini_late(&uc->huc);
136 }
137 
138 /**
139  * intel_uc_init_mmio - setup uC MMIO access
140  * @uc: the intel_uc structure
141  *
142  * Setup minimal state necessary for MMIO accesses later in the
143  * initialization sequence.
144  */
145 void intel_uc_init_mmio(struct intel_uc *uc)
146 {
147 	intel_guc_init_send_regs(&uc->guc);
148 }
149 
150 static void __uc_capture_load_err_log(struct intel_uc *uc)
151 {
152 	struct intel_guc *guc = &uc->guc;
153 
154 	if (guc->log.vma && !uc->load_err_log)
155 		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
156 }
157 
158 static void __uc_free_load_err_log(struct intel_uc *uc)
159 {
160 	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
161 
162 	if (log)
163 		i915_gem_object_put(log);
164 }
165 
/* Driver removal: tear down HW state, then SW state, then drop the log. */
void intel_uc_driver_remove(struct intel_uc *uc)
{
	intel_uc_fini_hw(uc);
	intel_uc_fini(uc);
	__uc_free_load_err_log(uc);
}
172 
173 /*
174  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
175  * register using the same bits used in the CT message payload. Since our
176  * communication channel with guc is turned off at this point, we can save the
177  * message and handle it after we turn it back on.
178  */
179 static void guc_clear_mmio_msg(struct intel_guc *guc)
180 {
181 	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
182 }
183 
184 static void guc_get_mmio_msg(struct intel_guc *guc)
185 {
186 	u32 val;
187 
188 	spin_lock_irq(&guc->irq_lock);
189 
190 	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
191 	guc->mmio_msg |= val & guc->msg_enabled_mask;
192 
193 	/*
194 	 * clear all events, including the ones we're not currently servicing,
195 	 * to make sure we don't try to process a stale message if we enable
196 	 * handling of more events later.
197 	 */
198 	guc_clear_mmio_msg(guc);
199 
200 	spin_unlock_irq(&guc->irq_lock);
201 }
202 
/*
 * Dispatch a previously saved mmio-logged event, if any, now that the CT
 * channel is back up.
 */
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));

	/* irq_lock guards mmio_msg, which guc_get_mmio_msg() also updates */
	spin_lock_irq(&guc->irq_lock);
	if (guc->mmio_msg) {
		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
		guc->mmio_msg = 0;
	}
	spin_unlock_irq(&guc->irq_lock);
}
215 
/*
 * Bring up the CT communication channel with the GuC, then drain any
 * messages that arrived via fallback paths (mmio scratch or CT buffers)
 * before the channel and its interrupts were fully enabled.
 *
 * Returns 0 on success, or the error from intel_guc_ct_enable().
 */
static int guc_enable_communication(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	intel_guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(gt->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(gt->irq_lock);

	guc_dbg(guc, "communication enabled\n");

	return 0;
}
242 
/*
 * Tear down the CT channel, mirroring guc_enable_communication(), and
 * capture any final events GuC logs via mmio on the way down.
 */
static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by guc in
	 * via mmio. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	intel_guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	guc_dbg(guc, "communication disabled\n");
}
266 
267 static void __uc_fetch_firmwares(struct intel_uc *uc)
268 {
269 	struct intel_gt *gt = uc_to_gt(uc);
270 	int err;
271 
272 	GEM_BUG_ON(!intel_uc_wants_guc(uc));
273 
274 	err = intel_uc_fw_fetch(&uc->guc.fw);
275 	if (err) {
276 		/* Make sure we transition out of transient "SELECTED" state */
277 		if (intel_uc_wants_huc(uc)) {
278 			gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling HuC\n", ERR_PTR(err));
279 			intel_uc_fw_change_status(&uc->huc.fw,
280 						  INTEL_UC_FIRMWARE_ERROR);
281 		}
282 
283 		if (intel_uc_wants_gsc_uc(uc)) {
284 			gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling GSC\n", ERR_PTR(err));
285 			intel_uc_fw_change_status(&uc->gsc.fw,
286 						  INTEL_UC_FIRMWARE_ERROR);
287 		}
288 
289 		return;
290 	}
291 
292 	if (intel_uc_wants_huc(uc))
293 		intel_uc_fw_fetch(&uc->huc.fw);
294 
295 	if (intel_uc_wants_gsc_uc(uc))
296 		intel_uc_fw_fetch(&uc->gsc.fw);
297 }
298 
299 static void __uc_cleanup_firmwares(struct intel_uc *uc)
300 {
301 	intel_uc_fw_cleanup_fetch(&uc->gsc.fw);
302 	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
303 	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
304 }
305 
306 static int __uc_init(struct intel_uc *uc)
307 {
308 	struct intel_guc *guc = &uc->guc;
309 	struct intel_huc *huc = &uc->huc;
310 	int ret;
311 
312 	GEM_BUG_ON(!intel_uc_wants_guc(uc));
313 
314 	if (!intel_uc_uses_guc(uc))
315 		return 0;
316 
317 	ret = intel_guc_init(guc);
318 	if (ret)
319 		return ret;
320 
321 	if (intel_uc_uses_huc(uc))
322 		intel_huc_init(huc);
323 
324 	if (intel_uc_uses_gsc_uc(uc))
325 		intel_gsc_uc_init(&uc->gsc);
326 
327 	return 0;
328 }
329 ALLOW_ERROR_INJECTION(__uc_init, ERRNO);
330 
331 static void __uc_fini(struct intel_uc *uc)
332 {
333 	intel_gsc_uc_fini(&uc->gsc);
334 	intel_huc_fini(&uc->huc);
335 	intel_guc_fini(&uc->guc);
336 }
337 
338 static int __uc_sanitize(struct intel_uc *uc)
339 {
340 	struct intel_guc *guc = &uc->guc;
341 	struct intel_huc *huc = &uc->huc;
342 
343 	GEM_BUG_ON(!intel_uc_supports_guc(uc));
344 
345 	intel_huc_sanitize(huc);
346 	intel_guc_sanitize(guc);
347 
348 	return __intel_uc_reset_hw(uc);
349 }
350 
/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	/* A zero base or size means WOPCM partitioning never succeeded. */
	if (unlikely(!base || !size)) {
		gt_probe_error(gt, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	/* base/size must be non-zero and fit entirely within their reg fields */
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	/* Write the size with the LOCKED bit set and verify it sticks */
	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	/* Likewise the offset, tagged VALID plus the HuC loading-agent bit */
	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	/* Dump both registers to help diagnose the failed verify */
	gt_probe_error(gt, "Failed to init uC WOPCM registers!\n");
	gt_probe_error(gt, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
		       i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
		       intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	gt_probe_error(gt, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
		       i915_mmio_reg_offset(GUC_WOPCM_SIZE),
		       intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}
400 
401 static bool uc_is_wopcm_locked(struct intel_uc *uc)
402 {
403 	struct intel_gt *gt = uc_to_gt(uc);
404 	struct intel_uncore *uncore = gt->uncore;
405 
406 	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
407 	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
408 }
409 
410 static int __uc_check_hw(struct intel_uc *uc)
411 {
412 	if (uc->fw_table_invalid)
413 		return -EIO;
414 
415 	if (!intel_uc_supports_guc(uc))
416 		return 0;
417 
418 	/*
419 	 * We can silently continue without GuC only if it was never enabled
420 	 * before on this system after reboot, otherwise we risk GPU hangs.
421 	 * To check if GuC was loaded before we look at WOPCM registers.
422 	 */
423 	if (uc_is_wopcm_locked(uc))
424 		return -EIO;
425 
426 	return 0;
427 }
428 
/* Log the selected firmware file path and its major.minor.patch version. */
static void print_fw_ver(struct intel_gt *gt, struct intel_uc_fw *fw)
{
	gt_info(gt, "%s firmware %s version %u.%u.%u\n",
		intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
		fw->file_selected.ver.major,
		fw->file_selected.ver.minor,
		fw->file_selected.ver.patch);
}
437 
/*
 * Load and start the GuC (and, where used, HuC) firmware and enable the
 * requested higher-level features (submission, SLPC).
 *
 * Returns 0 on success or when running without GuC is acceptable, and
 * -EIO on a genuine failure so the driver can keep KMS alive.
 */
static int __uc_init_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;
	bool pl1en = false;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	print_fw_ver(gt, &guc->fw);

	if (intel_uc_uses_huc(uc))
		print_fw_ver(gt, &huc->fw);

	if (!intel_uc_fw_is_loadable(&guc->fw)) {
		/*
		 * GuC fw never reached a loadable state. Treat this as fatal
		 * only when we cannot safely run without GuC: the HW check
		 * fails, the fw was explicitly overridden, or submission was
		 * requested. Otherwise fall back to non-GuC mode (ret == 0).
		 */
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_wants_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	intel_guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (GRAPHICS_VER(i915) == 9)
		attempts = 3;
	else
		attempts = 1;

	/* Disable a potentially low PL1 power limit to allow freq to be raised */
	i915_hwmon_power_max_disable(gt->i915, &pl1en);

	intel_rps_raise_unslice(&uc_to_gt(uc)->rps);

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_rps;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		gt_dbg(gt, "GuC fw load failed (%pe) will reset and retry %d more time(s)\n",
		       ERR_PTR(ret), attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	/*
	 * GSC-loaded HuC is authenticated by the GSC, so we don't need to
	 * trigger the auth here. However, given that the HuC loaded this way
	 * survive GT reset, we still need to update our SW bookkeeping to make
	 * sure it reflects the correct HW status.
	 */
	if (intel_huc_is_loaded_by_gsc(huc))
		intel_huc_update_auth_status(huc);
	else
		intel_huc_auth(huc, INTEL_HUC_AUTH_BY_GUC);

	if (intel_uc_uses_guc_submission(uc)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_log_capture;
	}

	if (intel_uc_uses_guc_slpc(uc)) {
		ret = intel_guc_slpc_enable(&guc->slpc);
		if (ret)
			goto err_submission;
	} else {
		/* Restore GT back to RPn for non-SLPC path */
		intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
	}

	i915_hwmon_power_max_restore(gt->i915, pl1en);

	guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
	guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 * Unwind ladder: each label undoes the steps completed before the
	 * corresponding failure point, ending with a full sanitize.
	 */
err_submission:
	intel_guc_submission_disable(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_rps:
	/* Return GT back to RPn */
	intel_rps_lower_unslice(&uc_to_gt(uc)->rps);

	i915_hwmon_power_max_restore(gt->i915, pl1en);
err_out:
	__uc_sanitize(uc);

	if (!ret) {
		gt_notice(gt, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	gt_probe_error(gt, "GuC initialization failed %pe\n", ERR_PTR(ret));

	/* We want to keep KMS alive */
	return -EIO;
}
568 
569 static void __uc_fini_hw(struct intel_uc *uc)
570 {
571 	struct intel_guc *guc = &uc->guc;
572 
573 	if (!intel_guc_is_fw_running(guc))
574 		return;
575 
576 	if (intel_uc_uses_guc_submission(uc))
577 		intel_guc_submission_disable(guc);
578 
579 	__uc_sanitize(uc);
580 }
581 
582 /**
583  * intel_uc_reset_prepare - Prepare for reset
584  * @uc: the intel_uc structure
585  *
586  * Preparing for full gpu reset.
587  */
588 void intel_uc_reset_prepare(struct intel_uc *uc)
589 {
590 	struct intel_guc *guc = &uc->guc;
591 
592 	uc->reset_in_progress = true;
593 
594 	/* Nothing to do if GuC isn't supported */
595 	if (!intel_uc_supports_guc(uc))
596 		return;
597 
598 	/* Firmware expected to be running when this function is called */
599 	if (!intel_guc_is_ready(guc))
600 		goto sanitize;
601 
602 	if (intel_uc_uses_guc_submission(uc))
603 		intel_guc_submission_reset_prepare(guc);
604 
605 sanitize:
606 	__uc_sanitize(uc);
607 }
608 
609 void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
610 {
611 	struct intel_guc *guc = &uc->guc;
612 
613 	/* Firmware can not be running when this function is called  */
614 	if (intel_uc_uses_guc_submission(uc))
615 		intel_guc_submission_reset(guc, stalled);
616 }
617 
618 void intel_uc_reset_finish(struct intel_uc *uc)
619 {
620 	struct intel_guc *guc = &uc->guc;
621 
622 	/*
623 	 * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
624 	 * So this function is sometimes called with the in-progress flag not set.
625 	 */
626 	uc->reset_in_progress = false;
627 
628 	/* Firmware expected to be running when this function is called */
629 	if (intel_uc_uses_guc_submission(uc))
630 		intel_guc_submission_reset_finish(guc);
631 }
632 
633 void intel_uc_cancel_requests(struct intel_uc *uc)
634 {
635 	struct intel_guc *guc = &uc->guc;
636 
637 	/* Firmware can not be running when this function is called  */
638 	if (intel_uc_uses_guc_submission(uc))
639 		intel_guc_submission_cancel_requests(guc);
640 }
641 
/*
 * Runtime-suspend path: drain outstanding CTBs (bounded wait), then tear
 * down the CT communication channel.
 */
void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_ready(guc)) {
		/* Not running: just record that interrupts are off */
		guc->interrupts.enabled = false;
		return;
	}

	/*
	 * Wait for any outstanding CTB before tearing down communication /w the
	 * GuC.
	 */
#define OUTSTANDING_CTB_TIMEOUT_PERIOD	(HZ / 5)
	intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
				       false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
	/* The wait is best-effort; warn if anything is still outstanding */
	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));

	guc_disable_communication(guc);
}
662 
/*
 * System-suspend path: flush asynchronous uC work, wake any TLB
 * invalidation waiters, and ask the GuC firmware to save its state.
 */
void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;
	int err;

	/* flush the GSC worker */
	intel_gsc_uc_flush_work(&uc->gsc);

	wake_up_all_tlb_invalidate(guc);

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	intel_guc_submission_flush_work(guc);

	/* GuC suspend is issued under a runtime-pm wakeref; failure is
	 * logged but deliberately not propagated.
	 */
	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
		err = intel_guc_suspend(guc);
		if (err)
			guc_dbg(guc, "Failed to suspend, %pe", ERR_PTR(err));
	}
}
687 
688 static void __uc_resume_mappings(struct intel_uc *uc)
689 {
690 	intel_uc_fw_resume_mapping(&uc->guc.fw);
691 	intel_uc_fw_resume_mapping(&uc->huc.fw);
692 }
693 
/*
 * Common resume path for system and runtime resume.
 * @enable_communication: true when CT was torn down (runtime suspend) and
 * must be re-enabled here; false when a full sanitize/re-init already did it.
 */
static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_gt *gt = guc_to_gt(guc);
	int err;

	/* Nothing to do if the fw isn't running */
	if (!intel_guc_is_fw_running(guc))
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
	GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));

	if (enable_communication)
		guc_enable_communication(guc);

	/* If we are only resuming GuC communication but not reloading
	 * GuC, we need to ensure the ARAT timer interrupt is enabled
	 * again. In case of GuC reload, it is enabled during SLPC enable.
	 */
	if (enable_communication && intel_uc_uses_guc_slpc(uc))
		intel_guc_pm_intrmsk_enable(gt);

	err = intel_guc_resume(guc);
	if (err) {
		guc_dbg(guc, "Failed to resume, %pe", ERR_PTR(err));
		return err;
	}

	intel_gsc_uc_resume(&uc->gsc);

	/* Invalidate engine and GuC TLBs, where the fw supports it */
	if (intel_guc_tlb_invalidation_is_available(guc)) {
		intel_guc_invalidate_tlb_engines(guc);
		intel_guc_invalidate_tlb_guc(guc);
	}

	return 0;
}
731 
/* Resume uC operation after S3/S4. */
int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}
740 
/* Resume uC operation after runtime suspend. */
int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}
749 
/* Ops used when GuC is not wanted: verify HW state, undo early init. */
static const struct intel_uc_ops uc_ops_off = {
	.init_hw = __uc_check_hw,
	.fini = __uc_fini, /* to clean-up the init_early initialization */
};
754 
/* Ops used when GuC is wanted: full fetch/init/load/teardown lifecycle. */
static const struct intel_uc_ops uc_ops_on = {
	.sanitize = __uc_sanitize,

	.init_fw = __uc_fetch_firmwares,
	.fini_fw = __uc_cleanup_firmwares,

	.init = __uc_init,
	.fini = __uc_fini,

	.init_hw = __uc_init_hw,
	.fini_hw = __uc_fini_hw,

	.resume_mappings = __uc_resume_mappings,
};
769