xref: /linux/drivers/gpu/drm/i915/gt/uc/intel_guc.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
13ea58029SMichal Wajdeczko // SPDX-License-Identifier: MIT
20f261b24SDaniele Ceraolo Spurio /*
33ea58029SMichal Wajdeczko  * Copyright © 2014-2019 Intel Corporation
40f261b24SDaniele Ceraolo Spurio  */
50f261b24SDaniele Ceraolo Spurio 
67acbbc7cSDaniele Ceraolo Spurio #include "gem/i915_gem_lmem.h"
784b1ca2fSDaniele Ceraolo Spurio #include "gt/intel_gt.h"
89fb94522SAndi Shyti #include "gt/intel_gt_irq.h"
99fb94522SAndi Shyti #include "gt/intel_gt_pm_irq.h"
100d6419e9SMatt Roper #include "gt/intel_gt_regs.h"
110f261b24SDaniele Ceraolo Spurio #include "intel_guc.h"
120f261b24SDaniele Ceraolo Spurio #include "intel_guc_ads.h"
1324492514SAlan Previn #include "intel_guc_capture.h"
14ecb89c2cSMichal Wajdeczko #include "intel_guc_print.h"
1524492514SAlan Previn #include "intel_guc_slpc.h"
160f261b24SDaniele Ceraolo Spurio #include "intel_guc_submission.h"
170f261b24SDaniele Ceraolo Spurio #include "i915_drv.h"
1880dfdeb7SJani Nikula #include "i915_irq.h"
19476f62b8SJani Nikula #include "i915_reg.h"
200f261b24SDaniele Ceraolo Spurio 
21218151e9SDaniele Ceraolo Spurio /**
22218151e9SDaniele Ceraolo Spurio  * DOC: GuC
23218151e9SDaniele Ceraolo Spurio  *
24218151e9SDaniele Ceraolo Spurio  * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
25218151e9SDaniele Ceraolo Spurio  * designed to offload some of the functionality usually performed by the host
26218151e9SDaniele Ceraolo Spurio  * driver; currently the main operations it can take care of are:
27218151e9SDaniele Ceraolo Spurio  *
28218151e9SDaniele Ceraolo Spurio  * - Authentication of the HuC, which is required to fully enable HuC usage.
29218151e9SDaniele Ceraolo Spurio  * - Low latency graphics context scheduling (a.k.a. GuC submission).
30218151e9SDaniele Ceraolo Spurio  * - GT Power management.
31218151e9SDaniele Ceraolo Spurio  *
32218151e9SDaniele Ceraolo Spurio  * The enable_guc module parameter can be used to select which of those
33218151e9SDaniele Ceraolo Spurio  * operations to enable within GuC. Note that not all the operations are
34218151e9SDaniele Ceraolo Spurio  * supported on all gen9+ platforms.
35218151e9SDaniele Ceraolo Spurio  *
36218151e9SDaniele Ceraolo Spurio  * Enabling the GuC is not mandatory and therefore the firmware is only loaded
37218151e9SDaniele Ceraolo Spurio  * if at least one of the operations is selected. However, not loading the GuC
38218151e9SDaniele Ceraolo Spurio  * might result in the loss of some features that do require the GuC (currently
39218151e9SDaniele Ceraolo Spurio  * just the HuC, but more are expected to land in the future).
40218151e9SDaniele Ceraolo Spurio  */
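
/*
 * For example (illustrative; see the enable_guc definition in i915_params
 * for the authoritative bit values): booting with i915.enable_guc=2 loads
 * the GuC only to authenticate the HuC, while i915.enable_guc=3 also
 * enables GuC submission.
 */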
41218151e9SDaniele Ceraolo Spurio 
42f20c6b27SDaniele Ceraolo Spurio void intel_guc_notify(struct intel_guc *guc)
430f261b24SDaniele Ceraolo Spurio {
4484b1ca2fSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
450f261b24SDaniele Ceraolo Spurio 
46f20c6b27SDaniele Ceraolo Spurio 	/*
47f20c6b27SDaniele Ceraolo Spurio 	 * On Gen11+, the value written to the register is passed as a payload
48f20c6b27SDaniele Ceraolo Spurio 	 * to the FW. However, the FW currently treats all values the same way
49f20c6b27SDaniele Ceraolo Spurio 	 * (H2G interrupt), so we can just write the value that the HW expects
50f20c6b27SDaniele Ceraolo Spurio 	 * on older gens.
51f20c6b27SDaniele Ceraolo Spurio 	 */
52f20c6b27SDaniele Ceraolo Spurio 	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
530f261b24SDaniele Ceraolo Spurio }
540f261b24SDaniele Ceraolo Spurio 
550f261b24SDaniele Ceraolo Spurio static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
560f261b24SDaniele Ceraolo Spurio {
570f261b24SDaniele Ceraolo Spurio 	GEM_BUG_ON(!guc->send_regs.base);
580f261b24SDaniele Ceraolo Spurio 	GEM_BUG_ON(!guc->send_regs.count);
590f261b24SDaniele Ceraolo Spurio 	GEM_BUG_ON(i >= guc->send_regs.count);
600f261b24SDaniele Ceraolo Spurio 
610f261b24SDaniele Ceraolo Spurio 	return _MMIO(guc->send_regs.base + 4 * i);
620f261b24SDaniele Ceraolo Spurio }
630f261b24SDaniele Ceraolo Spurio 
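/*
 * Cache the set of forcewake domains needed to read/write the MMIO scratch
 * registers used for host-to-GuC messaging, so each send only wakes the
 * domains it actually needs.
 */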
640f261b24SDaniele Ceraolo Spurio void intel_guc_init_send_regs(struct intel_guc *guc)
650f261b24SDaniele Ceraolo Spurio {
6684b1ca2fSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
670f261b24SDaniele Ceraolo Spurio 	enum forcewake_domains fw_domains = 0;
680f261b24SDaniele Ceraolo Spurio 	unsigned int i;
690f261b24SDaniele Ceraolo Spurio 
70e09be87aSMichal Wajdeczko 	GEM_BUG_ON(!guc->send_regs.base);
71e09be87aSMichal Wajdeczko 	GEM_BUG_ON(!guc->send_regs.count);
720f261b24SDaniele Ceraolo Spurio 
730f261b24SDaniele Ceraolo Spurio 	for (i = 0; i < guc->send_regs.count; i++) {
7484b1ca2fSDaniele Ceraolo Spurio 		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
750f261b24SDaniele Ceraolo Spurio 					guc_send_reg(guc, i),
760f261b24SDaniele Ceraolo Spurio 					FW_REG_READ | FW_REG_WRITE);
770f261b24SDaniele Ceraolo Spurio 	}
780f261b24SDaniele Ceraolo Spurio 	guc->send_regs.fw_domains = fw_domains;
790f261b24SDaniele Ceraolo Spurio }
800f261b24SDaniele Ceraolo Spurio 
819fb94522SAndi Shyti static void gen9_reset_guc_interrupts(struct intel_guc *guc)
829fb94522SAndi Shyti {
839fb94522SAndi Shyti 	struct intel_gt *gt = guc_to_gt(guc);
849fb94522SAndi Shyti 
859fb94522SAndi Shyti 	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
869fb94522SAndi Shyti 
8703d2c54dSMatt Roper 	spin_lock_irq(gt->irq_lock);
889fb94522SAndi Shyti 	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
8903d2c54dSMatt Roper 	spin_unlock_irq(gt->irq_lock);
909fb94522SAndi Shyti }
919fb94522SAndi Shyti 
929fb94522SAndi Shyti static void gen9_enable_guc_interrupts(struct intel_guc *guc)
939fb94522SAndi Shyti {
949fb94522SAndi Shyti 	struct intel_gt *gt = guc_to_gt(guc);
959fb94522SAndi Shyti 
969fb94522SAndi Shyti 	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
979fb94522SAndi Shyti 
9803d2c54dSMatt Roper 	spin_lock_irq(gt->irq_lock);
99ecb89c2cSMichal Wajdeczko 	guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
1009fb94522SAndi Shyti 			 gt->pm_guc_events);
1019fb94522SAndi Shyti 	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
10203d2c54dSMatt Roper 	spin_unlock_irq(gt->irq_lock);
103a187f13dSDaniele Ceraolo Spurio 
104a187f13dSDaniele Ceraolo Spurio 	guc->interrupts.enabled = true;
1059fb94522SAndi Shyti }
1069fb94522SAndi Shyti 
1079fb94522SAndi Shyti static void gen9_disable_guc_interrupts(struct intel_guc *guc)
1089fb94522SAndi Shyti {
1099fb94522SAndi Shyti 	struct intel_gt *gt = guc_to_gt(guc);
1109fb94522SAndi Shyti 
1119fb94522SAndi Shyti 	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
112a187f13dSDaniele Ceraolo Spurio 	guc->interrupts.enabled = false;
1139fb94522SAndi Shyti 
11403d2c54dSMatt Roper 	spin_lock_irq(gt->irq_lock);
1159fb94522SAndi Shyti 
1169fb94522SAndi Shyti 	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
1179fb94522SAndi Shyti 
11803d2c54dSMatt Roper 	spin_unlock_irq(gt->irq_lock);
1199fb94522SAndi Shyti 	intel_synchronize_irq(gt->i915);
1209fb94522SAndi Shyti 
1219fb94522SAndi Shyti 	gen9_reset_guc_interrupts(guc);
1229fb94522SAndi Shyti }
1239fb94522SAndi Shyti 
124a187f13dSDaniele Ceraolo Spurio static bool __gen11_reset_guc_interrupts(struct intel_gt *gt)
125a187f13dSDaniele Ceraolo Spurio {
126a187f13dSDaniele Ceraolo Spurio 	u32 irq = gt->type == GT_MEDIA ? MTL_MGUC : GEN11_GUC;
127a187f13dSDaniele Ceraolo Spurio 
128a187f13dSDaniele Ceraolo Spurio 	lockdep_assert_held(gt->irq_lock);
129a187f13dSDaniele Ceraolo Spurio 	return gen11_gt_reset_one_iir(gt, 0, irq);
130a187f13dSDaniele Ceraolo Spurio }
131a187f13dSDaniele Ceraolo Spurio 
1329fb94522SAndi Shyti static void gen11_reset_guc_interrupts(struct intel_guc *guc)
1339fb94522SAndi Shyti {
1349fb94522SAndi Shyti 	struct intel_gt *gt = guc_to_gt(guc);
1359fb94522SAndi Shyti 
13603d2c54dSMatt Roper 	spin_lock_irq(gt->irq_lock);
137a187f13dSDaniele Ceraolo Spurio 	__gen11_reset_guc_interrupts(gt);
13803d2c54dSMatt Roper 	spin_unlock_irq(gt->irq_lock);
1399fb94522SAndi Shyti }
1409fb94522SAndi Shyti 
1419fb94522SAndi Shyti static void gen11_enable_guc_interrupts(struct intel_guc *guc)
1429fb94522SAndi Shyti {
1439fb94522SAndi Shyti 	struct intel_gt *gt = guc_to_gt(guc);
1449fb94522SAndi Shyti 
14503d2c54dSMatt Roper 	spin_lock_irq(gt->irq_lock);
146a187f13dSDaniele Ceraolo Spurio 	__gen11_reset_guc_interrupts(gt);
14703d2c54dSMatt Roper 	spin_unlock_irq(gt->irq_lock);
148a187f13dSDaniele Ceraolo Spurio 
149a187f13dSDaniele Ceraolo Spurio 	guc->interrupts.enabled = true;
1509fb94522SAndi Shyti }
1519fb94522SAndi Shyti 
1529fb94522SAndi Shyti static void gen11_disable_guc_interrupts(struct intel_guc *guc)
1539fb94522SAndi Shyti {
1549fb94522SAndi Shyti 	struct intel_gt *gt = guc_to_gt(guc);
1559fb94522SAndi Shyti 
156a187f13dSDaniele Ceraolo Spurio 	guc->interrupts.enabled = false;
1579fb94522SAndi Shyti 	intel_synchronize_irq(gt->i915);
1589fb94522SAndi Shyti 
1599fb94522SAndi Shyti 	gen11_reset_guc_interrupts(guc);
1609fb94522SAndi Shyti }
1619fb94522SAndi Shyti 
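/*
 * A dead-GuC notification normally triggers a full GT reset with error
 * capture. If another notification arrives within 500ms of the previous
 * one, the GuC is treated as unrecoverable and the GT is wedged instead.
 */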
162b2edc414SJohn Harrison static void guc_dead_worker_func(struct work_struct *w)
163b2edc414SJohn Harrison {
164b2edc414SJohn Harrison 	struct intel_guc *guc = container_of(w, struct intel_guc, dead_guc_worker);
165b2edc414SJohn Harrison 	struct intel_gt *gt = guc_to_gt(guc);
166b2edc414SJohn Harrison 	unsigned long last = guc->last_dead_guc_jiffies;
167b2edc414SJohn Harrison 	unsigned long delta = jiffies_to_msecs(jiffies - last);
168b2edc414SJohn Harrison 
169b2edc414SJohn Harrison 	if (delta < 500) {
170b2edc414SJohn Harrison 		intel_gt_set_wedged(gt);
171b2edc414SJohn Harrison 	} else {
172b2edc414SJohn Harrison 		intel_gt_handle_error(gt, ALL_ENGINES, I915_ERROR_CAPTURE, "dead GuC");
173b2edc414SJohn Harrison 		guc->last_dead_guc_jiffies = jiffies;
174b2edc414SJohn Harrison 	}
175b2edc414SJohn Harrison }
176b2edc414SJohn Harrison 
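/*
 * Early, hardware-independent setup: initialise the firmware, CT, log,
 * submission, SLPC and GuC-RC state, and select the interrupt handlers,
 * notify register and MMIO scratch registers appropriate for this platform.
 */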
1770f261b24SDaniele Ceraolo Spurio void intel_guc_init_early(struct intel_guc *guc)
1780f261b24SDaniele Ceraolo Spurio {
179b910f716SDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
180b910f716SDaniele Ceraolo Spurio 	struct drm_i915_private *i915 = gt->i915;
1810f261b24SDaniele Ceraolo Spurio 
1823532e75dSDaniele Ceraolo Spurio 	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, true);
1830f261b24SDaniele Ceraolo Spurio 	intel_guc_ct_init_early(&guc->ct);
1840f261b24SDaniele Ceraolo Spurio 	intel_guc_log_init_early(&guc->log);
185724df646SMichal Wajdeczko 	intel_guc_submission_init_early(guc);
186dff0fc49SVinay Belgaumkar 	intel_guc_slpc_init_early(&guc->slpc);
187216d56c5SVinay Belgaumkar 	intel_guc_rc_init_early(guc);
1880f261b24SDaniele Ceraolo Spurio 
189b2edc414SJohn Harrison 	INIT_WORK(&guc->dead_guc_worker, guc_dead_worker_func);
190b2edc414SJohn Harrison 
1910f261b24SDaniele Ceraolo Spurio 	mutex_init(&guc->send_mutex);
1920f261b24SDaniele Ceraolo Spurio 	spin_lock_init(&guc->irq_lock);
193c816723bSLucas De Marchi 	if (GRAPHICS_VER(i915) >= 11) {
1940f261b24SDaniele Ceraolo Spurio 		guc->interrupts.reset = gen11_reset_guc_interrupts;
1950f261b24SDaniele Ceraolo Spurio 		guc->interrupts.enable = gen11_enable_guc_interrupts;
1960f261b24SDaniele Ceraolo Spurio 		guc->interrupts.disable = gen11_disable_guc_interrupts;
197b910f716SDaniele Ceraolo Spurio 		if (gt->type == GT_MEDIA) {
198b910f716SDaniele Ceraolo Spurio 			guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
199b910f716SDaniele Ceraolo Spurio 			guc->send_regs.base = i915_mmio_reg_offset(MEDIA_SOFT_SCRATCH(0));
200b910f716SDaniele Ceraolo Spurio 		} else {
201b910f716SDaniele Ceraolo Spurio 			guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
202b910f716SDaniele Ceraolo Spurio 			guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
203b910f716SDaniele Ceraolo Spurio 		}
204b910f716SDaniele Ceraolo Spurio 
205e09be87aSMichal Wajdeczko 		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
206e09be87aSMichal Wajdeczko 
2070f261b24SDaniele Ceraolo Spurio 	} else {
208f20c6b27SDaniele Ceraolo Spurio 		guc->notify_reg = GUC_SEND_INTERRUPT;
2090f261b24SDaniele Ceraolo Spurio 		guc->interrupts.reset = gen9_reset_guc_interrupts;
2100f261b24SDaniele Ceraolo Spurio 		guc->interrupts.enable = gen9_enable_guc_interrupts;
2110f261b24SDaniele Ceraolo Spurio 		guc->interrupts.disable = gen9_disable_guc_interrupts;
212e09be87aSMichal Wajdeczko 		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
213e09be87aSMichal Wajdeczko 		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
214e09be87aSMichal Wajdeczko 		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
2150f261b24SDaniele Ceraolo Spurio 	}
21677b6f79dSJohn Harrison 
21777b6f79dSJohn Harrison 	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
21877b6f79dSJohn Harrison 				  INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
2190f261b24SDaniele Ceraolo Spurio }
2200f261b24SDaniele Ceraolo Spurio 
221481d458cSJohn Harrison void intel_guc_init_late(struct intel_guc *guc)
222481d458cSJohn Harrison {
223481d458cSJohn Harrison 	intel_guc_ads_init_late(guc);
224481d458cSJohn Harrison }
225481d458cSJohn Harrison 
2260f261b24SDaniele Ceraolo Spurio static u32 guc_ctl_debug_flags(struct intel_guc *guc)
2270f261b24SDaniele Ceraolo Spurio {
2280f261b24SDaniele Ceraolo Spurio 	u32 level = intel_guc_log_get_level(&guc->log);
2290f261b24SDaniele Ceraolo Spurio 	u32 flags = 0;
2300f261b24SDaniele Ceraolo Spurio 
2310f261b24SDaniele Ceraolo Spurio 	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
2320f261b24SDaniele Ceraolo Spurio 		flags |= GUC_LOG_DISABLED;
2330f261b24SDaniele Ceraolo Spurio 	else
2340f261b24SDaniele Ceraolo Spurio 		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
2350f261b24SDaniele Ceraolo Spurio 			 GUC_LOG_VERBOSITY_SHIFT;
2360f261b24SDaniele Ceraolo Spurio 
2370f261b24SDaniele Ceraolo Spurio 	return flags;
2380f261b24SDaniele Ceraolo Spurio }
2390f261b24SDaniele Ceraolo Spurio 
2400f261b24SDaniele Ceraolo Spurio static u32 guc_ctl_feature_flags(struct intel_guc *guc)
2410f261b24SDaniele Ceraolo Spurio {
2420f261b24SDaniele Ceraolo Spurio 	u32 flags = 0;
2430f261b24SDaniele Ceraolo Spurio 
244202c98e7SDaniele Ceraolo Spurio 	if (!intel_guc_submission_is_used(guc))
2450f261b24SDaniele Ceraolo Spurio 		flags |= GUC_CTL_DISABLE_SCHEDULER;
2460f261b24SDaniele Ceraolo Spurio 
2477695d08fSVinay Belgaumkar 	if (intel_guc_slpc_is_used(guc))
2487695d08fSVinay Belgaumkar 		flags |= GUC_CTL_ENABLE_SLPC;
2497695d08fSVinay Belgaumkar 
2500f261b24SDaniele Ceraolo Spurio 	return flags;
2510f261b24SDaniele Ceraolo Spurio }
2520f261b24SDaniele Ceraolo Spurio 
2530f261b24SDaniele Ceraolo Spurio static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
2540f261b24SDaniele Ceraolo Spurio {
2558ad0152aSJohn Harrison 	struct intel_guc_log *log = &guc->log;
2568ad0152aSJohn Harrison 	u32 offset, flags;
2570f261b24SDaniele Ceraolo Spurio 
2588ad0152aSJohn Harrison 	GEM_BUG_ON(!log->sizes_initialised);
25977b6f79dSJohn Harrison 
2608ad0152aSJohn Harrison 	offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
2610f261b24SDaniele Ceraolo Spurio 
2620f261b24SDaniele Ceraolo Spurio 	flags = GUC_LOG_VALID |
2630f261b24SDaniele Ceraolo Spurio 		GUC_LOG_NOTIFY_ON_HALF_FULL |
2648ad0152aSJohn Harrison 		log->sizes[GUC_LOG_SECTIONS_DEBUG].flag |
2658ad0152aSJohn Harrison 		log->sizes[GUC_LOG_SECTIONS_CAPTURE].flag |
2668ad0152aSJohn Harrison 		(log->sizes[GUC_LOG_SECTIONS_CRASH].count << GUC_LOG_CRASH_SHIFT) |
2678ad0152aSJohn Harrison 		(log->sizes[GUC_LOG_SECTIONS_DEBUG].count << GUC_LOG_DEBUG_SHIFT) |
2688ad0152aSJohn Harrison 		(log->sizes[GUC_LOG_SECTIONS_CAPTURE].count << GUC_LOG_CAPTURE_SHIFT) |
2690f261b24SDaniele Ceraolo Spurio 		(offset << GUC_LOG_BUF_ADDR_SHIFT);
2700f261b24SDaniele Ceraolo Spurio 
2710f261b24SDaniele Ceraolo Spurio 	return flags;
2720f261b24SDaniele Ceraolo Spurio }
2730f261b24SDaniele Ceraolo Spurio 
2740f261b24SDaniele Ceraolo Spurio static u32 guc_ctl_ads_flags(struct intel_guc *guc)
2750f261b24SDaniele Ceraolo Spurio {
2760f261b24SDaniele Ceraolo Spurio 	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
2770f261b24SDaniele Ceraolo Spurio 	u32 flags = ads << GUC_ADS_ADDR_SHIFT;
2780f261b24SDaniele Ceraolo Spurio 
2790f261b24SDaniele Ceraolo Spurio 	return flags;
2800f261b24SDaniele Ceraolo Spurio }
2810f261b24SDaniele Ceraolo Spurio 
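/*
 * Build the boot-time workaround flags: each bit asks the GuC firmware to
 * apply a specific workaround on the driver's behalf, gated below on the
 * affected platforms and steppings.
 */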
28277b6f79dSJohn Harrison static u32 guc_ctl_wa_flags(struct intel_guc *guc)
28377b6f79dSJohn Harrison {
28477b6f79dSJohn Harrison 	struct intel_gt *gt = guc_to_gt(guc);
28577b6f79dSJohn Harrison 	u32 flags = 0;
28677b6f79dSJohn Harrison 
28777b6f79dSJohn Harrison 	/* Wa_22012773006:gen11,gen12 < XeHP */
28877b6f79dSJohn Harrison 	if (GRAPHICS_VER(gt->i915) >= 11 &&
28948ba4a6dSLucas De Marchi 	    GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
29077b6f79dSJohn Harrison 		flags |= GUC_WA_POLLCS;
29177b6f79dSJohn Harrison 
29241bb543fSMatt Roper 	/* Wa_14014475959 */
2935a213086SMatt Roper 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
29441bb543fSMatt Roper 	    IS_DG2(gt->i915))
295717f9badSMatthew Brost 		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
296717f9badSMatthew Brost 
297f673d59eSJohn Harrison 	/* Wa_16019325821 */
2987ad6a8faSJohn Harrison 	/* Wa_14019159160 */
299*104bcfaeSJohn Harrison 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
300f673d59eSJohn Harrison 		flags |= GUC_WA_RCS_CCS_SWITCHOUT;
301f673d59eSJohn Harrison 
302c6b41c4dSJohn Harrison 	/*
303eaeb4b36SMatt Roper 	 * Wa_14012197797
304eaeb4b36SMatt Roper 	 * Wa_22011391025
305c6b41c4dSJohn Harrison 	 *
306c6b41c4dSJohn Harrison 	 * The same WA bit is used for both and 22011391025 is applicable to
307c6b41c4dSJohn Harrison 	 * all DG2.
308c6b41c4dSJohn Harrison 	 */
309c6b41c4dSJohn Harrison 	if (IS_DG2(gt->i915))
310c6b41c4dSJohn Harrison 		flags |= GUC_WA_DUAL_QUEUE;
311c6b41c4dSJohn Harrison 
3120667429cSUmesh Nerlige Ramappa 	/* Wa_22011802037: graphics version 11/12 */
31328c46feeSMatt Roper 	if (intel_engine_reset_needs_wa_22011802037(gt))
314dac38381SUmesh Nerlige Ramappa 		flags |= GUC_WA_PRE_PARSER;
315dac38381SUmesh Nerlige Ramappa 
316307f722bSJohn Harrison 	/*
317eaeb4b36SMatt Roper 	 * Wa_22012727170
318eaeb4b36SMatt Roper 	 * Wa_22012727685
319307f722bSJohn Harrison 	 */
320eaeb4b36SMatt Roper 	if (IS_DG2_G11(gt->i915))
321307f722bSJohn Harrison 		flags |= GUC_WA_CONTEXT_ISOLATION;
322307f722bSJohn Harrison 
3234ae86a7fSJohn Harrison 	/*
3244ae86a7fSJohn Harrison 	 * Wa_14018913170: Applicable to all platforms supported by i915 so
3254ae86a7fSJohn Harrison 	 * don't bother testing for all X/Y/Z platforms explicitly.
3264ae86a7fSJohn Harrison 	 */
32768b89e23SDave Airlie 	if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
328ca1e2a83SDaniele Ceraolo Spurio 		flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
329ca1e2a83SDaniele Ceraolo Spurio 
33077b6f79dSJohn Harrison 	return flags;
33177b6f79dSJohn Harrison }
33277b6f79dSJohn Harrison 
33377b6f79dSJohn Harrison static u32 guc_ctl_devid(struct intel_guc *guc)
33477b6f79dSJohn Harrison {
335be5bcc4bSAndi Shyti 	struct drm_i915_private *i915 = guc_to_i915(guc);
33677b6f79dSJohn Harrison 
33777b6f79dSJohn Harrison 	return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
33877b6f79dSJohn Harrison }
33977b6f79dSJohn Harrison 
3400f261b24SDaniele Ceraolo Spurio /*
3410f261b24SDaniele Ceraolo Spurio  * Initialise the GuC parameter block before starting the firmware
3420f261b24SDaniele Ceraolo Spurio  * transfer. These parameters are read by the firmware on startup
3430f261b24SDaniele Ceraolo Spurio  * and cannot be changed thereafter.
3440f261b24SDaniele Ceraolo Spurio  */
3452bf8fb39SDaniele Ceraolo Spurio static void guc_init_params(struct intel_guc *guc)
3460f261b24SDaniele Ceraolo Spurio {
3472bf8fb39SDaniele Ceraolo Spurio 	u32 *params = guc->params;
3480f261b24SDaniele Ceraolo Spurio 	int i;
3490f261b24SDaniele Ceraolo Spurio 
3502bf8fb39SDaniele Ceraolo Spurio 	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
3510f261b24SDaniele Ceraolo Spurio 
3520f261b24SDaniele Ceraolo Spurio 	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
3530f261b24SDaniele Ceraolo Spurio 	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
3540f261b24SDaniele Ceraolo Spurio 	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
3550f261b24SDaniele Ceraolo Spurio 	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
35677b6f79dSJohn Harrison 	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
35777b6f79dSJohn Harrison 	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
3580f261b24SDaniele Ceraolo Spurio 
3590f261b24SDaniele Ceraolo Spurio 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
360ecb89c2cSMichal Wajdeczko 		guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
3612bf8fb39SDaniele Ceraolo Spurio }
3622bf8fb39SDaniele Ceraolo Spurio 
3632bf8fb39SDaniele Ceraolo Spurio /*
3642bf8fb39SDaniele Ceraolo Spurio  * Initialise the GuC parameter block before starting the firmware
3652bf8fb39SDaniele Ceraolo Spurio  * transfer. These parameters are read by the firmware on startup
3662bf8fb39SDaniele Ceraolo Spurio  * and cannot be changed thereafter.
3672bf8fb39SDaniele Ceraolo Spurio  */
3682bf8fb39SDaniele Ceraolo Spurio void intel_guc_write_params(struct intel_guc *guc)
3692bf8fb39SDaniele Ceraolo Spurio {
3702bf8fb39SDaniele Ceraolo Spurio 	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
3712bf8fb39SDaniele Ceraolo Spurio 	int i;
3720f261b24SDaniele Ceraolo Spurio 
3730f261b24SDaniele Ceraolo Spurio 	/*
37455e3c170SMatt Roper 	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
3750f261b24SDaniele Ceraolo Spurio 	 * they are power context saved so it's ok to release forcewake
3760f261b24SDaniele Ceraolo Spurio 	 * when we are done here and take it again at xfer time.
3770f261b24SDaniele Ceraolo Spurio 	 */
37855e3c170SMatt Roper 	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);
3790f261b24SDaniele Ceraolo Spurio 
38084b1ca2fSDaniele Ceraolo Spurio 	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
3810f261b24SDaniele Ceraolo Spurio 
3820f261b24SDaniele Ceraolo Spurio 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
3832bf8fb39SDaniele Ceraolo Spurio 		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
3840f261b24SDaniele Ceraolo Spurio 
38555e3c170SMatt Roper 	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
3860f261b24SDaniele Ceraolo Spurio }
3870f261b24SDaniele Ceraolo Spurio 
388368d179aSJohn Harrison void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
389368d179aSJohn Harrison {
390368d179aSJohn Harrison 	struct intel_gt *gt = guc_to_gt(guc);
391368d179aSJohn Harrison 	intel_wakeref_t wakeref;
392368d179aSJohn Harrison 	u32 stamp = 0;
393368d179aSJohn Harrison 	u64 ktime;
394368d179aSJohn Harrison 
395368d179aSJohn Harrison 	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
396368d179aSJohn Harrison 		stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
397368d179aSJohn Harrison 	ktime = ktime_get_boottime_ns();
398368d179aSJohn Harrison 
399368d179aSJohn Harrison 	drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
400368d179aSJohn Harrison 	drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
401368d179aSJohn Harrison 	drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
402368d179aSJohn Harrison 		   gt->clock_frequency, gt->clock_period_ns);
403368d179aSJohn Harrison }
404368d179aSJohn Harrison 
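/*
 * Allocate and pin every object the GuC needs before it can be loaded:
 * firmware image, log buffer, error-capture lists, ADS, CT buffers, plus
 * submission and SLPC state when those features are used. Once everything
 * is perma-pinned, the boot parameters referencing these objects are
 * computed and the firmware is marked as loadable.
 */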
4052bf8fb39SDaniele Ceraolo Spurio int intel_guc_init(struct intel_guc *guc)
4062bf8fb39SDaniele Ceraolo Spurio {
4072bf8fb39SDaniele Ceraolo Spurio 	int ret;
4082bf8fb39SDaniele Ceraolo Spurio 
4092bf8fb39SDaniele Ceraolo Spurio 	ret = intel_uc_fw_init(&guc->fw);
4102bf8fb39SDaniele Ceraolo Spurio 	if (ret)
41142f96e5bSDaniele Ceraolo Spurio 		goto out;
4122bf8fb39SDaniele Ceraolo Spurio 
4132bf8fb39SDaniele Ceraolo Spurio 	ret = intel_guc_log_create(&guc->log);
4142bf8fb39SDaniele Ceraolo Spurio 	if (ret)
415034982cfSDaniele Ceraolo Spurio 		goto err_fw;
4162bf8fb39SDaniele Ceraolo Spurio 
41724492514SAlan Previn 	ret = intel_guc_capture_init(guc);
4182bf8fb39SDaniele Ceraolo Spurio 	if (ret)
4192bf8fb39SDaniele Ceraolo Spurio 		goto err_log;
42024492514SAlan Previn 
42124492514SAlan Previn 	ret = intel_guc_ads_create(guc);
42224492514SAlan Previn 	if (ret)
42324492514SAlan Previn 		goto err_capture;
42424492514SAlan Previn 
4252bf8fb39SDaniele Ceraolo Spurio 	GEM_BUG_ON(!guc->ads_vma);
4262bf8fb39SDaniele Ceraolo Spurio 
4272bf8fb39SDaniele Ceraolo Spurio 	ret = intel_guc_ct_init(&guc->ct);
4282bf8fb39SDaniele Ceraolo Spurio 	if (ret)
4292bf8fb39SDaniele Ceraolo Spurio 		goto err_ads;
4302bf8fb39SDaniele Ceraolo Spurio 
431202c98e7SDaniele Ceraolo Spurio 	if (intel_guc_submission_is_used(guc)) {
432edad2547SDaniele Ceraolo Spurio 		/*
433edad2547SDaniele Ceraolo Spurio 		 * This is stuff we need to have available at fw load time
434edad2547SDaniele Ceraolo Spurio 		 * if we are planning to enable submission later
435edad2547SDaniele Ceraolo Spurio 		 */
436edad2547SDaniele Ceraolo Spurio 		ret = intel_guc_submission_init(guc);
437edad2547SDaniele Ceraolo Spurio 		if (ret)
438edad2547SDaniele Ceraolo Spurio 			goto err_ct;
439edad2547SDaniele Ceraolo Spurio 	}
440edad2547SDaniele Ceraolo Spurio 
441869cd27eSVinay Belgaumkar 	if (intel_guc_slpc_is_used(guc)) {
442869cd27eSVinay Belgaumkar 		ret = intel_guc_slpc_init(&guc->slpc);
443869cd27eSVinay Belgaumkar 		if (ret)
444869cd27eSVinay Belgaumkar 			goto err_submission;
445869cd27eSVinay Belgaumkar 	}
446869cd27eSVinay Belgaumkar 
4472bf8fb39SDaniele Ceraolo Spurio 	/* now that everything is perma-pinned, initialize the parameters */
4482bf8fb39SDaniele Ceraolo Spurio 	guc_init_params(guc);
4492bf8fb39SDaniele Ceraolo Spurio 
45042f96e5bSDaniele Ceraolo Spurio 	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
45142f96e5bSDaniele Ceraolo Spurio 
4522bf8fb39SDaniele Ceraolo Spurio 	return 0;
4532bf8fb39SDaniele Ceraolo Spurio 
454869cd27eSVinay Belgaumkar err_submission:
455869cd27eSVinay Belgaumkar 	intel_guc_submission_fini(guc);
456edad2547SDaniele Ceraolo Spurio err_ct:
457edad2547SDaniele Ceraolo Spurio 	intel_guc_ct_fini(&guc->ct);
4582bf8fb39SDaniele Ceraolo Spurio err_ads:
4592bf8fb39SDaniele Ceraolo Spurio 	intel_guc_ads_destroy(guc);
46024492514SAlan Previn err_capture:
46124492514SAlan Previn 	intel_guc_capture_destroy(guc);
4622bf8fb39SDaniele Ceraolo Spurio err_log:
4632bf8fb39SDaniele Ceraolo Spurio 	intel_guc_log_destroy(&guc->log);
4642bf8fb39SDaniele Ceraolo Spurio err_fw:
4652bf8fb39SDaniele Ceraolo Spurio 	intel_uc_fw_fini(&guc->fw);
46642f96e5bSDaniele Ceraolo Spurio out:
467b76c14c8SDaniele Ceraolo Spurio 	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
468ecb89c2cSMichal Wajdeczko 	guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
4692bf8fb39SDaniele Ceraolo Spurio 	return ret;
4702bf8fb39SDaniele Ceraolo Spurio }
4712bf8fb39SDaniele Ceraolo Spurio 
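/*
 * Release everything allocated by intel_guc_init(), in reverse order, after
 * flushing any pending dead-GuC worker.
 */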
4722bf8fb39SDaniele Ceraolo Spurio void intel_guc_fini(struct intel_guc *guc)
4732bf8fb39SDaniele Ceraolo Spurio {
47442f96e5bSDaniele Ceraolo Spurio 	if (!intel_uc_fw_is_loadable(&guc->fw))
4750075a20aSMichal Wajdeczko 		return;
4760075a20aSMichal Wajdeczko 
477b2edc414SJohn Harrison 	flush_work(&guc->dead_guc_worker);
478b2edc414SJohn Harrison 
479869cd27eSVinay Belgaumkar 	if (intel_guc_slpc_is_used(guc))
480869cd27eSVinay Belgaumkar 		intel_guc_slpc_fini(&guc->slpc);
481869cd27eSVinay Belgaumkar 
482202c98e7SDaniele Ceraolo Spurio 	if (intel_guc_submission_is_used(guc))
483edad2547SDaniele Ceraolo Spurio 		intel_guc_submission_fini(guc);
484edad2547SDaniele Ceraolo Spurio 
4852bf8fb39SDaniele Ceraolo Spurio 	intel_guc_ct_fini(&guc->ct);
4862bf8fb39SDaniele Ceraolo Spurio 
4872bf8fb39SDaniele Ceraolo Spurio 	intel_guc_ads_destroy(guc);
48824492514SAlan Previn 	intel_guc_capture_destroy(guc);
4892bf8fb39SDaniele Ceraolo Spurio 	intel_guc_log_destroy(&guc->log);
4902bf8fb39SDaniele Ceraolo Spurio 	intel_uc_fw_fini(&guc->fw);
4912bf8fb39SDaniele Ceraolo Spurio }
4922bf8fb39SDaniele Ceraolo Spurio 
4930f261b24SDaniele Ceraolo Spurio /*
4940f261b24SDaniele Ceraolo Spurio  * This function implements the MMIO based host to GuC interface.
4950f261b24SDaniele Ceraolo Spurio  */
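/*
 * Minimal usage sketch (illustrative only; 'action' stands for one of the
 * GUC_ACTION_* / HOST2GUC_* request codes defined elsewhere):
 *
 *	u32 request[] = {
 *		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
 *		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
 *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action),
 *	};
 *	int ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
 *
 * See __guc_action_self_cfg() later in this file for a complete caller.
 */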
496572f2a5cSMichal Wajdeczko int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
4970f261b24SDaniele Ceraolo Spurio 			u32 *response_buf, u32 response_buf_size)
4980f261b24SDaniele Ceraolo Spurio {
49984b1ca2fSDaniele Ceraolo Spurio 	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
500572f2a5cSMichal Wajdeczko 	u32 header;
5010f261b24SDaniele Ceraolo Spurio 	int i;
5020f261b24SDaniele Ceraolo Spurio 	int ret;
5030f261b24SDaniele Ceraolo Spurio 
5040f261b24SDaniele Ceraolo Spurio 	GEM_BUG_ON(!len);
5050f261b24SDaniele Ceraolo Spurio 	GEM_BUG_ON(len > guc->send_regs.count);
5060f261b24SDaniele Ceraolo Spurio 
507572f2a5cSMichal Wajdeczko 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
508572f2a5cSMichal Wajdeczko 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);
5090f261b24SDaniele Ceraolo Spurio 
5100f261b24SDaniele Ceraolo Spurio 	mutex_lock(&guc->send_mutex);
5110f261b24SDaniele Ceraolo Spurio 	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
5120f261b24SDaniele Ceraolo Spurio 
513572f2a5cSMichal Wajdeczko retry:
5140f261b24SDaniele Ceraolo Spurio 	for (i = 0; i < len; i++)
515572f2a5cSMichal Wajdeczko 		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
5160f261b24SDaniele Ceraolo Spurio 
5170f261b24SDaniele Ceraolo Spurio 	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
5180f261b24SDaniele Ceraolo Spurio 
5190f261b24SDaniele Ceraolo Spurio 	intel_guc_notify(guc);
5200f261b24SDaniele Ceraolo Spurio 
5210f261b24SDaniele Ceraolo Spurio 	/*
5220f261b24SDaniele Ceraolo Spurio 	 * No GuC command should ever take longer than 10ms.
5230f261b24SDaniele Ceraolo Spurio 	 * Fast commands should still complete in 10us.
5240f261b24SDaniele Ceraolo Spurio 	 */
5250f261b24SDaniele Ceraolo Spurio 	ret = __intel_wait_for_register_fw(uncore,
5260f261b24SDaniele Ceraolo Spurio 					   guc_send_reg(guc, 0),
527572f2a5cSMichal Wajdeczko 					   GUC_HXG_MSG_0_ORIGIN,
528572f2a5cSMichal Wajdeczko 					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
529572f2a5cSMichal Wajdeczko 						      GUC_HXG_ORIGIN_GUC),
530572f2a5cSMichal Wajdeczko 					   10, 10, &header);
531572f2a5cSMichal Wajdeczko 	if (unlikely(ret)) {
532572f2a5cSMichal Wajdeczko timeout:
533ecb89c2cSMichal Wajdeczko 		guc_err(guc, "mmio request %#x: no reply %x\n",
534572f2a5cSMichal Wajdeczko 			request[0], header);
535572f2a5cSMichal Wajdeczko 		goto out;
536572f2a5cSMichal Wajdeczko 	}
5370f261b24SDaniele Ceraolo Spurio 
538572f2a5cSMichal Wajdeczko 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
539572f2a5cSMichal Wajdeczko #define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
540572f2a5cSMichal Wajdeczko 		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
541572f2a5cSMichal Wajdeczko 		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })
542572f2a5cSMichal Wajdeczko 
543572f2a5cSMichal Wajdeczko 		ret = wait_for(done, 1000);
544572f2a5cSMichal Wajdeczko 		if (unlikely(ret))
545572f2a5cSMichal Wajdeczko 			goto timeout;
546572f2a5cSMichal Wajdeczko 		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
547572f2a5cSMichal Wajdeczko 				       GUC_HXG_ORIGIN_GUC))
548572f2a5cSMichal Wajdeczko 			goto proto;
549572f2a5cSMichal Wajdeczko #undef done
550572f2a5cSMichal Wajdeczko 	}
551572f2a5cSMichal Wajdeczko 
552572f2a5cSMichal Wajdeczko 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
553572f2a5cSMichal Wajdeczko 		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
554572f2a5cSMichal Wajdeczko 
555ecb89c2cSMichal Wajdeczko 		guc_dbg(guc, "mmio request %#x: retrying, reason %u\n",
556572f2a5cSMichal Wajdeczko 			request[0], reason);
557572f2a5cSMichal Wajdeczko 		goto retry;
558572f2a5cSMichal Wajdeczko 	}
559572f2a5cSMichal Wajdeczko 
560572f2a5cSMichal Wajdeczko 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
561572f2a5cSMichal Wajdeczko 		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
562572f2a5cSMichal Wajdeczko 		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
563572f2a5cSMichal Wajdeczko 
564ecb89c2cSMichal Wajdeczko 		guc_err(guc, "mmio request %#x: failure %x/%u\n",
565572f2a5cSMichal Wajdeczko 			request[0], error, hint);
566572f2a5cSMichal Wajdeczko 		ret = -ENXIO;
567572f2a5cSMichal Wajdeczko 		goto out;
568572f2a5cSMichal Wajdeczko 	}
569572f2a5cSMichal Wajdeczko 
570572f2a5cSMichal Wajdeczko 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
571572f2a5cSMichal Wajdeczko proto:
572ecb89c2cSMichal Wajdeczko 		guc_err(guc, "mmio request %#x: unexpected reply %#x\n",
573572f2a5cSMichal Wajdeczko 			request[0], header);
574572f2a5cSMichal Wajdeczko 		ret = -EPROTO;
5750f261b24SDaniele Ceraolo Spurio 		goto out;
5760f261b24SDaniele Ceraolo Spurio 	}
5770f261b24SDaniele Ceraolo Spurio 
5780f261b24SDaniele Ceraolo Spurio 	if (response_buf) {
579572f2a5cSMichal Wajdeczko 		int count = min(response_buf_size, guc->send_regs.count);
5800f261b24SDaniele Ceraolo Spurio 
581572f2a5cSMichal Wajdeczko 		GEM_BUG_ON(!count);
582572f2a5cSMichal Wajdeczko 
583572f2a5cSMichal Wajdeczko 		response_buf[0] = header;
584572f2a5cSMichal Wajdeczko 
585572f2a5cSMichal Wajdeczko 		for (i = 1; i < count; i++)
58684b1ca2fSDaniele Ceraolo Spurio 			response_buf[i] = intel_uncore_read(uncore,
587572f2a5cSMichal Wajdeczko 							    guc_send_reg(guc, i));
5880f261b24SDaniele Ceraolo Spurio 
589572f2a5cSMichal Wajdeczko 		/* Use number of copied dwords as our return value */
590572f2a5cSMichal Wajdeczko 		ret = count;
591572f2a5cSMichal Wajdeczko 	} else {
5920f261b24SDaniele Ceraolo Spurio 		/* Use data from the GuC response as our return value */
593572f2a5cSMichal Wajdeczko 		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
594572f2a5cSMichal Wajdeczko 	}
5950f261b24SDaniele Ceraolo Spurio 
5960f261b24SDaniele Ceraolo Spurio out:
5970f261b24SDaniele Ceraolo Spurio 	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
5980f261b24SDaniele Ceraolo Spurio 	mutex_unlock(&guc->send_mutex);
5990f261b24SDaniele Ceraolo Spurio 
6000f261b24SDaniele Ceraolo Spurio 	return ret;
6010f261b24SDaniele Ceraolo Spurio }
6020f261b24SDaniele Ceraolo Spurio 
603b2edc414SJohn Harrison int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
604b2edc414SJohn Harrison {
605b2edc414SJohn Harrison 	if (action == INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
606b2edc414SJohn Harrison 		guc_err(guc, "Crash dump notification\n");
607b2edc414SJohn Harrison 	else if (action == INTEL_GUC_ACTION_NOTIFY_EXCEPTION)
608b2edc414SJohn Harrison 		guc_err(guc, "Exception notification\n");
609b2edc414SJohn Harrison 	else
610b2edc414SJohn Harrison 		guc_err(guc, "Unknown crash notification: 0x%04X\n", action);
611b2edc414SJohn Harrison 
612b2edc414SJohn Harrison 	queue_work(system_unbound_wq, &guc->dead_guc_worker);
613b2edc414SJohn Harrison 
614b2edc414SJohn Harrison 	return 0;
615b2edc414SJohn Harrison }
616b2edc414SJohn Harrison 
6170f261b24SDaniele Ceraolo Spurio int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
6180f261b24SDaniele Ceraolo Spurio 				       const u32 *payload, u32 len)
6190f261b24SDaniele Ceraolo Spurio {
6200f261b24SDaniele Ceraolo Spurio 	u32 msg;
6210f261b24SDaniele Ceraolo Spurio 
6220f261b24SDaniele Ceraolo Spurio 	if (unlikely(!len))
6230f261b24SDaniele Ceraolo Spurio 		return -EPROTO;
6240f261b24SDaniele Ceraolo Spurio 
6250f261b24SDaniele Ceraolo Spurio 	/* Make sure to handle only enabled messages */
6260f261b24SDaniele Ceraolo Spurio 	msg = payload[0] & guc->msg_enabled_mask;
6270f261b24SDaniele Ceraolo Spurio 
62877b6f79dSJohn Harrison 	if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
629ecb89c2cSMichal Wajdeczko 		guc_err(guc, "Received early crash dump notification!\n");
63077b6f79dSJohn Harrison 	if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
631ecb89c2cSMichal Wajdeczko 		guc_err(guc, "Received early exception notification!\n");
6320f261b24SDaniele Ceraolo Spurio 
633b2edc414SJohn Harrison 	if (msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | INTEL_GUC_RECV_MSG_EXCEPTION))
634b2edc414SJohn Harrison 		queue_work(system_unbound_wq, &guc->dead_guc_worker);
635b2edc414SJohn Harrison 
6360f261b24SDaniele Ceraolo Spurio 	return 0;
6370f261b24SDaniele Ceraolo Spurio }
6380f261b24SDaniele Ceraolo Spurio 
6390f261b24SDaniele Ceraolo Spurio /**
6400f261b24SDaniele Ceraolo Spurio  * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
6410f261b24SDaniele Ceraolo Spurio  * @guc: intel_guc structure
6420f261b24SDaniele Ceraolo Spurio  * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
6430f261b24SDaniele Ceraolo Spurio  *
6440f261b24SDaniele Ceraolo Spurio  * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
6450f261b24SDaniele Ceraolo Spurio  * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
6460f261b24SDaniele Ceraolo Spurio  * intel_huc_auth().
6470f261b24SDaniele Ceraolo Spurio  *
6480f261b24SDaniele Ceraolo Spurio  * Return:	non-zero code on error
6490f261b24SDaniele Ceraolo Spurio  */
6500f261b24SDaniele Ceraolo Spurio int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
6510f261b24SDaniele Ceraolo Spurio {
6520f261b24SDaniele Ceraolo Spurio 	u32 action[] = {
6530f261b24SDaniele Ceraolo Spurio 		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
6540f261b24SDaniele Ceraolo Spurio 		rsa_offset
6550f261b24SDaniele Ceraolo Spurio 	};
6560f261b24SDaniele Ceraolo Spurio 
6570f261b24SDaniele Ceraolo Spurio 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
6580f261b24SDaniele Ceraolo Spurio }
6590f261b24SDaniele Ceraolo Spurio 
6600f261b24SDaniele Ceraolo Spurio /**
6610f261b24SDaniele Ceraolo Spurio  * intel_guc_suspend() - notify GuC entering suspend state
6620f261b24SDaniele Ceraolo Spurio  * @guc:	the guc
6630f261b24SDaniele Ceraolo Spurio  */
6640f261b24SDaniele Ceraolo Spurio int intel_guc_suspend(struct intel_guc *guc)
6650f261b24SDaniele Ceraolo Spurio {
6660f261b24SDaniele Ceraolo Spurio 	int ret;
6670f261b24SDaniele Ceraolo Spurio 	u32 action[] = {
66877b6f79dSJohn Harrison 		INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
6690f261b24SDaniele Ceraolo Spurio 	};
6700f261b24SDaniele Ceraolo Spurio 
671cad46a33SMatthew Brost 	if (!intel_guc_is_ready(guc))
67282e0c5bbSDon Hiatt 		return 0;
67382e0c5bbSDon Hiatt 
674cad46a33SMatthew Brost 	if (intel_guc_submission_is_used(guc)) {
675b2edc414SJohn Harrison 		flush_work(&guc->dead_guc_worker);
676b2edc414SJohn Harrison 
67782e0c5bbSDon Hiatt 		/*
678cad46a33SMatthew Brost 		 * This H2G MMIO command tears down the GuC in two steps. First it will
679cad46a33SMatthew Brost 		 * generate a G2H CTB for every active context indicating a reset. In
680cad46a33SMatthew Brost 		 * practice the i915 shouldn't ever get a G2H as suspend should only be
681cad46a33SMatthew Brost 		 * called when the GPU is idle. Next, it tears down the CTBs and this
682cad46a33SMatthew Brost 		 * H2G MMIO command completes.
683cad46a33SMatthew Brost 		 *
684cad46a33SMatthew Brost 		 * Don't abort on a failure code from the GuC. Keep going and do the
685cad46a33SMatthew Brost 		 * clean up in sanitize() and re-initialisation on resume and hopefully
686cad46a33SMatthew Brost 		 * the error here won't be problematic.
6870f261b24SDaniele Ceraolo Spurio 		 */
688cad46a33SMatthew Brost 		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
6890f261b24SDaniele Ceraolo Spurio 		if (ret)
690ecb89c2cSMichal Wajdeczko 			guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
691ecb89c2cSMichal Wajdeczko 				ERR_PTR(ret));
6920f261b24SDaniele Ceraolo Spurio 	}
6930f261b24SDaniele Ceraolo Spurio 
694cad46a33SMatthew Brost 	/* Signal that the GuC isn't running. */
695cad46a33SMatthew Brost 	intel_guc_sanitize(guc);
696cad46a33SMatthew Brost 
6970f261b24SDaniele Ceraolo Spurio 	return 0;
6980f261b24SDaniele Ceraolo Spurio }
6990f261b24SDaniele Ceraolo Spurio 
7000f261b24SDaniele Ceraolo Spurio /**
7010f261b24SDaniele Ceraolo Spurio  * intel_guc_resume() - notify GuC resuming from suspend state
7020f261b24SDaniele Ceraolo Spurio  * @guc:	the guc
7030f261b24SDaniele Ceraolo Spurio  */
7040f261b24SDaniele Ceraolo Spurio int intel_guc_resume(struct intel_guc *guc)
7050f261b24SDaniele Ceraolo Spurio {
706cad46a33SMatthew Brost 	/*
707cad46a33SMatthew Brost 	 * NB: This function can still be called even if GuC submission is
708cad46a33SMatthew Brost 	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
709cad46a33SMatthew Brost 	 * if any code is later added here, it must support doing nothing
710cad46a33SMatthew Brost 	 * if submission is disabled (as per intel_guc_suspend).
711cad46a33SMatthew Brost 	 */
71282e0c5bbSDon Hiatt 	return 0;
7130f261b24SDaniele Ceraolo Spurio }
7140f261b24SDaniele Ceraolo Spurio 
7150f261b24SDaniele Ceraolo Spurio /**
716218151e9SDaniele Ceraolo Spurio  * DOC: GuC Memory Management
7170f261b24SDaniele Ceraolo Spurio  *
718218151e9SDaniele Ceraolo Spurio  * GuC can't allocate any memory for its own usage, so all the allocations must
719218151e9SDaniele Ceraolo Spurio  * be handled by the host driver. GuC accesses the memory via the GGTT, with the
720218151e9SDaniele Ceraolo Spurio  * exception of the top and bottom parts of the 4GB address space, which are
721218151e9SDaniele Ceraolo Spurio  * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
722218151e9SDaniele Ceraolo Spurio  * or other parts of the HW. The driver must take care not to place objects that
723218151e9SDaniele Ceraolo Spurio  * the GuC is going to access in these reserved ranges. The layout of the GuC
724218151e9SDaniele Ceraolo Spurio  * address space is shown below:
7250f261b24SDaniele Ceraolo Spurio  *
7260f261b24SDaniele Ceraolo Spurio  * ::
7270f261b24SDaniele Ceraolo Spurio  *
7280f261b24SDaniele Ceraolo Spurio  *     +===========> +====================+ <== FFFF_FFFF
7290f261b24SDaniele Ceraolo Spurio  *     ^             |      Reserved      |
7300f261b24SDaniele Ceraolo Spurio  *     |             +====================+ <== GUC_GGTT_TOP
7310f261b24SDaniele Ceraolo Spurio  *     |             |                    |
7320f261b24SDaniele Ceraolo Spurio  *     |             |        DRAM        |
7330f261b24SDaniele Ceraolo Spurio  *    GuC            |                    |
7340f261b24SDaniele Ceraolo Spurio  *  Address    +===> +====================+ <== GuC ggtt_pin_bias
7350f261b24SDaniele Ceraolo Spurio  *   Space     ^     |                    |
7360f261b24SDaniele Ceraolo Spurio  *     |       |     |                    |
7370f261b24SDaniele Ceraolo Spurio  *     |      GuC    |        GuC         |
7380f261b24SDaniele Ceraolo Spurio  *     |     WOPCM   |       WOPCM        |
7390f261b24SDaniele Ceraolo Spurio  *     |      Size   |                    |
7400f261b24SDaniele Ceraolo Spurio  *     |       |     |                    |
7410f261b24SDaniele Ceraolo Spurio  *     v       v     |                    |
7420f261b24SDaniele Ceraolo Spurio  *     +=======+===> +====================+ <== 0000_0000
7430f261b24SDaniele Ceraolo Spurio  *
7440f261b24SDaniele Ceraolo Spurio  * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
7450f261b24SDaniele Ceraolo Spurio  * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
7460f261b24SDaniele Ceraolo Spurio  * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
7470f261b24SDaniele Ceraolo Spurio  */
7480f261b24SDaniele Ceraolo Spurio 
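
/*
 * Worked example with hypothetical numbers: if the GuC WOPCM is 2 MiB, then
 * ggtt_pin_bias is 2 MiB and every object the GuC must access has to be
 * pinned at a GGTT offset in [2 MiB, GUC_GGTT_TOP); anything below that is
 * re-mapped by the GuC HW onto the WOPCM rather than DRAM.
 */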
7490f261b24SDaniele Ceraolo Spurio /**
7500f261b24SDaniele Ceraolo Spurio  * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
7510f261b24SDaniele Ceraolo Spurio  * @guc:	the guc
7520f261b24SDaniele Ceraolo Spurio  * @size:	size of area to allocate (both virtual space and memory)
7530f261b24SDaniele Ceraolo Spurio  *
7540f261b24SDaniele Ceraolo Spurio  * This is a wrapper to create an object for use with the GuC. In order to
7550f261b24SDaniele Ceraolo Spurio  * use it inside the GuC, an object needs to be pinned for its lifetime, so we allocate
7560f261b24SDaniele Ceraolo Spurio  * both some backing storage and a range inside the Global GTT. We must pin
7570f261b24SDaniele Ceraolo Spurio  * it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because that
7580f261b24SDaniele Ceraolo Spurio  * range is reserved inside GuC.
7590f261b24SDaniele Ceraolo Spurio  *
7600f261b24SDaniele Ceraolo Spurio  * Return:	A i915_vma if successful, otherwise an ERR_PTR.
7610f261b24SDaniele Ceraolo Spurio  */
7620f261b24SDaniele Ceraolo Spurio struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
7630f261b24SDaniele Ceraolo Spurio {
76484b1ca2fSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
7650f261b24SDaniele Ceraolo Spurio 	struct drm_i915_gem_object *obj;
7660f261b24SDaniele Ceraolo Spurio 	struct i915_vma *vma;
7670f261b24SDaniele Ceraolo Spurio 	u64 flags;
7680f261b24SDaniele Ceraolo Spurio 	int ret;
7690f261b24SDaniele Ceraolo Spurio 
7707acbbc7cSDaniele Ceraolo Spurio 	if (HAS_LMEM(gt->i915))
7717acbbc7cSDaniele Ceraolo Spurio 		obj = i915_gem_object_create_lmem(gt->i915, size,
7727acbbc7cSDaniele Ceraolo Spurio 						  I915_BO_ALLOC_CPU_CLEAR |
773a259cc14SThomas Hellström 						  I915_BO_ALLOC_CONTIGUOUS |
774a259cc14SThomas Hellström 						  I915_BO_ALLOC_PM_EARLY);
7757acbbc7cSDaniele Ceraolo Spurio 	else
77684b1ca2fSDaniele Ceraolo Spurio 		obj = i915_gem_object_create_shmem(gt->i915, size);
7777acbbc7cSDaniele Ceraolo Spurio 
7780f261b24SDaniele Ceraolo Spurio 	if (IS_ERR(obj))
7790f261b24SDaniele Ceraolo Spurio 		return ERR_CAST(obj);
7800f261b24SDaniele Ceraolo Spurio 
781a161b6dbSFei Yang 	/*
782f1530f91SJonathan Cavitt 	 * Wa_22016122933: For Media version 13.0, all Media GT shared
783f1530f91SJonathan Cavitt 	 * memory needs to be mapped as WC on CPU side and UC (PAT
784f1530f91SJonathan Cavitt 	 * index 2) on GPU side.
785a161b6dbSFei Yang 	 */
786f1530f91SJonathan Cavitt 	if (intel_gt_needs_wa_22016122933(gt))
787a161b6dbSFei Yang 		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
788a161b6dbSFei Yang 
78984b1ca2fSDaniele Ceraolo Spurio 	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
7900f261b24SDaniele Ceraolo Spurio 	if (IS_ERR(vma))
7910f261b24SDaniele Ceraolo Spurio 		goto err;
7920f261b24SDaniele Ceraolo Spurio 
793e3793468SChris Wilson 	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
79447b08693SMaarten Lankhorst 	ret = i915_ggtt_pin(vma, NULL, 0, flags);
7950f261b24SDaniele Ceraolo Spurio 	if (ret) {
7960f261b24SDaniele Ceraolo Spurio 		vma = ERR_PTR(ret);
7970f261b24SDaniele Ceraolo Spurio 		goto err;
7980f261b24SDaniele Ceraolo Spurio 	}
7990f261b24SDaniele Ceraolo Spurio 
8001aff1903SChris Wilson 	return i915_vma_make_unshrinkable(vma);
8010f261b24SDaniele Ceraolo Spurio 
8020f261b24SDaniele Ceraolo Spurio err:
8030f261b24SDaniele Ceraolo Spurio 	i915_gem_object_put(obj);
8040f261b24SDaniele Ceraolo Spurio 	return vma;
8050f261b24SDaniele Ceraolo Spurio }
80618c094b3SDaniele Ceraolo Spurio 
80718c094b3SDaniele Ceraolo Spurio /**
80818c094b3SDaniele Ceraolo Spurio  * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
80918c094b3SDaniele Ceraolo Spurio  * @guc:	the guc
81018c094b3SDaniele Ceraolo Spurio  * @size:	size of area to allocate (both virtual space and memory)
81118c094b3SDaniele Ceraolo Spurio  * @out_vma:	return variable for the allocated vma pointer
81218c094b3SDaniele Ceraolo Spurio  * @out_vaddr:	return variable for the obj mapping
81318c094b3SDaniele Ceraolo Spurio  *
81418c094b3SDaniele Ceraolo Spurio  * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
81518c094b3SDaniele Ceraolo Spurio  * object with I915_MAP_WB.
81618c094b3SDaniele Ceraolo Spurio  *
81718c094b3SDaniele Ceraolo Spurio  * Return:	0 if successful, a negative errno code otherwise.
81818c094b3SDaniele Ceraolo Spurio  */
81918c094b3SDaniele Ceraolo Spurio int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
82018c094b3SDaniele Ceraolo Spurio 				   struct i915_vma **out_vma, void **out_vaddr)
82118c094b3SDaniele Ceraolo Spurio {
82218c094b3SDaniele Ceraolo Spurio 	struct i915_vma *vma;
82318c094b3SDaniele Ceraolo Spurio 	void *vaddr;
82418c094b3SDaniele Ceraolo Spurio 
82518c094b3SDaniele Ceraolo Spurio 	vma = intel_guc_allocate_vma(guc, size);
82618c094b3SDaniele Ceraolo Spurio 	if (IS_ERR(vma))
82718c094b3SDaniele Ceraolo Spurio 		return PTR_ERR(vma);
82818c094b3SDaniele Ceraolo Spurio 
829fa85bfd1SVenkata Sandeep Dhanalakota 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
830115cdccaSJonathan Cavitt 						 intel_gt_coherent_map_type(guc_to_gt(guc),
831fa85bfd1SVenkata Sandeep Dhanalakota 									    vma->obj, true));
83218c094b3SDaniele Ceraolo Spurio 	if (IS_ERR(vaddr)) {
83318c094b3SDaniele Ceraolo Spurio 		i915_vma_unpin_and_release(&vma, 0);
83418c094b3SDaniele Ceraolo Spurio 		return PTR_ERR(vaddr);
83518c094b3SDaniele Ceraolo Spurio 	}
83618c094b3SDaniele Ceraolo Spurio 
83718c094b3SDaniele Ceraolo Spurio 	*out_vma = vma;
83818c094b3SDaniele Ceraolo Spurio 	*out_vaddr = vaddr;
83918c094b3SDaniele Ceraolo Spurio 
84018c094b3SDaniele Ceraolo Spurio 	return 0;
84118c094b3SDaniele Ceraolo Spurio }
84234904bd6SDaniele Ceraolo Spurio 
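/*
 * Send a single HOST2GUC_SELF_CFG key/length/value tuple to the GuC over
 * MMIO. The firmware's reply value is interpreted as: 1 = success,
 * 0 = -ENOKEY (key not accepted), anything else = -EPROTO.
 */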
84377b6f79dSJohn Harrison static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
84477b6f79dSJohn Harrison {
84577b6f79dSJohn Harrison 	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
84677b6f79dSJohn Harrison 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
84777b6f79dSJohn Harrison 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
84877b6f79dSJohn Harrison 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
84977b6f79dSJohn Harrison 		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
85077b6f79dSJohn Harrison 		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
85177b6f79dSJohn Harrison 		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
85277b6f79dSJohn Harrison 		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
85377b6f79dSJohn Harrison 	};
85477b6f79dSJohn Harrison 	int ret;
85577b6f79dSJohn Harrison 
85677b6f79dSJohn Harrison 	GEM_BUG_ON(len > 2);
85777b6f79dSJohn Harrison 	GEM_BUG_ON(len == 1 && upper_32_bits(value));
85877b6f79dSJohn Harrison 
85977b6f79dSJohn Harrison 	/* Self config must go over MMIO */
86077b6f79dSJohn Harrison 	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
86177b6f79dSJohn Harrison 
86277b6f79dSJohn Harrison 	if (unlikely(ret < 0))
86377b6f79dSJohn Harrison 		return ret;
86477b6f79dSJohn Harrison 	if (unlikely(ret > 1))
86577b6f79dSJohn Harrison 		return -EPROTO;
86677b6f79dSJohn Harrison 	if (unlikely(!ret))
86777b6f79dSJohn Harrison 		return -ENOKEY;
86877b6f79dSJohn Harrison 
86977b6f79dSJohn Harrison 	return 0;
87077b6f79dSJohn Harrison }
87177b6f79dSJohn Harrison 
87277b6f79dSJohn Harrison static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
87377b6f79dSJohn Harrison {
87477b6f79dSJohn Harrison 	int err = __guc_action_self_cfg(guc, key, len, value);
87577b6f79dSJohn Harrison 
87677b6f79dSJohn Harrison 	if (unlikely(err))
877ecb89c2cSMichal Wajdeczko 		guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
87877b6f79dSJohn Harrison 				ERR_PTR(err), key, value);
87977b6f79dSJohn Harrison 	return err;
88077b6f79dSJohn Harrison }
88177b6f79dSJohn Harrison 
88277b6f79dSJohn Harrison int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
88377b6f79dSJohn Harrison {
88477b6f79dSJohn Harrison 	return __guc_self_cfg(guc, key, 1, value);
88577b6f79dSJohn Harrison }
88677b6f79dSJohn Harrison 
88777b6f79dSJohn Harrison int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
88877b6f79dSJohn Harrison {
88977b6f79dSJohn Harrison 	return __guc_self_cfg(guc, key, 2, value);
89077b6f79dSJohn Harrison }
89177b6f79dSJohn Harrison 
89234904bd6SDaniele Ceraolo Spurio /**
89334904bd6SDaniele Ceraolo Spurio  * intel_guc_load_status - dump information about GuC load status
89434904bd6SDaniele Ceraolo Spurio  * @guc: the GuC
89534904bd6SDaniele Ceraolo Spurio  * @p: the &drm_printer
89634904bd6SDaniele Ceraolo Spurio  *
89734904bd6SDaniele Ceraolo Spurio  * Pretty printer for GuC load status.
89834904bd6SDaniele Ceraolo Spurio  */
89934904bd6SDaniele Ceraolo Spurio void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
90034904bd6SDaniele Ceraolo Spurio {
90134904bd6SDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
90234904bd6SDaniele Ceraolo Spurio 	struct intel_uncore *uncore = gt->uncore;
90334904bd6SDaniele Ceraolo Spurio 	intel_wakeref_t wakeref;
90434904bd6SDaniele Ceraolo Spurio 
90534904bd6SDaniele Ceraolo Spurio 	if (!intel_guc_is_supported(guc)) {
90634904bd6SDaniele Ceraolo Spurio 		drm_printf(p, "GuC not supported\n");
90734904bd6SDaniele Ceraolo Spurio 		return;
90834904bd6SDaniele Ceraolo Spurio 	}
90934904bd6SDaniele Ceraolo Spurio 
91034904bd6SDaniele Ceraolo Spurio 	if (!intel_guc_is_wanted(guc)) {
91134904bd6SDaniele Ceraolo Spurio 		drm_printf(p, "GuC disabled\n");
91234904bd6SDaniele Ceraolo Spurio 		return;
91334904bd6SDaniele Ceraolo Spurio 	}
91434904bd6SDaniele Ceraolo Spurio 
91534904bd6SDaniele Ceraolo Spurio 	intel_uc_fw_dump(&guc->fw, p);
91634904bd6SDaniele Ceraolo Spurio 
91734904bd6SDaniele Ceraolo Spurio 	with_intel_runtime_pm(uncore->rpm, wakeref) {
91834904bd6SDaniele Ceraolo Spurio 		u32 status = intel_uncore_read(uncore, GUC_STATUS);
91934904bd6SDaniele Ceraolo Spurio 		u32 i;
92034904bd6SDaniele Ceraolo Spurio 
921cc2e0cf0SJohn Harrison 		drm_printf(p, "GuC status 0x%08x:\n", status);
92234904bd6SDaniele Ceraolo Spurio 		drm_printf(p, "\tBootrom status = 0x%x\n",
92334904bd6SDaniele Ceraolo Spurio 			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
92434904bd6SDaniele Ceraolo Spurio 		drm_printf(p, "\tuKernel status = 0x%x\n",
92534904bd6SDaniele Ceraolo Spurio 			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
92634904bd6SDaniele Ceraolo Spurio 		drm_printf(p, "\tMIA Core status = 0x%x\n",
92734904bd6SDaniele Ceraolo Spurio 			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
928cc2e0cf0SJohn Harrison 		drm_puts(p, "Scratch registers:\n");
92934904bd6SDaniele Ceraolo Spurio 		for (i = 0; i < 16; i++) {
93034904bd6SDaniele Ceraolo Spurio 			drm_printf(p, "\t%2d: \t0x%x\n",
93134904bd6SDaniele Ceraolo Spurio 				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
93234904bd6SDaniele Ceraolo Spurio 		}
93334904bd6SDaniele Ceraolo Spurio 	}
93434904bd6SDaniele Ceraolo Spurio }
9356b540bf6SMatthew Brost 
9366b540bf6SMatthew Brost void intel_guc_write_barrier(struct intel_guc *guc)
9376b540bf6SMatthew Brost {
9386b540bf6SMatthew Brost 	struct intel_gt *gt = guc_to_gt(guc);
9396b540bf6SMatthew Brost 
9406b540bf6SMatthew Brost 	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
9416b540bf6SMatthew Brost 		/*
9426b540bf6SMatthew Brost 		 * Ensure intel_uncore_write_fw can be used rather than
9436b540bf6SMatthew Brost 		 * intel_uncore_write.
9446b540bf6SMatthew Brost 		 */
9456b540bf6SMatthew Brost 		GEM_BUG_ON(guc->send_regs.fw_domains);
9466b540bf6SMatthew Brost 
9476b540bf6SMatthew Brost 		/*
9486b540bf6SMatthew Brost 		 * This register is used by the i915 and GuC for MMIO based
9496b540bf6SMatthew Brost 		 * communication. Once we are in this code CTBs are the only
9506b540bf6SMatthew Brost 		 * method the i915 uses to communicate with the GuC so it is
9516b540bf6SMatthew Brost 		 * safe to write to this register (a value of 0 is NOP for MMIO
9526b540bf6SMatthew Brost 		 * communication). If we ever start mixing CTBs and MMIOs a new
9536b540bf6SMatthew Brost 		 * register will have to be chosen. This function is also used
9546b540bf6SMatthew Brost 		 * to enforce ordering of a work queue item write and an update
9556b540bf6SMatthew Brost 		 * to the process descriptor. When a work queue is being used,
9566b540bf6SMatthew Brost 		 * CTBs are also the only mechanism of communication.
9576b540bf6SMatthew Brost 		 */
9586b540bf6SMatthew Brost 		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
9596b540bf6SMatthew Brost 	} else {
9606b540bf6SMatthew Brost 		/* wmb() sufficient for a barrier if in smem */
9616b540bf6SMatthew Brost 		wmb();
9626b540bf6SMatthew Brost 	}
9636b540bf6SMatthew Brost }
964