Lines Matching refs:guc (drivers/gpu/drm/i915/gt/uc/intel_guc.c)

42 void intel_guc_notify(struct intel_guc *guc)
44 struct intel_gt *gt = guc_to_gt(guc);
52 intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
55 static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
57 GEM_BUG_ON(!guc->send_regs.base);
58 GEM_BUG_ON(!guc->send_regs.count);
59 GEM_BUG_ON(i >= guc->send_regs.count);
61 return _MMIO(guc->send_regs.base + 4 * i);
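
guc_send_reg() above is pure offset arithmetic: scratch register i sits 4 bytes past register i - 1, starting at send_regs.base. A minimal userspace sketch of that mapping (the base value below is a placeholder for illustration, not taken from the hardware headers):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SEND_REGS_BASE 0x190240u /* placeholder base, illustration only */

/* Mirrors guc_send_reg(): each scratch register is one dword (4 bytes) wide. */
static uint32_t example_send_reg_offset(uint32_t base, uint32_t i)
{
	return base + 4 * i;
}

int main(void)
{
	for (uint32_t i = 0; i < 4; i++)
		printf("scratch[%u] -> %#x\n", i,
		       example_send_reg_offset(EXAMPLE_SEND_REGS_BASE, i));
	return 0;
}
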
64 void intel_guc_init_send_regs(struct intel_guc *guc)
66 struct intel_gt *gt = guc_to_gt(guc);
70 GEM_BUG_ON(!guc->send_regs.base);
71 GEM_BUG_ON(!guc->send_regs.count);
73 for (i = 0; i < guc->send_regs.count; i++) {
75 guc_send_reg(guc, i),
78 guc->send_regs.fw_domains = fw_domains;
81 static void gen9_reset_guc_interrupts(struct intel_guc *guc)
83 struct intel_gt *gt = guc_to_gt(guc);
92 static void gen9_enable_guc_interrupts(struct intel_guc *guc)
94 struct intel_gt *gt = guc_to_gt(guc);
99 guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
104 guc->interrupts.enabled = true;
107 static void gen9_disable_guc_interrupts(struct intel_guc *guc)
109 struct intel_gt *gt = guc_to_gt(guc);
112 guc->interrupts.enabled = false;
121 gen9_reset_guc_interrupts(guc);
132 static void gen11_reset_guc_interrupts(struct intel_guc *guc)
134 struct intel_gt *gt = guc_to_gt(guc);
141 static void gen11_enable_guc_interrupts(struct intel_guc *guc)
143 struct intel_gt *gt = guc_to_gt(guc);
149 guc->interrupts.enabled = true;
152 static void gen11_disable_guc_interrupts(struct intel_guc *guc)
154 struct intel_gt *gt = guc_to_gt(guc);
156 guc->interrupts.enabled = false;
159 gen11_reset_guc_interrupts(guc);
164 struct intel_guc *guc = container_of(w, struct intel_guc, dead_guc_worker);
165 struct intel_gt *gt = guc_to_gt(guc);
166 unsigned long last = guc->last_dead_guc_jiffies;
173 guc->last_dead_guc_jiffies = jiffies;
177 void intel_guc_init_early(struct intel_guc *guc)
179 struct intel_gt *gt = guc_to_gt(guc);
182 intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, true);
183 intel_guc_ct_init_early(&guc->ct);
184 intel_guc_log_init_early(&guc->log);
185 intel_guc_submission_init_early(guc);
186 intel_guc_slpc_init_early(&guc->slpc);
187 intel_guc_rc_init_early(guc);
189 INIT_WORK(&guc->dead_guc_worker, guc_dead_worker_func);
191 mutex_init(&guc->send_mutex);
192 spin_lock_init(&guc->irq_lock);
194 guc->interrupts.reset = gen11_reset_guc_interrupts;
195 guc->interrupts.enable = gen11_enable_guc_interrupts;
196 guc->interrupts.disable = gen11_disable_guc_interrupts;
198 guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
199 guc->send_regs.base = i915_mmio_reg_offset(MEDIA_SOFT_SCRATCH(0));
201 guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
202 guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
205 guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
208 guc->notify_reg = GUC_SEND_INTERRUPT;
209 guc->interrupts.reset = gen9_reset_guc_interrupts;
210 guc->interrupts.enable = gen9_enable_guc_interrupts;
211 guc->interrupts.disable = gen9_disable_guc_interrupts;
212 guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
213 guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
217 intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
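
intel_guc_init_early() is where the per-platform plumbing is chosen: which doorbell register intel_guc_notify() writes, which scratch bank backs the MMIO send path, and whether the gen9 or gen11 interrupt callbacks are installed. A hedged sketch of that selection, using only the register names visible in the listing (the media-GT condition and the demo values are illustrative assumptions):

#include <stdbool.h>
#include <stdio.h>

struct example_guc_regs {
	const char *notify_reg;   /* doorbell written by intel_guc_notify() */
	const char *scratch_base; /* first register of the MMIO send bank   */
};

/* Rough shape of the intel_guc_init_early() selection: gen11+ uses the
 * GEN11_* registers (MEDIA_* on a media GT), older parts fall back to the
 * gen9 SOFT_SCRATCH bank and the gen9 interrupt callbacks. */
static struct example_guc_regs example_pick_regs(bool gen11_or_later, bool media_gt)
{
	if (gen11_or_later && media_gt)
		return (struct example_guc_regs){ "MEDIA_GUC_HOST_INTERRUPT",
						  "MEDIA_SOFT_SCRATCH(0)" };
	if (gen11_or_later)
		return (struct example_guc_regs){ "GEN11_GUC_HOST_INTERRUPT",
						  "GEN11_SOFT_SCRATCH(0)" };
	return (struct example_guc_regs){ "GUC_SEND_INTERRUPT",
					  "SOFT_SCRATCH(0)" };
}

int main(void)
{
	struct example_guc_regs r = example_pick_regs(true, false);

	printf("notify=%s scratch=%s\n", r.notify_reg, r.scratch_base);
	return 0;
}
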
221 void intel_guc_init_late(struct intel_guc *guc)
223 intel_guc_ads_init_late(guc);
226 static u32 guc_ctl_debug_flags(struct intel_guc *guc)
228 u32 level = intel_guc_log_get_level(&guc->log);
240 static u32 guc_ctl_feature_flags(struct intel_guc *guc)
242 struct intel_gt *gt = guc_to_gt(guc);
252 if (!intel_guc_submission_is_used(guc))
255 if (intel_guc_slpc_is_used(guc))
261 static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
263 struct intel_guc_log *log = &guc->log;
268 offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
282 static u32 guc_ctl_ads_flags(struct intel_guc *guc)
284 u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
290 static u32 guc_ctl_wa_flags(struct intel_guc *guc)
292 struct intel_gt *gt = guc_to_gt(guc);
340 if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
346 static u32 guc_ctl_devid(struct intel_guc *guc)
348 struct drm_i915_private *i915 = guc_to_i915(guc);
358 static void guc_init_params(struct intel_guc *guc)
360 u32 *params = guc->params;
363 BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
365 params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
366 params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
367 params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
368 params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
369 params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
370 params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
373 guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
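
guc_init_params() fills one u32 per control dword from a dedicated flags helper and then dumps the result, as the debug loop above shows. A small userspace illustration of that shape (the EX_CTL_* indices and flag values are placeholders, not the firmware ABI):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the GUC_CTL_* indices; the real values
 * come from the firmware ABI headers. */
enum { EX_CTL_LOG_PARAMS, EX_CTL_FEATURE, EX_CTL_DEBUG, EX_CTL_MAX };

static uint32_t ex_log_flags(void)     { return 0x1; } /* placeholder */
static uint32_t ex_feature_flags(void) { return 0x2; } /* placeholder */
static uint32_t ex_debug_flags(void)   { return 0x0; } /* placeholder */

int main(void)
{
	uint32_t params[EX_CTL_MAX];

	/* Same shape as guc_init_params(): one helper per control dword. */
	params[EX_CTL_LOG_PARAMS] = ex_log_flags();
	params[EX_CTL_FEATURE]    = ex_feature_flags();
	params[EX_CTL_DEBUG]      = ex_debug_flags();

	for (int i = 0; i < EX_CTL_MAX; i++)
		printf("param[%2d] = %#x\n", i, params[i]);
	return 0;
}
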
381 void intel_guc_write_params(struct intel_guc *guc)
383 struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
396 intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
401 void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
403 struct intel_gt *gt = guc_to_gt(guc);
418 int intel_guc_init(struct intel_guc *guc)
422 ret = intel_uc_fw_init(&guc->fw);
426 ret = intel_guc_log_create(&guc->log);
430 ret = intel_guc_capture_init(guc);
434 ret = intel_guc_ads_create(guc);
438 GEM_BUG_ON(!guc->ads_vma);
440 ret = intel_guc_ct_init(&guc->ct);
444 if (intel_guc_submission_is_used(guc)) {
449 ret = intel_guc_submission_init(guc);
454 if (intel_guc_slpc_is_used(guc)) {
455 ret = intel_guc_slpc_init(&guc->slpc);
461 guc_init_params(guc);
463 intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
468 intel_guc_submission_fini(guc);
470 intel_guc_ct_fini(&guc->ct);
472 intel_guc_ads_destroy(guc);
474 intel_guc_capture_destroy(guc);
476 intel_guc_log_destroy(&guc->log);
478 intel_uc_fw_fini(&guc->fw);
480 intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
481 guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
485 void intel_guc_fini(struct intel_guc *guc)
487 if (!intel_uc_fw_is_loadable(&guc->fw))
490 flush_work(&guc->dead_guc_worker);
492 if (intel_guc_slpc_is_used(guc))
493 intel_guc_slpc_fini(&guc->slpc);
495 if (intel_guc_submission_is_used(guc))
496 intel_guc_submission_fini(guc);
498 intel_guc_ct_fini(&guc->ct);
500 intel_guc_ads_destroy(guc);
501 intel_guc_capture_destroy(guc);
502 intel_guc_log_destroy(&guc->log);
503 intel_uc_fw_fini(&guc->fw);
509 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
512 struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
518 GEM_BUG_ON(len > guc->send_regs.count);
523 mutex_lock(&guc->send_mutex);
524 intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
528 intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
530 intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
532 intel_guc_notify(guc);
539 guc_send_reg(guc, 0),
546 guc_err(guc, "mmio request %#x: no reply %x\n",
552 #define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
568 guc_dbg(guc, "mmio request %#x: retrying, reason %u\n",
577 guc_err(guc, "mmio request %#x: failure %x/%u\n",
585 guc_err(guc, "mmio request %#x: unexpected reply %#x\n",
592 int count = min(response_buf_size, guc->send_regs.count);
600 guc_send_reg(guc, i));
610 intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
611 mutex_unlock(&guc->send_mutex);
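
The MMIO send path above serializes on send_mutex, holds forcewake over the scratch bank, writes the request dwords, rings the doorbell via intel_guc_notify(), then polls scratch register 0 for the response header, retrying or reporting failure as the header indicates. A hedged caller-side sketch, reusing only the call shape visible in the listing (the dword contents are placeholders, not a valid H2G message):

/* Sketch of a caller that expects no response payload. */
static int example_mmio_request(struct intel_guc *guc)
{
	u32 request[2] = { 0x0 /* header */, 0x0 /* parameter */ };

	return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
}
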
616 int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
619 guc_err(guc, "Crash dump notification\n");
621 guc_err(guc, "Exception notification\n");
623 guc_err(guc, "Unknown crash notification: 0x%04X\n", action);
625 queue_work(system_unbound_wq, &guc->dead_guc_worker);
630 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
639 msg = payload[0] & guc->msg_enabled_mask;
642 guc_err(guc, "Received early crash dump notification!\n");
644 guc_err(guc, "Received early exception notification!\n");
647 queue_work(system_unbound_wq, &guc->dead_guc_worker);
654 * @guc: intel_guc structure
663 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
670 return intel_guc_send(guc, action, ARRAY_SIZE(action));
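
intel_guc_auth_huc() follows the usual pattern for a small host-to-GuC request: build a u32 action array whose first dword is the action id, then hand it to intel_guc_send(). A hedged sketch of that pattern (the action id and parameter below are hypothetical, not real H2G definitions):

/* Sketch only: INTEL_GUC_ACTION_EXAMPLE is a hypothetical action id. */
static int example_action(struct intel_guc *guc, u32 param)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXAMPLE,	/* hypothetical action id */
		param,
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
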
675 * @guc: the guc
677 int intel_guc_suspend(struct intel_guc *guc)
684 if (!intel_guc_is_ready(guc))
687 if (intel_guc_submission_is_used(guc)) {
688 flush_work(&guc->dead_guc_worker);
701 ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
703 guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
708 intel_guc_sanitize(guc);
715 * @guc: the guc
717 int intel_guc_resume(struct intel_guc *guc)
764 * @guc: the guc
775 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
777 struct intel_gt *gt = guc_to_gt(guc);
822 * @guc: the guc
832 int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
838 vma = intel_guc_allocate_vma(guc, size);
843 intel_gt_coherent_map_type(guc_to_gt(guc),
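
intel_guc_allocate_and_map_vma() wraps intel_guc_allocate_vma() and additionally pins a CPU mapping using the coherent map type for the GT. A hedged caller sketch, assuming its two out parameters are the allocated vma and its CPU address (as its users expect):

/* Sketch only: assumes the helper returns the vma and a CPU mapping
 * through its out parameters. SZ_4K is the usual kernel size macro. */
static int example_alloc_and_map(struct intel_guc *guc,
				 struct i915_vma **vma, void **vaddr)
{
	return intel_guc_allocate_and_map_vma(guc, SZ_4K, vma, vaddr);
}
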
856 static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
873 ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
885 static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
887 int err = __guc_action_self_cfg(guc, key, len, value);
890 guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
895 int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
897 return __guc_self_cfg(guc, key, 1, value);
900 int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
902 return __guc_self_cfg(guc, key, 2, value);
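
The two self-config wrappers differ only in the KLV length they pass: one dword for a 32-bit value, two for a 64-bit value. A userspace illustration of the split, assuming the usual low-dword-first ordering of the 64-bit payload:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t value = 0x1122334455667788ULL;
	uint32_t lo = (uint32_t)value;          /* first payload dword (assumed) */
	uint32_t hi = (uint32_t)(value >> 32);  /* second payload dword (assumed) */

	printf("len=1 payload: %#x\n", lo);
	printf("len=2 payload: lo=%#x hi=%#x\n", lo, hi);
	return 0;
}
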
907 * @guc: the GuC
912 void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
914 struct intel_gt *gt = guc_to_gt(guc);
918 if (!intel_guc_is_supported(guc)) {
923 if (!intel_guc_is_wanted(guc)) {
928 intel_uc_fw_dump(&guc->fw, p);
949 void intel_guc_write_barrier(struct intel_guc *guc)
951 struct intel_gt *gt = guc_to_gt(guc);
953 if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
958 GEM_BUG_ON(guc->send_regs.fw_domains);