1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2023 Intel Corporation
4 */
5
6 #include "xe_gsc.h"
7
8 #include <linux/delay.h>
9
10 #include <drm/drm_managed.h>
11 #include <drm/drm_print.h>
12
13 #include <generated/xe_wa_oob.h>
14
15 #include "abi/gsc_mkhi_commands_abi.h"
16 #include "xe_bb.h"
17 #include "xe_bo.h"
18 #include "xe_device.h"
19 #include "xe_exec_queue.h"
20 #include "xe_force_wake.h"
21 #include "xe_gsc_proxy.h"
22 #include "xe_gsc_submit.h"
23 #include "xe_gt.h"
24 #include "xe_gt_mcr.h"
25 #include "xe_gt_printk.h"
26 #include "xe_guc_pc.h"
27 #include "xe_huc.h"
28 #include "xe_map.h"
29 #include "xe_mmio.h"
30 #include "xe_pm.h"
31 #include "xe_sched_job.h"
32 #include "xe_uc_fw.h"
33 #include "xe_wa.h"
34 #include "instructions/xe_gsc_commands.h"
35 #include "regs/xe_gsc_regs.h"
36 #include "regs/xe_gt_regs.h"
37
/* Resolve the GT that embeds this GSC uC (gsc lives at gt->uc.gsc). */
static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}
43
memcpy_fw(struct xe_gsc * gsc)44 static int memcpy_fw(struct xe_gsc *gsc)
45 {
46 struct xe_gt *gt = gsc_to_gt(gsc);
47 struct xe_device *xe = gt_to_xe(gt);
48 u32 fw_size = gsc->fw.size;
49 void *storage;
50
51 /*
52 * FIXME: xe_migrate_copy does not work with stolen mem yet, so we use
53 * a memcpy for now.
54 */
55 storage = kmalloc(fw_size, GFP_KERNEL);
56 if (!storage)
57 return -ENOMEM;
58
59 xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size);
60 xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size);
61 xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size);
62
63 kfree(storage);
64
65 return 0;
66 }
67
/*
 * Build and submit a GSC_FW_LOAD batch on the GSC queue, pointing the HW
 * at the private memory chunk, and wait up to 1 second for it to complete.
 * Returns 0 on success, -ETIME on timeout or a negative errno on failure.
 */
static int emit_gsc_upload(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u64 offset = xe_bo_ggtt_addr(gsc->private);
	struct xe_bb *bb;
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	/* 4 dwords: opcode, lo/hi GGTT address, size + limit-valid flag */
	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	bb->cs[bb->len++] = GSC_FW_LOAD;
	bb->cs[bb->len++] = lower_32_bits(offset);
	bb->cs[bb->len++] = upper_32_bits(offset);
	/* the size field is expressed in 4K pages */
	bb->cs[bb->len++] = (gsc->private->size / SZ_4K) | GSC_FW_LOAD_LIMIT_VALID;

	job = xe_bb_create_job(gsc->q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	/* dma_fence_wait_timeout() returns remaining jiffies, 0 on timeout */
	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}
106
/*
 * Accessors for the compatibility-version request/reply structures located
 * at offset_ within an iosys_map (request uses the _in struct, reply _out).
 */
#define version_query_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct gsc_get_compatibility_version_in, field_, val_)
#define version_query_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct gsc_get_compatibility_version_out, field_)
111
/*
 * Emit the MKHI "get host compatibility version" request at wr_offset in
 * the given map and return the offset just past the emitted message.
 */
static u32 emit_version_query_msg(struct xe_device *xe, struct iosys_map *map, u32 wr_offset)
{
	const u32 req_size = sizeof(struct gsc_get_compatibility_version_in);

	/* zero the whole request, then fill in the MKHI routing fields */
	xe_map_memset(xe, map, wr_offset, 0, req_size);
	version_query_wr(xe, map, wr_offset, header.group_id, MKHI_GROUP_ID_GFX_SRV);
	version_query_wr(xe, map, wr_offset, header.command,
			 MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION);

	return wr_offset + req_size;
}
122
123 #define GSC_VER_PKT_SZ SZ_4K /* 4K each for input and output */
query_compatibility_version(struct xe_gsc * gsc)124 static int query_compatibility_version(struct xe_gsc *gsc)
125 {
126 struct xe_uc_fw_version *compat = &gsc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
127 struct xe_gt *gt = gsc_to_gt(gsc);
128 struct xe_tile *tile = gt_to_tile(gt);
129 struct xe_device *xe = gt_to_xe(gt);
130 struct xe_bo *bo;
131 u32 wr_offset;
132 u32 rd_offset;
133 u64 ggtt_offset;
134 int err;
135
136 bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
137 ttm_bo_type_kernel,
138 XE_BO_FLAG_SYSTEM |
139 XE_BO_FLAG_GGTT);
140 if (IS_ERR(bo)) {
141 xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
142 return PTR_ERR(bo);
143 }
144
145 ggtt_offset = xe_bo_ggtt_addr(bo);
146
147 wr_offset = xe_gsc_emit_header(xe, &bo->vmap, 0, HECI_MEADDRESS_MKHI, 0,
148 sizeof(struct gsc_get_compatibility_version_in));
149 wr_offset = emit_version_query_msg(xe, &bo->vmap, wr_offset);
150
151 err = xe_gsc_pkt_submit_kernel(gsc, ggtt_offset, wr_offset,
152 ggtt_offset + GSC_VER_PKT_SZ,
153 GSC_VER_PKT_SZ);
154 if (err) {
155 xe_gt_err(gt,
156 "failed to submit GSC request for compatibility version: %d\n",
157 err);
158 goto out_bo;
159 }
160
161 err = xe_gsc_read_out_header(xe, &bo->vmap, GSC_VER_PKT_SZ,
162 sizeof(struct gsc_get_compatibility_version_out),
163 &rd_offset);
164 if (err) {
165 xe_gt_err(gt, "HuC: invalid GSC reply for version query (err=%d)\n", err);
166 return err;
167 }
168
169 compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major);
170 compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
171 compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
172
173 xe_gt_info(gt, "found GSC cv%u.%u.%u\n", compat->major, compat->minor, compat->patch);
174
175 out_bo:
176 xe_bo_unpin_map_no_vm(bo);
177 return err;
178 }
179
gsc_fw_is_loaded(struct xe_gt * gt)180 static int gsc_fw_is_loaded(struct xe_gt *gt)
181 {
182 return xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) &
183 HECI1_FWSTS1_INIT_COMPLETE;
184 }
185
/* Poll FWSTS1 until the GSC reports init complete; 0 on success, -errno on timeout. */
static int gsc_fw_wait(struct xe_gt *gt)
{
	/*
	 * GSC load can take up to 250ms from the moment the instruction is
	 * executed by the GSCCS. To account for possible submission delays or
	 * other issues, we use a 500ms timeout in the wait here.
	 */
	return xe_mmio_wait32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
			      HECI1_FWSTS1_INIT_COMPLETE,
			      HECI1_FWSTS1_INIT_COMPLETE,
			      500 * USEC_PER_MSEC, NULL, false);
}
198
/*
 * Perform the actual GSC FW load: copy the image to the private chunk,
 * trigger the HW load via the GSCCS, wait for init complete, then query
 * and validate the compatibility version. Returns 0 on success, -EEXIST
 * if the FW was already loaded, or another negative errno on failure.
 */
static int gsc_upload(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	/* we should only be here if the init step were successful */
	xe_assert(xe, xe_uc_fw_is_loadable(&gsc->fw) && gsc->q);

	if (gsc_fw_is_loaded(gt)) {
		xe_gt_err(gt, "GSC already loaded at upload time\n");
		return -EEXIST;
	}

	err = memcpy_fw(gsc);
	if (err) {
		xe_gt_err(gt, "Failed to memcpy GSC FW\n");
		return err;
	}

	/*
	 * GSC is only killed by an FLR, so we need to trigger one on unload to
	 * make sure we stop it. This is because we assign a chunk of memory to
	 * the GSC as part of the FW load, so we need to make sure it stops
	 * using it when we release it to the system on driver unload. Note that
	 * this is not a problem of the unload per-se, because the GSC will not
	 * touch that memory unless there are requests for it coming from the
	 * driver; therefore, no accesses will happen while Xe is not loaded,
	 * but if we re-load the driver then the GSC might wake up and try to
	 * access that old memory location again.
	 * Given that an FLR is a very disruptive action (see the FLR function
	 * for details), we want to do it as the last action before releasing
	 * the access to the MMIO bar, which means we need to do it as part of
	 * mmio cleanup.
	 */
	xe->needs_flr_on_fini = true;

	err = emit_gsc_upload(gsc);
	if (err) {
		xe_gt_err(gt, "Failed to emit GSC FW upload (%pe)\n", ERR_PTR(err));
		return err;
	}

	err = gsc_fw_wait(gt);
	if (err) {
		xe_gt_err(gt, "Failed to wait for GSC load (%pe)\n", ERR_PTR(err));
		return err;
	}

	/* the FW is up; make sure its version is compatible with the driver */
	err = query_compatibility_version(gsc);
	if (err)
		return err;

	err = xe_uc_fw_check_version_requirements(&gsc->fw);
	if (err)
		return err;

	return 0;
}
258
/*
 * Upload the GSC FW and run the post-load steps: mark the FW transferred,
 * restore GT frequencies, authenticate HuC via the GSC and start the proxy.
 * Wa_14018094691 requires holding forcewake on the primary GT and locking
 * the EU systolic throttle control around the upload.
 */
static int gsc_upload_and_init(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	int ret;

	if (XE_WA(tile->primary_gt, 14018094691)) {
		ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);

		/*
		 * If the forcewake fails we want to keep going, because the worst
		 * case outcome in failing to apply the WA is that PXP won't work,
		 * which is not fatal. We still throw a warning so the issue is
		 * seen if it happens.
		 */
		xe_gt_WARN_ON(tile->primary_gt, ret);

		xe_gt_mcr_multicast_write(tile->primary_gt,
					  EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK,
					  EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT);
	}

	ret = gsc_upload(gsc);

	/* drop the WA forcewake regardless of the upload outcome */
	if (XE_WA(tile->primary_gt, 14018094691))
		xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);

	if (ret)
		return ret;

	xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);

	/* GSC load is done, restore expected GT frequencies */
	xe_gt_sanitize_freq(gt);

	xe_gt_dbg(gt, "GSC FW async load completed\n");

	/* HuC auth failure is not fatal */
	if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC))
		xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC);

	ret = xe_gsc_proxy_start(gsc);
	if (ret)
		return ret;

	xe_gt_dbg(gt, "GSC proxy init completed\n");

	return 0;
}
308
/*
 * Check the outcome of a GSCCS engine reset once the completion interrupt
 * has fired. Returns 0 on success (or if the FW isn't loaded), -EIO if the
 * reset timed out, in which case the device is declared wedged.
 */
static int gsc_er_complete(struct xe_gt *gt)
{
	u32 er_status;

	if (!gsc_fw_is_loaded(gt))
		return 0;

	/*
	 * Starting on Xe2, the GSCCS engine reset is a 2-step process. When the
	 * driver or the GuC hit the GDRST register, the CS is immediately reset
	 * and a success is reported, but the GSC shim keeps resetting in the
	 * background. While the shim reset is ongoing, the CS is able to accept
	 * new context submission, but any commands that require the shim will
	 * be stalled until the reset is completed. This means that we can keep
	 * submitting to the GSCCS as long as we make sure that the preemption
	 * timeout is big enough to cover any delay introduced by the reset.
	 * When the shim reset completes, a specific CS interrupt is triggered,
	 * in response to which we need to check the GSCI_TIMER_STATUS register
	 * to see if the reset was successful or not.
	 * Note that the GSCI_TIMER_STATUS register is not power save/restored,
	 * so it gets reset on MC6 entry. However, a reset failure stops MC6,
	 * so in that scenario we're always guaranteed to find the correct
	 * value.
	 */
	er_status = xe_mmio_read32(gt, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;

	if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
		/*
		 * XXX: we should trigger an FLR here, but we don't have support
		 * for that yet. Since we can't recover from the error, we
		 * declare the device as wedged.
		 */
		xe_gt_err(gt, "GSC ER timed out!\n");
		xe_device_declare_wedged(gt_to_xe(gt));
		return -EIO;
	}

	return 0;
}
348
/*
 * Worker that executes the deferred GSC actions (engine-reset completion,
 * FW load, proxy request handling) with runtime PM and GSC forcewake held.
 */
static void gsc_work(struct work_struct *work)
{
	struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 actions;
	int ret;

	/* snapshot and clear the pending actions under the lock */
	spin_lock_irq(&gsc->lock);
	actions = gsc->work_actions;
	gsc->work_actions = 0;
	spin_unlock_irq(&gsc->lock);

	xe_pm_runtime_get(xe);
	xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC));

	if (actions & GSC_ACTION_ER_COMPLETE) {
		/* a failed engine reset wedges the device; skip the other actions */
		ret = gsc_er_complete(gt);
		if (ret)
			goto out;
	}

	if (actions & GSC_ACTION_FW_LOAD) {
		/* -EEXIST means the FW was already loaded, which is not a failure */
		ret = gsc_upload_and_init(gsc);
		if (ret && ret != -EEXIST)
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
		else
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
	}

	if (actions & GSC_ACTION_SW_PROXY)
		xe_gsc_proxy_request_handler(gsc);

out:
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
	xe_pm_runtime_put(xe);
}
386
/*
 * GSCCS interrupt handler: on an engine-reset-complete interrupt, record
 * the pending action and kick the GSC worker to process it.
 */
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec)
{
	struct xe_gsc *gsc = &hwe->gt->uc.gsc;

	/* nothing to do if no interrupt bits are set */
	if (unlikely(!intr_vec))
		return;

	if (!(intr_vec & GSC_ER_COMPLETE))
		return;

	spin_lock(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_ER_COMPLETE;
	spin_unlock(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}
403
xe_gsc_init(struct xe_gsc * gsc)404 int xe_gsc_init(struct xe_gsc *gsc)
405 {
406 struct xe_gt *gt = gsc_to_gt(gsc);
407 struct xe_tile *tile = gt_to_tile(gt);
408 int ret;
409
410 gsc->fw.type = XE_UC_FW_TYPE_GSC;
411 INIT_WORK(&gsc->work, gsc_work);
412 spin_lock_init(&gsc->lock);
413
414 /* The GSC uC is only available on the media GT */
415 if (tile->media_gt && (gt != tile->media_gt)) {
416 xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
417 return 0;
418 }
419
420 /*
421 * Some platforms can have GuC but not GSC. That would cause
422 * xe_uc_fw_init(gsc) to return a "not supported" failure code and abort
423 * all firmware loading. So check for GSC being enabled before
424 * propagating the failure back up. That way the higher level will keep
425 * going and load GuC as appropriate.
426 */
427 ret = xe_uc_fw_init(&gsc->fw);
428 if (!xe_uc_fw_is_enabled(&gsc->fw))
429 return 0;
430 else if (ret)
431 goto out;
432
433 ret = xe_gsc_proxy_init(gsc);
434 if (ret && ret != -ENODEV)
435 goto out;
436
437 return 0;
438
439 out:
440 xe_gt_err(gt, "GSC init failed with %d", ret);
441 return ret;
442 }
443
/*
 * devm cleanup action registered by xe_gsc_init_post_hwconfig(): destroy
 * the ordered workqueue and release the GSC exec queue. The pointers are
 * cleared so later code (e.g. xe_gsc_load_start) sees the GSC as torn down.
 */
static void free_resources(void *arg)
{
	struct xe_gsc *gsc = arg;

	if (gsc->wq) {
		destroy_workqueue(gsc->wq);
		gsc->wq = NULL;
	}

	if (gsc->q) {
		xe_exec_queue_put(gsc->q);
		gsc->q = NULL;
	}
}
458
/*
 * Allocate the runtime resources the GSC needs for FW load and submission:
 * the 4M private stolen-memory chunk, a permanent kernel exec queue on the
 * GSCCS engine and an ordered workqueue for the deferred actions. Cleanup
 * is registered via a devm action (free_resources).
 */
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_OTHER, 0, true);
	struct xe_exec_queue *q;
	struct workqueue_struct *wq;
	struct xe_bo *bo;
	int err;

	if (!xe_uc_fw_is_available(&gsc->fw))
		return 0;

	if (!hwe)
		return -ENODEV;

	bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4M,
					  XE_BO_FLAG_STOLEN |
					  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	q = xe_exec_queue_create(xe, NULL,
				 BIT(hwe->logical_instance), 1, hwe,
				 EXEC_QUEUE_FLAG_KERNEL |
				 EXEC_QUEUE_FLAG_PERMANENT, 0);
	if (IS_ERR(q)) {
		xe_gt_err(gt, "Failed to create queue for GSC submission\n");
		err = PTR_ERR(q);
		goto out_bo;
	}

	wq = alloc_ordered_workqueue("gsc-ordered-wq", 0);
	if (!wq) {
		err = -ENOMEM;
		goto out_q;
	}

	gsc->private = bo;
	gsc->q = q;
	gsc->wq = wq;

	/* from here on, cleanup of q and wq happens via the devm action */
	err = devm_add_action_or_reset(xe->drm.dev, free_resources, gsc);
	if (err)
		return err;

	xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out_q:
	xe_exec_queue_put(q);
out_bo:
	/*
	 * NOTE(review): bo comes from xe_managed_bo_create_pin_map(), i.e. it
	 * is drm-managed; manually unpinning it here may conflict with the
	 * managed release on driver teardown — confirm this error path.
	 */
	xe_bo_unpin_map_no_vm(bo);
	return err;
}
516
/*
 * Kick off the asynchronous GSC FW load by queueing GSC_ACTION_FW_LOAD on
 * the GSC worker. If the FW is already running (e.g. after a GT reset or
 * D3Hot), only the FW status is updated and no new load is attempted.
 */
void xe_gsc_load_start(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);

	/* nothing to do if the init steps didn't produce a loadable FW + queue */
	if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q)
		return;

	/*
	 * The GSC HW is only reset by driver FLR or D3cold entry. We don't
	 * support the former at runtime, while the latter is only supported on
	 * DGFX, for which we don't support GSC. Therefore, if GSC failed to
	 * load previously there is no need to try again because the HW is
	 * stuck in the error state.
	 */
	xe_assert(xe, !IS_DGFX(xe));
	if (xe_uc_fw_is_in_error_state(&gsc->fw))
		return;

	/* GSC FW survives GT reset and D3Hot */
	if (gsc_fw_is_loaded(gt)) {
		if (xe_gsc_proxy_init_done(gsc))
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
		else
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
		return;
	}

	spin_lock_irq(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_FW_LOAD;
	spin_unlock_irq(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}
551
xe_gsc_wait_for_worker_completion(struct xe_gsc * gsc)552 void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
553 {
554 if (xe_uc_fw_is_loadable(&gsc->fw) && gsc->wq)
555 flush_work(&gsc->work);
556 }
557
558 /**
559 * xe_gsc_remove() - Clean up the GSC structures before driver removal
560 * @gsc: the GSC uC
561 */
void xe_gsc_remove(struct xe_gsc *gsc)
{
	/*
	 * Only the proxy component needs explicit teardown here; the exec
	 * queue and workqueue are released via the devm action registered in
	 * xe_gsc_init_post_hwconfig().
	 */
	xe_gsc_proxy_remove(gsc);
}
566
567 /*
568 * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
569 * GSC engine reset by writing a notification bit in the GS1 register and then
570 * triggering an interrupt to GSC; from the interrupt it will take up to 200ms
571 * for the FW to get prepare for the reset, so we need to wait for that amount
572 * of time.
573 * After the reset is complete we need to then clear the GS1 register.
574 */
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
{
	/* on prep we set the ER_PREP notification bit, on completion we clear it */
	u32 gs1_set = prep ? HECI_H_GS1_ER_PREP : 0;
	u32 gs1_clr = prep ? 0 : HECI_H_GS1_ER_PREP;

	/* WA only applies if the GSC is loaded */
	if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
		return;

	xe_mmio_rmw32(gt, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);

	if (prep) {
		/* make sure the reset bit is clear when writing the CSR reg */
		xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE),
			      HECI_H_CSR_RST, HECI_H_CSR_IG);
		/* give the FW the up-to-200ms it needs to prepare for the reset */
		msleep(200);
	}
}
593
594 /**
595 * xe_gsc_print_info - print info about GSC FW status
596 * @gsc: the GSC structure
597 * @p: the printer to be used to print the info
598 */
void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	int err;

	xe_uc_fw_print(&gsc->fw, p);

	drm_printf(p, "\tfound security version %u\n", gsc->security_version);

	if (!xe_uc_fw_is_enabled(&gsc->fw))
		return;

	/* the FWSTS registers need GSC forcewake; bail silently if unavailable */
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (err)
		return;

	drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
}
625