// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include <generated/xe_wa_oob.h>

#include "abi/gsc_mkhi_commands_abi.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc_pc.h"
#include "xe_huc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "instructions/xe_gsc_commands.h"
#include "regs/xe_gsc_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_irq_regs.h"

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

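/*
 * Copy the GSC FW binary into the GSC private memory region and zero out the
 * remainder of that region, staging the image through a kernel buffer
 * because xe_migrate_copy cannot target stolen memory yet (see the FIXME
 * below).
 */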
static int memcpy_fw(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 fw_size = gsc->fw.size;
	void *storage;

	/*
	 * FIXME: xe_migrate_copy does not work with stolen mem yet, so we use
	 * a memcpy for now.
	 */
	storage = kmalloc(fw_size, GFP_KERNEL);
	if (!storage)
		return -ENOMEM;

	xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size);
	xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size);
	xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size);

	kfree(storage);

	return 0;
}
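/*
 * Submit a GSC_FW_LOAD batch on the GSCCS that points the GSC at the private
 * memory region (GGTT offset plus size in 4K pages) to kick off the FW load,
 * then wait up to one second for the job to complete.
 */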
static int emit_gsc_upload(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u64 offset = xe_bo_ggtt_addr(gsc->private);
	struct xe_bb *bb;
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	bb->cs[bb->len++] = GSC_FW_LOAD;
	bb->cs[bb->len++] = lower_32_bits(offset);
	bb->cs[bb->len++] = upper_32_bits(offset);
	bb->cs[bb->len++] = (gsc->private->size / SZ_4K) | GSC_FW_LOAD_LIMIT_VALID;

	job = xe_bb_create_job(gsc->q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

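/*
 * Helpers to read/write fields of the MKHI compatibility version messages at
 * a given offset within the packet BO mapping.
 */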
#define version_query_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct gsc_get_compatibility_version_in, field_, val_)
#define version_query_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct gsc_get_compatibility_version_out, field_)

static u32 emit_version_query_msg(struct xe_device *xe, struct iosys_map *map, u32 wr_offset)
{
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct gsc_get_compatibility_version_in));

	version_query_wr(xe, map, wr_offset, header.group_id, MKHI_GROUP_ID_GFX_SRV);
	version_query_wr(xe, map, wr_offset, header.command,
			 MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION);

	return wr_offset + sizeof(struct gsc_get_compatibility_version_in);
}

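/*
 * The version query uses a single BO holding both messages: the input is
 * emitted at offset 0 and the GSC writes its reply at offset GSC_VER_PKT_SZ.
 * The returned proj_major/compat_major/compat_minor fields are mapped onto
 * the driver's major.minor.patch compatibility version.
 */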
#define GSC_VER_PKT_SZ SZ_4K /* 4K each for input and output */
static int query_compatibility_version(struct xe_gsc *gsc)
{
	struct xe_uc_fw_version *compat = &gsc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	int err;

	bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo)) {
		xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
		return PTR_ERR(bo);
	}

	ggtt_offset = xe_bo_ggtt_addr(bo);

	wr_offset = xe_gsc_emit_header(xe, &bo->vmap, 0, HECI_MEADDRESS_MKHI, 0,
				       sizeof(struct gsc_get_compatibility_version_in));
	wr_offset = emit_version_query_msg(xe, &bo->vmap, wr_offset);

	err = xe_gsc_pkt_submit_kernel(gsc, ggtt_offset, wr_offset,
				       ggtt_offset + GSC_VER_PKT_SZ,
				       GSC_VER_PKT_SZ);
	if (err) {
		xe_gt_err(gt,
			  "failed to submit GSC request for compatibility version: %d\n",
			  err);
		goto out_bo;
	}

	err = xe_gsc_read_out_header(xe, &bo->vmap, GSC_VER_PKT_SZ,
				     sizeof(struct gsc_get_compatibility_version_out),
				     &rd_offset);
	if (err) {
		xe_gt_err(gt, "invalid GSC reply for version query (err=%d)\n", err);
		goto out_bo;
	}

	compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major);
	compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
	compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);

	xe_gt_info(gt, "found GSC cv%u.%u.%u\n", compat->major, compat->minor, compat->patch);

out_bo:
	xe_bo_unpin_map_no_vm(bo);
	return err;
}

static bool gsc_fw_is_loaded(struct xe_gt *gt)
{
	return xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) &
			      HECI1_FWSTS1_INIT_COMPLETE;
}

static int gsc_fw_wait(struct xe_gt *gt)
{
	/*
	 * GSC load can take up to 250ms from the moment the instruction is
	 * executed by the GSCCS. To account for possible submission delays or
	 * other issues, we use a 500ms timeout in the wait here.
	 */
	return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
			      HECI1_FWSTS1_INIT_COMPLETE,
			      HECI1_FWSTS1_INIT_COMPLETE,
			      500 * USEC_PER_MSEC, NULL, false);
}

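/*
 * Perform the actual GSC FW load: stage the image into the private memory
 * region, submit the load instruction, wait for the FW to report init
 * complete and then check that the compatibility version it exposes matches
 * what the driver requires.
 */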
static int gsc_upload(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	/* we should only be here if the init step was successful */
	xe_assert(xe, xe_uc_fw_is_loadable(&gsc->fw) && gsc->q);

	if (gsc_fw_is_loaded(gt)) {
		xe_gt_err(gt, "GSC already loaded at upload time\n");
		return -EEXIST;
	}

	err = memcpy_fw(gsc);
	if (err) {
		xe_gt_err(gt, "Failed to memcpy GSC FW\n");
		return err;
	}

	/*
	 * GSC is only killed by an FLR, so we need to trigger one on unload to
	 * make sure we stop it. This is because we assign a chunk of memory to
	 * the GSC as part of the FW load, so we need to make sure it stops
	 * using it when we release it to the system on driver unload. Note that
	 * this is not a problem of the unload per se, because the GSC will not
	 * touch that memory unless there are requests for it coming from the
	 * driver; therefore, no accesses will happen while Xe is not loaded,
	 * but if we re-load the driver then the GSC might wake up and try to
	 * access that old memory location again.
	 * Given that an FLR is a very disruptive action (see the FLR function
	 * for details), we want to do it as the last action before releasing
	 * the access to the MMIO bar, which means we need to do it as part of
	 * mmio cleanup.
	 */
	xe->needs_flr_on_fini = true;

	err = emit_gsc_upload(gsc);
	if (err) {
		xe_gt_err(gt, "Failed to emit GSC FW upload (%pe)\n", ERR_PTR(err));
		return err;
	}

	err = gsc_fw_wait(gt);
	if (err) {
		xe_gt_err(gt, "Failed to wait for GSC load (%pe)\n", ERR_PTR(err));
		return err;
	}

	err = query_compatibility_version(gsc);
	if (err)
		return err;

	err = xe_uc_fw_check_version_requirements(&gsc->fw);
	if (err)
		return err;

	return 0;
}

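/*
 * Upload the GSC FW and perform the post-load steps: mark the FW as
 * transferred, restore the expected GT frequencies, authenticate the HuC via
 * the GSC and start the GSC proxy worker. WA 14018094691 is applied on the
 * primary GT around the upload.
 */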
static int gsc_upload_and_init(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	unsigned int fw_ref;
	int ret;

	if (XE_WA(tile->primary_gt, 14018094691)) {
		fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);

		/*
		 * If the forcewake fails we want to keep going, because the worst
		 * case outcome in failing to apply the WA is that PXP won't work,
		 * which is not fatal. Forcewake get warns implicitly in case of
		 * failure.
		 */
		xe_gt_mcr_multicast_write(tile->primary_gt,
					  EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK,
					  EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT);
	}

	ret = gsc_upload(gsc);

	if (XE_WA(tile->primary_gt, 14018094691))
		xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);

	if (ret)
		return ret;

	xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);

	/* GSC load is done, restore expected GT frequencies */
	xe_gt_sanitize_freq(gt);

	xe_gt_dbg(gt, "GSC FW async load completed\n");

	/* HuC auth failure is not fatal */
	if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC))
		xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC);

	ret = xe_gsc_proxy_start(gsc);
	if (ret)
		return ret;

	xe_gt_dbg(gt, "GSC proxy init completed\n");

	return 0;
}

static int gsc_er_complete(struct xe_gt *gt)
{
	u32 er_status;

	if (!gsc_fw_is_loaded(gt))
		return 0;

	/*
	 * Starting on Xe2, the GSCCS engine reset is a 2-step process. When the
	 * driver or the GuC hit the GDRST register, the CS is immediately reset
	 * and a success is reported, but the GSC shim keeps resetting in the
	 * background. While the shim reset is ongoing, the CS is able to accept
	 * new context submission, but any commands that require the shim will
	 * be stalled until the reset is completed. This means that we can keep
	 * submitting to the GSCCS as long as we make sure that the preemption
	 * timeout is big enough to cover any delay introduced by the reset.
	 * When the shim reset completes, a specific CS interrupt is triggered,
	 * in response to which we need to check the GSCI_TIMER_STATUS register
	 * to see if the reset was successful or not.
	 * Note that the GSCI_TIMER_STATUS register is not power save/restored,
	 * so it gets reset on MC6 entry. However, a reset failure stops MC6,
	 * so in that scenario we're always guaranteed to find the correct
	 * value.
	 */
	er_status = xe_mmio_read32(&gt->mmio, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;

	if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
		/*
		 * XXX: we should trigger an FLR here, but we don't have support
		 * for that yet. Since we can't recover from the error, we
		 * declare the device as wedged.
		 */
		xe_gt_err(gt, "GSC ER timed out!\n");
		xe_device_declare_wedged(gt_to_xe(gt));
		return -EIO;
	}

	return 0;
}

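/*
 * Main GSC worker: snapshot and clear the pending actions under the lock,
 * then handle them with runtime PM and GSC forcewake held. Engine-reset
 * completion is checked first, because a failed reset wedges the device and
 * skips the remaining actions.
 */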
static void gsc_work(struct work_struct *work)
{
	struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	u32 actions;
	int ret;

	spin_lock_irq(&gsc->lock);
	actions = gsc->work_actions;
	gsc->work_actions = 0;
	spin_unlock_irq(&gsc->lock);

	xe_pm_runtime_get(xe);
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);

	if (actions & GSC_ACTION_ER_COMPLETE) {
		ret = gsc_er_complete(gt);
		if (ret)
			goto out;
	}

	if (actions & GSC_ACTION_FW_LOAD) {
		ret = gsc_upload_and_init(gsc);
		if (ret && ret != -EEXIST)
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
		else
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
	}

	if (actions & GSC_ACTION_SW_PROXY)
		xe_gsc_proxy_request_handler(gsc);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_pm_runtime_put(xe);
}

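/*
 * Interrupt handler for the GSCCS: on an engine-reset complete interrupt,
 * queue the worker to check the outcome of the reset.
 */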
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	if (unlikely(!intr_vec))
		return;

	if (intr_vec & GSC_ER_COMPLETE) {
		spin_lock(&gsc->lock);
		gsc->work_actions |= GSC_ACTION_ER_COMPLETE;
		spin_unlock(&gsc->lock);

		queue_work(gsc->wq, &gsc->work);
	}
}

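/*
 * First-stage GSC init: set up the worker and lock, mark the uC as not
 * supported on GTs other than the media GT, and fetch the FW blob. Proxy
 * init failures other than -ENODEV are fatal here.
 */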
int xe_gsc_init(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	int ret;

	gsc->fw.type = XE_UC_FW_TYPE_GSC;
	INIT_WORK(&gsc->work, gsc_work);
	spin_lock_init(&gsc->lock);

	/* The GSC uC is only available on the media GT */
	if (tile->media_gt && (gt != tile->media_gt)) {
		xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
		return 0;
	}

	/*
	 * Some platforms can have GuC but not GSC. That would cause
	 * xe_uc_fw_init(gsc) to return a "not supported" failure code and abort
	 * all firmware loading. So check for GSC being enabled before
	 * propagating the failure back up. That way the higher level will keep
	 * going and load GuC as appropriate.
	 */
	ret = xe_uc_fw_init(&gsc->fw);
	if (!xe_uc_fw_is_enabled(&gsc->fw))
		return 0;
	else if (ret)
		goto out;

	ret = xe_gsc_proxy_init(gsc);
	if (ret && ret != -ENODEV)
		goto out;

	return 0;

out:
	xe_gt_err(gt, "GSC init failed with %d\n", ret);
	return ret;
}

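/* devm action: tear down the workqueue and exec queue created below */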
static void free_resources(void *arg)
{
	struct xe_gsc *gsc = arg;

	if (gsc->wq) {
		destroy_workqueue(gsc->wq);
		gsc->wq = NULL;
	}

	if (gsc->q) {
		xe_exec_queue_put(gsc->q);
		gsc->q = NULL;
	}
}

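/*
 * Second-stage GSC init: allocate the 4MB private memory region in stolen
 * memory, create a permanent kernel exec queue on the GSCCS and an ordered
 * workqueue for the GSC worker, then mark the FW as loadable.
 */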
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_OTHER, 0, true);
	struct xe_exec_queue *q;
	struct workqueue_struct *wq;
	struct xe_bo *bo;
	int err;

	if (!xe_uc_fw_is_available(&gsc->fw))
		return 0;

	if (!hwe)
		return -ENODEV;

	bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4M,
					  XE_BO_FLAG_STOLEN |
					  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	q = xe_exec_queue_create(xe, NULL,
				 BIT(hwe->logical_instance), 1, hwe,
				 EXEC_QUEUE_FLAG_KERNEL |
				 EXEC_QUEUE_FLAG_PERMANENT, 0);
	if (IS_ERR(q)) {
		xe_gt_err(gt, "Failed to create queue for GSC submission\n");
		err = PTR_ERR(q);
		goto out_bo;
	}

	wq = alloc_ordered_workqueue("gsc-ordered-wq", 0);
	if (!wq) {
		err = -ENOMEM;
		goto out_q;
	}

	gsc->private = bo;
	gsc->q = q;
	gsc->wq = wq;

	err = devm_add_action_or_reset(xe->drm.dev, free_resources, gsc);
	if (err)
		return err;

	xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out_q:
	xe_exec_queue_put(q);
out_bo:
	xe_bo_unpin_map_no_vm(bo);
	return err;
}

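/*
 * Kick off the asynchronous GSC FW load by queueing GSC_ACTION_FW_LOAD on
 * the GSC worker, unless the FW is already running or stuck in an
 * unrecoverable error state.
 */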
void xe_gsc_load_start(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q)
		return;

	/*
	 * The GSC HW is only reset by driver FLR or D3cold entry. We don't
	 * support the former at runtime, while the latter is only supported on
	 * DGFX, for which we don't support GSC. Therefore, if GSC failed to
	 * load previously there is no need to try again because the HW is
	 * stuck in the error state.
	 */
	xe_assert(xe, !IS_DGFX(xe));
	if (xe_uc_fw_is_in_error_state(&gsc->fw))
		return;

	/* GSC FW survives GT reset and D3Hot */
	if (gsc_fw_is_loaded(gt)) {
		if (xe_gsc_proxy_init_done(gsc))
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
		else
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
		return;
	}

	spin_lock_irq(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_FW_LOAD;
	spin_unlock_irq(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}

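/* Flush any pending GSC work (FW load, ER handling, proxy requests) */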
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
{
	if (xe_uc_fw_is_loadable(&gsc->fw) && gsc->wq)
		flush_work(&gsc->work);
}

/**
 * xe_gsc_remove() - Clean up the GSC structures before driver removal
 * @gsc: the GSC uC
 */
void xe_gsc_remove(struct xe_gsc *gsc)
{
	xe_gsc_proxy_remove(gsc);
}

/*
 * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
 * GSC engine reset by writing a notification bit in the GS1 register and then
 * triggering an interrupt to GSC; from the interrupt it will take up to 200ms
 * for the FW to prepare for the reset, so we need to wait for that amount of
 * time.
 * After the reset is complete we need to then clear the GS1 register.
 */
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
{
	u32 gs1_set = prep ? HECI_H_GS1_ER_PREP : 0;
	u32 gs1_clr = prep ? 0 : HECI_H_GS1_ER_PREP;

	/* WA only applies if the GSC is loaded */
	if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
		return;

	xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);

	if (prep) {
		/* make sure the reset bit is clear when writing the CSR reg */
		xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE),
			      HECI_H_CSR_RST, HECI_H_CSR_IG);
		msleep(200);
	}
}

/**
 * xe_gsc_print_info() - print info about GSC FW status
 * @gsc: the GSC structure
 * @p: the printer to be used to print the info
 */
void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_mmio *mmio = &gt->mmio;
	unsigned int fw_ref;

	xe_uc_fw_print(&gsc->fw, p);

	drm_printf(p, "\tfound security version %u\n", gsc->security_version);

	if (!xe_uc_fw_is_enabled(&gsc->fw))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (!fw_ref)
		return;

	drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			xe_mmio_read32(mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
			xe_mmio_read32(mmio, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
			xe_mmio_read32(mmio, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
			xe_mmio_read32(mmio, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
			xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
			xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}