xref: /linux/drivers/gpu/drm/xe/xe_guc_ct.c (revision 68a052239fc4b351e961f698b824f7654a346091)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_ct.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/circ_buf.h>
10 #include <linux/delay.h>
11 #include <linux/fault-inject.h>
12 
13 #include <kunit/static_stub.h>
14 
15 #include <drm/drm_managed.h>
16 
17 #include "abi/guc_actions_abi.h"
18 #include "abi/guc_actions_sriov_abi.h"
19 #include "abi/guc_klvs_abi.h"
20 #include "xe_bo.h"
21 #include "xe_devcoredump.h"
22 #include "xe_device.h"
23 #include "xe_gt.h"
24 #include "xe_gt_pagefault.h"
25 #include "xe_gt_printk.h"
26 #include "xe_gt_sriov_pf_control.h"
27 #include "xe_gt_sriov_pf_monitor.h"
28 #include "xe_gt_sriov_printk.h"
29 #include "xe_guc.h"
30 #include "xe_guc_log.h"
31 #include "xe_guc_relay.h"
32 #include "xe_guc_submit.h"
33 #include "xe_guc_tlb_inval.h"
34 #include "xe_map.h"
35 #include "xe_pm.h"
36 #include "xe_trace_guc.h"
37 
38 static void receive_g2h(struct xe_guc_ct *ct);
39 static void g2h_worker_func(struct work_struct *w);
40 static void safe_mode_worker_func(struct work_struct *w);
41 static void ct_exit_safe_mode(struct xe_guc_ct *ct);
42 static void guc_ct_change_state(struct xe_guc_ct *ct,
43 				enum xe_guc_ct_state state);
44 
45 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
46 enum {
47 	/* Internal states, not error conditions */
48 	CT_DEAD_STATE_REARM,			/* 0x0001 */
49 	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */
50 
51 	/* Error conditions */
52 	CT_DEAD_SETUP,				/* 0x0004 */
53 	CT_DEAD_H2G_WRITE,			/* 0x0008 */
54 	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
55 	CT_DEAD_G2H_READ,			/* 0x0020 */
56 	CT_DEAD_G2H_RECV,			/* 0x0040 */
57 	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
58 	CT_DEAD_DEADLOCK,			/* 0x0100 */
59 	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
60 	CT_DEAD_FAST_G2H,			/* 0x0400 */
61 	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
62 	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
63 	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
64 	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
65 	CT_DEAD_CRASH,				/* 0x8000 */
66 };
67 
68 static void ct_dead_worker_func(struct work_struct *w);
69 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
70 
71 #define CT_DEAD(ct, ctb, reason_code)		ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
72 #else
73 #define CT_DEAD(ct, ctb, reason)			\
74 	do {						\
75 		struct guc_ctb *_ctb = (ctb);		\
76 		if (_ctb)				\
77 			_ctb->info.broken = true;	\
78 	} while (0)
79 #endif
80 
81 /* Used when a CT send wants to block and / or receive data */
82 struct g2h_fence {
83 	u32 *response_buffer;
84 	u32 seqno;
85 	u32 response_data;
86 	u16 response_len;
87 	u16 error;
88 	u16 hint;
89 	u16 reason;
90 	bool cancel;
91 	bool retry;
92 	bool fail;
93 	bool done;
94 };
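
/*
 * Lifecycle of the struct g2h_fence above (as used in this file): the fence
 * lives on the sender's stack in guc_ct_send_recv(), is stored in
 * ct->fence_lookup keyed by its seqno, and is removed again either by
 * parse_g2h_response() when the reply arrives, by the sender on error or
 * timeout, or by guc_ct_change_state() cancelling all outstanding fences.
 */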
95 
96 #define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
97 
98 static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
99 {
100 	memset(g2h_fence, 0, sizeof(*g2h_fence));
101 	g2h_fence->response_buffer = response_buffer;
102 	g2h_fence->seqno = ~0x0;
103 }
104 
105 static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
106 {
107 	g2h_fence->cancel = true;
108 	g2h_fence->fail = true;
109 	g2h_fence->done = true;
110 }
111 
112 static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
113 {
114 	return g2h_fence->seqno == ~0x0;
115 }
116 
117 static struct xe_guc *
118 ct_to_guc(struct xe_guc_ct *ct)
119 {
120 	return container_of(ct, struct xe_guc, ct);
121 }
122 
123 static struct xe_gt *
124 ct_to_gt(struct xe_guc_ct *ct)
125 {
126 	return container_of(ct, struct xe_gt, uc.guc.ct);
127 }
128 
129 static struct xe_device *
130 ct_to_xe(struct xe_guc_ct *ct)
131 {
132 	return gt_to_xe(ct_to_gt(ct));
133 }
134 
135 /**
136  * DOC: GuC CTB Blob
137  *
138  * We allocate a single blob to hold both CTB descriptors and buffers:
139  *
140  *      +--------+-----------------------------------------------+------+
141  *      | offset | contents                                      | size |
142  *      +========+===============================================+======+
143  *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
144  *      +--------+-----------------------------------------------+  4K  |
145  *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
146  *      +--------+-----------------------------------------------+------+
147  *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
148  *      |        |                                               |      |
149  *      +--------+-----------------------------------------------+------+
150  *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
151  *      | + n*4K |                                               |      |
152  *      +--------+-----------------------------------------------+------+
153  *
154  * Size of each ``CT Buffer`` must be a multiple of 4K.
155  * We don't expect too many messages in flight at any time, unless we are
156  * using GuC submission. In that case each request requires a minimum of
157  * 2 dwords which gives us a maximum of 256 queued requests. Hopefully this
158  * is enough space to avoid backpressure on the driver. We increase the size
159  * of the receive buffer (relative to the send) to ensure a G2H response
160  * CTB has a landing spot.
161  *
162  * In addition to submissions, the G2H buffer needs to be able to hold
163  * enough space for recoverable page fault notifications. The number of
164  * page faults is interrupt driven and can be as much as the number of
165  * compute resources available. However, most of the actual work for these
166  * is in a separate page fault worker thread. Therefore we only need to
167  * make sure the queue has enough space to handle all of the submissions
168  * and responses and an extra buffer for incoming page faults.
169  */
170 
171 #define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
172 #define CTB_H2G_BUFFER_SIZE	(SZ_4K)
173 #define CTB_G2H_BUFFER_SIZE	(SZ_128K)
174 #define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)
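
/*
 * Worked example with the sizes above (informational only, nothing below
 * depends on this arithmetic being written out): the two descriptors take
 * 2 * 2K = 4K (assuming struct guc_ct_buffer_desc fits within 2K, as the
 * layout diagram shows), the H2G buffer takes 4K and the G2H buffer 128K,
 * so guc_ct_size() returns 136K and the G2H buffer starts at offset 0x2000
 * within the blob.  Half of the G2H buffer (G2H_ROOM_BUFFER_SIZE = 64K) is
 * kept back as resv_space and never handed out by the credit accounting.
 */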
175 
176 /**
177  * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
178  * CT command queue
179  * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
180  *
181  * Observation is that a 4KiB buffer full of commands takes a little over a
182  * second to process. Use that to calculate maximum time to process a full CT
183  * command queue.
184  *
185  * Return: Maximum time to process a full CT queue in jiffies.
186  */
187 long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
188 {
189 	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
190 	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
191 }
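
/*
 * E.g. with CTB_H2G_BUFFER_SIZE == SZ_4K the above is (SZ_4K / SZ_4K) * HZ,
 * i.e. one second per full queue, matching the observation above.
 */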
192 
193 static size_t guc_ct_size(void)
194 {
195 	return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
196 		CTB_G2H_BUFFER_SIZE;
197 }
198 
199 static void guc_ct_fini(struct drm_device *drm, void *arg)
200 {
201 	struct xe_guc_ct *ct = arg;
202 
203 	ct_exit_safe_mode(ct);
204 	destroy_workqueue(ct->g2h_wq);
205 	xa_destroy(&ct->fence_lookup);
206 }
207 
208 static void primelockdep(struct xe_guc_ct *ct)
209 {
210 	if (!IS_ENABLED(CONFIG_LOCKDEP))
211 		return;
212 
213 	fs_reclaim_acquire(GFP_KERNEL);
214 	might_lock(&ct->lock);
215 	fs_reclaim_release(GFP_KERNEL);
216 }
217 
218 int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
219 {
220 	struct xe_device *xe = ct_to_xe(ct);
221 	struct xe_gt *gt = ct_to_gt(ct);
222 	int err;
223 
224 	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
225 
226 	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
227 	if (!ct->g2h_wq)
228 		return -ENOMEM;
229 
230 	spin_lock_init(&ct->fast_lock);
231 	xa_init(&ct->fence_lookup);
232 	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
233 	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
234 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
235 	spin_lock_init(&ct->dead.lock);
236 	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
237 #endif
238 	init_waitqueue_head(&ct->wq);
239 	init_waitqueue_head(&ct->g2h_fence_wq);
240 
241 	err = drmm_mutex_init(&xe->drm, &ct->lock);
242 	if (err)
243 		return err;
244 
245 	primelockdep(ct);
246 
247 	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
248 	if (err)
249 		return err;
250 
251 	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
252 	ct->state = XE_GUC_CT_STATE_DISABLED;
253 	return 0;
254 }
255 ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
256 
257 static void guc_action_disable_ct(void *arg)
258 {
259 	struct xe_guc_ct *ct = arg;
260 
261 	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
262 }
263 
264 int xe_guc_ct_init(struct xe_guc_ct *ct)
265 {
266 	struct xe_device *xe = ct_to_xe(ct);
267 	struct xe_gt *gt = ct_to_gt(ct);
268 	struct xe_tile *tile = gt_to_tile(gt);
269 	struct xe_bo *bo;
270 
271 	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
272 					  XE_BO_FLAG_SYSTEM |
273 					  XE_BO_FLAG_GGTT |
274 					  XE_BO_FLAG_GGTT_INVALIDATE |
275 					  XE_BO_FLAG_PINNED_NORESTORE);
276 	if (IS_ERR(bo))
277 		return PTR_ERR(bo);
278 
279 	ct->bo = bo;
280 
281 	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
282 }
283 ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
284 
285 /**
286  * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
287  * @ct: the &xe_guc_ct
288  *
289  * Allocate a new BO in VRAM and free the previous BO that was allocated
290  * in system memory (SMEM). Applicable only for DGFX products.
291  *
292  * Return: 0 on success, or a negative errno on failure.
293  */
294 int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
295 {
296 	struct xe_device *xe = ct_to_xe(ct);
297 	struct xe_gt *gt = ct_to_gt(ct);
298 	struct xe_tile *tile = gt_to_tile(gt);
299 	int ret;
300 
301 	xe_assert(xe, !xe_guc_ct_enabled(ct));
302 
303 	if (IS_DGFX(xe)) {
304 		ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
305 		if (ret)
306 			return ret;
307 	}
308 
309 	devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
310 	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
311 }
312 
313 #define desc_read(xe_, guc_ctb__, field_)			\
314 	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
315 			struct guc_ct_buffer_desc, field_)
316 
317 #define desc_write(xe_, guc_ctb__, field_, val_)		\
318 	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
319 			struct guc_ct_buffer_desc, field_, val_)
320 
321 static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
322 				struct iosys_map *map)
323 {
324 	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
325 	h2g->info.resv_space = 0;
326 	h2g->info.tail = 0;
327 	h2g->info.head = 0;
328 	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
329 				     h2g->info.size) -
330 			  h2g->info.resv_space;
331 	h2g->info.broken = false;
332 
333 	h2g->desc = *map;
334 	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
335 
336 	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
337 }
338 
339 static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
340 				struct iosys_map *map)
341 {
342 	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
343 	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
344 	g2h->info.head = 0;
345 	g2h->info.tail = 0;
346 	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
347 				     g2h->info.size) -
348 			  g2h->info.resv_space;
349 	g2h->info.broken = false;
350 
351 	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
352 	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
353 
354 	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
355 					    CTB_H2G_BUFFER_SIZE);
356 }
357 
358 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
359 {
360 	struct xe_guc *guc = ct_to_guc(ct);
361 	u32 desc_addr, ctb_addr, size;
362 	int err;
363 
364 	desc_addr = xe_bo_ggtt_addr(ct->bo);
365 	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
366 	size = ct->ctbs.h2g.info.size * sizeof(u32);
367 
368 	err = xe_guc_self_cfg64(guc,
369 				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
370 				desc_addr);
371 	if (err)
372 		return err;
373 
374 	err = xe_guc_self_cfg64(guc,
375 				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
376 				ctb_addr);
377 	if (err)
378 		return err;
379 
380 	return xe_guc_self_cfg32(guc,
381 				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
382 				 size);
383 }
384 
385 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
386 {
387 	struct xe_guc *guc = ct_to_guc(ct);
388 	u32 desc_addr, ctb_addr, size;
389 	int err;
390 
391 	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
392 	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
393 		CTB_H2G_BUFFER_SIZE;
394 	size = ct->ctbs.g2h.info.size * sizeof(u32);
395 
396 	err = xe_guc_self_cfg64(guc,
397 				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
398 				desc_addr);
399 	if (err)
400 		return err;
401 
402 	err = xe_guc_self_cfg64(guc,
403 				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
404 				ctb_addr);
405 	if (err)
406 		return err;
407 
408 	return xe_guc_self_cfg32(guc,
409 				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
410 				 size);
411 }
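
/*
 * Registration summary: each CTB is described to the GuC firmware through
 * self-config KLVs (descriptor GGTT address, buffer GGTT address, buffer
 * size in bytes), after which the channel is switched on or off with the
 * HOST2GUC_CONTROL_CTB action in guc_ct_control_toggle() below.
 */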
412 
413 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
414 {
415 	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
416 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
417 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
418 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
419 			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
420 		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
421 			   enable ? GUC_CTB_CONTROL_ENABLE :
422 			   GUC_CTB_CONTROL_DISABLE),
423 	};
424 	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
425 
426 	return ret > 0 ? -EPROTO : ret;
427 }
428 
429 static void guc_ct_change_state(struct xe_guc_ct *ct,
430 				enum xe_guc_ct_state state)
431 {
432 	struct xe_gt *gt = ct_to_gt(ct);
433 	struct g2h_fence *g2h_fence;
434 	unsigned long idx;
435 
436 	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
437 	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */
438 
439 	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
440 		     state == XE_GUC_CT_STATE_STOPPED);
441 
442 	if (ct->g2h_outstanding)
443 		xe_pm_runtime_put(ct_to_xe(ct));
444 	ct->g2h_outstanding = 0;
445 	ct->state = state;
446 
447 	xe_gt_dbg(gt, "GuC CT communication channel %s\n",
448 		  state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
449 		  str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));
450 
451 	spin_unlock_irq(&ct->fast_lock);
452 
453 	/* cancel all in-flight send-recv requests */
454 	xa_for_each(&ct->fence_lookup, idx, g2h_fence)
455 		g2h_fence_cancel(g2h_fence);
456 
457 	/* make sure guc_ct_send_recv() will see g2h_fence changes */
458 	smp_mb();
459 	wake_up_all(&ct->g2h_fence_wq);
460 
461 	/*
462 	 * Lockdep doesn't like this under the fast lock and the destroy only
463 	 * needs to be serialized with the send path, which the ct lock provides.
464 	 */
465 	xa_destroy(&ct->fence_lookup);
466 
467 	mutex_unlock(&ct->lock);
468 }
469 
470 static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
471 {
472 	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
473 }
474 
475 static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
476 {
477 	if (!ct_needs_safe_mode(ct))
478 		return false;
479 
480 	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
481 	return true;
482 }
483 
484 static void safe_mode_worker_func(struct work_struct *w)
485 {
486 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
487 
488 	receive_g2h(ct);
489 
490 	if (!ct_restart_safe_mode_worker(ct))
491 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
492 }
493 
494 static void ct_enter_safe_mode(struct xe_guc_ct *ct)
495 {
496 	if (ct_restart_safe_mode_worker(ct))
497 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
498 }
499 
500 static void ct_exit_safe_mode(struct xe_guc_ct *ct)
501 {
502 	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
503 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
504 }
505 
506 int xe_guc_ct_enable(struct xe_guc_ct *ct)
507 {
508 	struct xe_device *xe = ct_to_xe(ct);
509 	struct xe_gt *gt = ct_to_gt(ct);
510 	int err;
511 
512 	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
513 
514 	xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
515 	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
516 	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
517 
518 	err = guc_ct_ctb_h2g_register(ct);
519 	if (err)
520 		goto err_out;
521 
522 	err = guc_ct_ctb_g2h_register(ct);
523 	if (err)
524 		goto err_out;
525 
526 	err = guc_ct_control_toggle(ct, true);
527 	if (err)
528 		goto err_out;
529 
530 	guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
531 
532 	smp_mb();
533 	wake_up_all(&ct->wq);
534 
535 	if (ct_needs_safe_mode(ct))
536 		ct_enter_safe_mode(ct);
537 
538 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
539 	/*
540 	 * The CT has now been reset so the dumper can be re-armed
541 	 * after any existing dead state has been dumped.
542 	 */
543 	spin_lock_irq(&ct->dead.lock);
544 	if (ct->dead.reason) {
545 		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
546 		queue_work(system_unbound_wq, &ct->dead.worker);
547 	}
548 	spin_unlock_irq(&ct->dead.lock);
549 #endif
550 
551 	return 0;
552 
553 err_out:
554 	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
555 	CT_DEAD(ct, NULL, SETUP);
556 
557 	return err;
558 }
559 
560 static void stop_g2h_handler(struct xe_guc_ct *ct)
561 {
562 	cancel_work_sync(&ct->g2h_worker);
563 }
564 
565 /**
566  * xe_guc_ct_disable - Set GuC to disabled state
567  * @ct: the &xe_guc_ct
568  *
569  * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
570  * in this transition.
571  */
572 void xe_guc_ct_disable(struct xe_guc_ct *ct)
573 {
574 	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
575 	ct_exit_safe_mode(ct);
576 	stop_g2h_handler(ct);
577 }
578 
579 /**
580  * xe_guc_ct_stop - Set GuC to stopped state
581  * @ct: the &xe_guc_ct
582  *
583  * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
584  */
585 void xe_guc_ct_stop(struct xe_guc_ct *ct)
586 {
587 	if (!xe_guc_ct_initialized(ct))
588 		return;
589 
590 	guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
591 	stop_g2h_handler(ct);
592 }
593 
594 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
595 {
596 	struct guc_ctb *h2g = &ct->ctbs.h2g;
597 
598 	lockdep_assert_held(&ct->lock);
599 
600 	if (cmd_len > h2g->info.space) {
601 		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
602 
603 		if (h2g->info.head > h2g->info.size) {
604 			struct xe_device *xe = ct_to_xe(ct);
605 			u32 desc_status = desc_read(xe, h2g, status);
606 
607 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
608 
609 			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
610 				  h2g->info.head, h2g->info.size);
611 			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
612 			return false;
613 		}
614 
615 		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
616 					     h2g->info.size) -
617 				  h2g->info.resv_space;
618 		if (cmd_len > h2g->info.space)
619 			return false;
620 	}
621 
622 	return true;
623 }
624 
625 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
626 {
627 	if (!g2h_len)
628 		return true;
629 
630 	lockdep_assert_held(&ct->fast_lock);
631 
632 	return ct->ctbs.g2h.info.space > g2h_len;
633 }
634 
635 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
636 {
637 	lockdep_assert_held(&ct->lock);
638 
639 	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
640 		return -EBUSY;
641 
642 	return 0;
643 }
644 
645 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
646 {
647 	lockdep_assert_held(&ct->lock);
648 	ct->ctbs.h2g.info.space -= cmd_len;
649 }
650 
651 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
652 {
653 	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
654 	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
655 		     (g2h_len && num_g2h));
656 
657 	if (g2h_len) {
658 		lockdep_assert_held(&ct->fast_lock);
659 
660 		if (!ct->g2h_outstanding)
661 			xe_pm_runtime_get_noresume(ct_to_xe(ct));
662 
663 		ct->ctbs.g2h.info.space -= g2h_len;
664 		ct->g2h_outstanding += num_g2h;
665 	}
666 }
667 
668 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
669 {
670 	bool bad = false;
671 
672 	lockdep_assert_held(&ct->fast_lock);
673 
674 	bad = ct->ctbs.g2h.info.space + g2h_len >
675 		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
676 	bad |= !ct->g2h_outstanding;
677 
678 	if (bad) {
679 		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
680 			  ct->ctbs.g2h.info.space, g2h_len,
681 			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
682 			  ct->ctbs.g2h.info.space + g2h_len,
683 			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
684 			  ct->g2h_outstanding);
685 		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
686 		return;
687 	}
688 
689 	ct->ctbs.g2h.info.space += g2h_len;
690 	if (!--ct->g2h_outstanding)
691 		xe_pm_runtime_put(ct_to_xe(ct));
692 }
693 
694 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
695 {
696 	spin_lock_irq(&ct->fast_lock);
697 	__g2h_release_space(ct, g2h_len);
698 	spin_unlock_irq(&ct->fast_lock);
699 }
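
/*
 * G2H credit model in brief: an H2G that will trigger G2H replies reserves
 * the worst-case reply length via __g2h_reserve_space(), which also takes a
 * runtime PM reference while any G2H is outstanding.  Once the reply is
 * consumed (parse_g2h_event(), parse_g2h_response() or the TLB-invalidation
 * fast path) the space is returned via __g2h_release_space(), which drops
 * the PM reference when the last outstanding G2H has been accounted for.
 */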
700 
701 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
702 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
703 {
704 	unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
705 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
706 	unsigned long entries[SZ_32];
707 	unsigned int n;
708 
709 	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
710 
711 	/* May be called under spinlock, so avoid sleeping */
712 	ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
713 #endif
714 	ct->fast_req[slot].fence = fence;
715 	ct->fast_req[slot].action = action;
716 }
717 #else
718 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
719 {
720 }
721 #endif
722 
723 /*
724  * The CT protocol accepts a 16-bit fence. This field is fully owned by the
725  * driver; the GuC will just copy it to the reply message. Since we need to
726  * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
727  * we use one bit of the seqno as an indicator for that and a rolling counter
728  * for the remaining 15 bits.
729  */
730 #define CT_SEQNO_MASK GENMASK(14, 0)
731 #define CT_SEQNO_UNTRACKED BIT(15)
732 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
733 {
734 	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
735 
736 	if (!is_g2h_fence)
737 		seqno |= CT_SEQNO_UNTRACKED;
738 
739 	return seqno;
740 }
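
/*
 * Example encoding (illustrative): with fence_seqno == 5, a tracked send
 * (is_g2h_fence == true) yields seqno 0x0005 while an untracked FAST_REQUEST
 * yields 0x8005 (CT_SEQNO_UNTRACKED set); the low 15 bits wrap over time.
 */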
741 
742 #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
743 
744 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
745 		     u32 ct_fence_value, bool want_response)
746 {
747 	struct xe_device *xe = ct_to_xe(ct);
748 	struct xe_gt *gt = ct_to_gt(ct);
749 	struct guc_ctb *h2g = &ct->ctbs.h2g;
750 	u32 cmd[H2G_CT_HEADERS];
751 	u32 tail = h2g->info.tail;
752 	u32 full_len;
753 	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
754 							 tail * sizeof(u32));
755 	u32 desc_status;
756 
757 	full_len = len + GUC_CTB_HDR_LEN;
758 
759 	lockdep_assert_held(&ct->lock);
760 	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
761 
762 	desc_status = desc_read(xe, h2g, status);
763 	if (desc_status) {
764 		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
765 		goto corrupted;
766 	}
767 
768 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
769 		u32 desc_tail = desc_read(xe, h2g, tail);
770 		u32 desc_head = desc_read(xe, h2g, head);
771 
772 		if (tail != desc_tail) {
773 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
774 			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
775 			goto corrupted;
776 		}
777 
778 		if (tail > h2g->info.size) {
779 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
780 			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
781 				  tail, h2g->info.size);
782 			goto corrupted;
783 		}
784 
785 		if (desc_head >= h2g->info.size) {
786 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
787 			xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
788 				  desc_head, h2g->info.size);
789 			goto corrupted;
790 		}
791 	}
792 
793 	/* Command will wrap, zero fill (NOPs), return and check credits again */
794 	if (tail + full_len > h2g->info.size) {
795 		xe_map_memset(xe, &map, 0, 0,
796 			      (h2g->info.size - tail) * sizeof(u32));
797 		h2g_reserve_space(ct, (h2g->info.size - tail));
798 		h2g->info.tail = 0;
799 		desc_write(xe, h2g, tail, h2g->info.tail);
800 
801 		return -EAGAIN;
802 	}
803 
804 	/*
805 	 * dw0: CT header (including fence)
806 	 * dw1: HXG header (including action code)
807 	 * dw2+: action data
808 	 */
809 	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
810 		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
811 		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
812 	if (want_response) {
813 		cmd[1] =
814 			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
815 			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
816 				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
817 	} else {
818 		fast_req_track(ct, ct_fence_value,
819 			       FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
820 
821 		cmd[1] =
822 			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
823 			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
824 				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
825 	}
826 
827 	/* H2G header in cmd[1] replaces action[0] so: */
828 	--len;
829 	++action;
830 
831 	/* Write H2G ensuring visible before descriptor update */
832 	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
833 	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
834 	xe_device_wmb(xe);
835 
836 	/* Update local copies */
837 	h2g->info.tail = (tail + full_len) % h2g->info.size;
838 	h2g_reserve_space(ct, full_len);
839 
840 	/* Update descriptor */
841 	desc_write(xe, h2g, tail, h2g->info.tail);
842 
843 	trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
844 			     desc_read(xe, h2g, head), h2g->info.tail);
845 
846 	return 0;
847 
848 corrupted:
849 	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
850 	return -EPIPE;
851 }
852 
853 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
854 				u32 len, u32 g2h_len, u32 num_g2h,
855 				struct g2h_fence *g2h_fence)
856 {
857 	struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
858 	u16 seqno;
859 	int ret;
860 
861 	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
862 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
863 	xe_gt_assert(gt, !num_g2h || !g2h_fence);
864 	xe_gt_assert(gt, !g2h_len || num_g2h);
865 	xe_gt_assert(gt, g2h_len || !num_g2h);
866 	lockdep_assert_held(&ct->lock);
867 
868 	if (unlikely(ct->ctbs.h2g.info.broken)) {
869 		ret = -EPIPE;
870 		goto out;
871 	}
872 
873 	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
874 		ret = -ENODEV;
875 		goto out;
876 	}
877 
878 	if (ct->state == XE_GUC_CT_STATE_STOPPED) {
879 		ret = -ECANCELED;
880 		goto out;
881 	}
882 
883 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
884 
885 	if (g2h_fence) {
886 		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
887 		num_g2h = 1;
888 
889 		if (g2h_fence_needs_alloc(g2h_fence)) {
890 			g2h_fence->seqno = next_ct_seqno(ct, true);
891 			ret = xa_err(xa_store(&ct->fence_lookup,
892 					      g2h_fence->seqno, g2h_fence,
893 					      GFP_ATOMIC));
894 			if (ret)
895 				goto out;
896 		}
897 
898 		seqno = g2h_fence->seqno;
899 	} else {
900 		seqno = next_ct_seqno(ct, false);
901 	}
902 
903 	if (g2h_len)
904 		spin_lock_irq(&ct->fast_lock);
905 retry:
906 	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
907 	if (unlikely(ret))
908 		goto out_unlock;
909 
910 	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
911 	if (unlikely(ret)) {
912 		if (ret == -EAGAIN)
913 			goto retry;
914 		goto out_unlock;
915 	}
916 
917 	__g2h_reserve_space(ct, g2h_len, num_g2h);
918 	xe_guc_notify(ct_to_guc(ct));
919 out_unlock:
920 	if (g2h_len)
921 		spin_unlock_irq(&ct->fast_lock);
922 out:
923 	return ret;
924 }
925 
926 static void kick_reset(struct xe_guc_ct *ct)
927 {
928 	xe_gt_reset_async(ct_to_gt(ct));
929 }
930 
931 static int dequeue_one_g2h(struct xe_guc_ct *ct);
932 
933 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
934 			      u32 g2h_len, u32 num_g2h,
935 			      struct g2h_fence *g2h_fence)
936 {
937 	struct xe_device *xe = ct_to_xe(ct);
938 	struct xe_gt *gt = ct_to_gt(ct);
939 	unsigned int sleep_period_ms = 1;
940 	int ret;
941 
942 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
943 	lockdep_assert_held(&ct->lock);
944 	xe_device_assert_mem_access(ct_to_xe(ct));
945 
946 try_again:
947 	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
948 				   g2h_fence);
949 
950 	/*
951 	 * We wait to try to restore credits for about 1 second before bailing.
952 	 * In the case of H2G credits we have no choice but just to wait for the
953 	 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
954 	 * the case of G2H we process any G2H in the channel, hopefully freeing
955 	 * credits as we consume the G2H messages.
956 	 */
957 	if (unlikely(ret == -EBUSY &&
958 		     !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
959 		struct guc_ctb *h2g = &ct->ctbs.h2g;
960 
961 		if (sleep_period_ms == 1024)
962 			goto broken;
963 
964 		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
965 						 h2g->info.size,
966 						 h2g->info.space,
967 						 len + GUC_CTB_HDR_LEN);
968 		msleep(sleep_period_ms);
969 		sleep_period_ms <<= 1;
970 
971 		goto try_again;
972 	} else if (unlikely(ret == -EBUSY)) {
973 		struct xe_device *xe = ct_to_xe(ct);
974 		struct guc_ctb *g2h = &ct->ctbs.g2h;
975 
976 		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
977 						 desc_read(xe, g2h, tail),
978 						 g2h->info.size,
979 						 g2h->info.space,
980 						 g2h_fence ?
981 						 GUC_CTB_HXG_MSG_MAX_LEN :
982 						 g2h_len);
983 
984 #define g2h_avail(ct)	\
985 	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
986 		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
987 					g2h_avail(ct), HZ))
988 			goto broken;
989 #undef g2h_avail
990 
991 		ret = dequeue_one_g2h(ct);
992 		if (ret < 0) {
993 			if (ret != -ECANCELED)
994 				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
995 					  ERR_PTR(ret));
996 			goto broken;
997 		}
998 
999 		goto try_again;
1000 	}
1001 
1002 	return ret;
1003 
1004 broken:
1005 	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
1006 	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
1007 
1008 	return -EDEADLK;
1009 }
1010 
1011 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1012 		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
1013 {
1014 	int ret;
1015 
1016 	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
1017 
1018 	mutex_lock(&ct->lock);
1019 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
1020 	mutex_unlock(&ct->lock);
1021 
1022 	return ret;
1023 }
1024 
1025 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1026 		   u32 g2h_len, u32 num_g2h)
1027 {
1028 	int ret;
1029 
1030 	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
1031 	if (ret == -EDEADLK)
1032 		kick_reset(ct);
1033 
1034 	return ret;
1035 }
1036 
1037 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1038 			  u32 g2h_len, u32 num_g2h)
1039 {
1040 	int ret;
1041 
1042 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
1043 	if (ret == -EDEADLK)
1044 		kick_reset(ct);
1045 
1046 	return ret;
1047 }
1048 
1049 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
1050 {
1051 	int ret;
1052 
1053 	lockdep_assert_held(&ct->lock);
1054 
1055 	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
1056 	if (ret == -EDEADLK)
1057 		kick_reset(ct);
1058 
1059 	return ret;
1060 }
1061 
1062 /*
1063  * Check if a GT reset is in progress or will occur and if GT reset brought the
1064  * CT back up. Randomly picking 5 seconds as an upper limit for a GT reset.
1065  */
1066 static bool retry_failure(struct xe_guc_ct *ct, int ret)
1067 {
1068 	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
1069 		return false;
1070 
1071 #define ct_alive(ct)	\
1072 	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
1073 	 !ct->ctbs.g2h.info.broken)
1074 	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
1075 		return false;
1076 #undef ct_alive
1077 
1078 	return true;
1079 }
1080 
1081 #define GUC_SEND_RETRY_LIMIT	50
1082 #define GUC_SEND_RETRY_MSLEEP	5
1083 
1084 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1085 			    u32 *response_buffer, bool no_fail)
1086 {
1087 	struct xe_gt *gt = ct_to_gt(ct);
1088 	struct g2h_fence g2h_fence;
1089 	unsigned int retries = 0;
1090 	int ret = 0;
1091 
1092 	/*
1093 	 * We use a fence to implement blocking sends / receiving response data.
1094 	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
1095  * an xarray is used as the storage medium with the seqno being the key.
1096 	 * Fields in the fence hold success, failure, retry status and the
1097 	 * response data. Safe to allocate on the stack as the xarray is the
1098 	 * only reference and it cannot be present after this function exits.
1099 	 */
1100 retry:
1101 	g2h_fence_init(&g2h_fence, response_buffer);
1102 retry_same_fence:
1103 	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
1104 	if (unlikely(ret == -ENOMEM)) {
1105 		/* Retry allocation with GFP_KERNEL */
1106 		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
1107 				      &g2h_fence, GFP_KERNEL));
1108 		if (ret)
1109 			return ret;
1110 
1111 		goto retry_same_fence;
1112 	} else if (unlikely(ret)) {
1113 		if (ret == -EDEADLK)
1114 			kick_reset(ct);
1115 
1116 		if (no_fail && retry_failure(ct, ret))
1117 			goto retry_same_fence;
1118 
1119 		if (!g2h_fence_needs_alloc(&g2h_fence))
1120 			xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1121 
1122 		return ret;
1123 	}
1124 
1125 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
1126 	if (!ret) {
1127 		LNL_FLUSH_WORK(&ct->g2h_worker);
1128 		if (g2h_fence.done) {
1129 			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
1130 				   g2h_fence.seqno, action[0]);
1131 			ret = 1;
1132 		}
1133 	}
1134 
1135 	/*
1136 	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
1137 	 * the stack, since we have no clue if it will fire after the timeout before we can erase
1138 	 * from the xa. Also we have some dependent loads and stores below for which we need the
1139 	 * correct ordering, and we lack the needed barriers.
1140 	 */
1141 	mutex_lock(&ct->lock);
1142 	if (!ret) {
1143 		xe_gt_err(gt, "Timed out waiting for G2H, fence %u, action %04x, done %s",
1144 			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
1145 		xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1146 		mutex_unlock(&ct->lock);
1147 		return -ETIME;
1148 	}
1149 
1150 	if (g2h_fence.retry) {
1151 		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
1152 			  action[0], g2h_fence.reason);
1153 		mutex_unlock(&ct->lock);
1154 		if (++retries > GUC_SEND_RETRY_LIMIT) {
1155 			xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
1156 				  action[0], GUC_SEND_RETRY_LIMIT);
1157 			return -ELOOP;
1158 		}
1159 		msleep(GUC_SEND_RETRY_MSLEEP * retries);
1160 		goto retry;
1161 	}
1162 	if (g2h_fence.fail) {
1163 		if (g2h_fence.cancel) {
1164 			xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
1165 			ret = -ECANCELED;
1166 			goto unlock;
1167 		}
1168 		xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
1169 			  action[0], g2h_fence.error, g2h_fence.hint);
1170 		ret = -EIO;
1171 	}
1172 
1173 	if (ret > 0)
1174 		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
1175 
1176 unlock:
1177 	mutex_unlock(&ct->lock);
1178 
1179 	return ret;
1180 }
1181 
1182 /**
1183  * xe_guc_ct_send_recv - Send and receive HXG to the GuC
1184  * @ct: the &xe_guc_ct
1185  * @action: the dword array with `HXG Request`_ message (can't be NULL)
1186  * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
1187  * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
1188  *
1189  * Send a `HXG Request`_ message to the GuC over the CT communication channel
1190  * and block until the GuC replies with a `HXG Response`_ message.
1191  *
1192  * For non-blocking communication with GuC use xe_guc_ct_send().
1193  *
1194  * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
1195  *
1196  * Return: response length (in dwords) if &response_buffer was not NULL, or
1197  *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
1198  *         a negative error code on failure.
1199  */
1200 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1201 			u32 *response_buffer)
1202 {
1203 	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
1204 	return guc_ct_send_recv(ct, action, len, response_buffer, false);
1205 }
1206 ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
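
/*
 * Illustrative usage sketch, not taken from the driver (the action code and
 * parameter below are placeholders, not a real GuC action):
 *
 *	u32 action[] = { XE_GUC_ACTION_EXAMPLE_QUERY, param };
 *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 * On failure ret is a negative errno; with a NULL response_buffer a
 * non-negative ret is DATA0 from the GuC's HXG Response.
 */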
1207 
1208 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
1209 				u32 len, u32 *response_buffer)
1210 {
1211 	return guc_ct_send_recv(ct, action, len, response_buffer, true);
1212 }
1213 
1214 static u32 *msg_to_hxg(u32 *msg)
1215 {
1216 	return msg + GUC_CTB_MSG_MIN_LEN;
1217 }
1218 
1219 static u32 msg_len_to_hxg_len(u32 len)
1220 {
1221 	return len - GUC_CTB_MSG_MIN_LEN;
1222 }
1223 
1224 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
1225 {
1226 	u32 *hxg = msg_to_hxg(msg);
1227 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1228 
1229 	lockdep_assert_held(&ct->lock);
1230 
1231 	switch (action) {
1232 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1233 	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1234 	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1235 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1236 		g2h_release_space(ct, len);
1237 	}
1238 
1239 	return 0;
1240 }
1241 
1242 static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
1243 {
1244 	struct xe_gt *gt = ct_to_gt(ct);
1245 
1246 	if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
1247 		xe_gt_err(gt, "GuC Crash dump notification\n");
1248 	else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
1249 		xe_gt_err(gt, "GuC Exception notification\n");
1250 	else
1251 		xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
1252 
1253 	CT_DEAD(ct, NULL, CRASH);
1254 
1255 	kick_reset(ct);
1256 
1257 	return 0;
1258 }
1259 
1260 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
1261 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1262 {
1263 	u16 fence_min = U16_MAX, fence_max = 0;
1264 	struct xe_gt *gt = ct_to_gt(ct);
1265 	bool found = false;
1266 	unsigned int n;
1267 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1268 	char *buf;
1269 #endif
1270 
1271 	lockdep_assert_held(&ct->lock);
1272 
1273 	for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
1274 		if (ct->fast_req[n].fence < fence_min)
1275 			fence_min = ct->fast_req[n].fence;
1276 		if (ct->fast_req[n].fence > fence_max)
1277 			fence_max = ct->fast_req[n].fence;
1278 
1279 		if (ct->fast_req[n].fence != fence)
1280 			continue;
1281 		found = true;
1282 
1283 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1284 		buf = kmalloc(SZ_4K, GFP_NOWAIT);
1285 		if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
1286 			xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
1287 				  fence, ct->fast_req[n].action, buf);
1288 		else
1289 			xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
1290 				  fence, ct->fast_req[n].action);
1291 		kfree(buf);
1292 #else
1293 		xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
1294 			  fence, ct->fast_req[n].action);
1295 #endif
1296 		break;
1297 	}
1298 
1299 	if (!found)
1300 		xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
1301 			   fence, fence_min, fence_max, ct->fence_seqno);
1302 }
1303 #else
1304 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1305 {
1306 }
1307 #endif
1308 
1309 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
1310 {
1311 	struct xe_gt *gt =  ct_to_gt(ct);
1312 	u32 *hxg = msg_to_hxg(msg);
1313 	u32 hxg_len = msg_len_to_hxg_len(len);
1314 	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
1315 	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1316 	struct g2h_fence *g2h_fence;
1317 
1318 	lockdep_assert_held(&ct->lock);
1319 
1320 	/*
1321 	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
1322 	 * Those messages should never fail, so if we do get an error back it
1323 	 * means we're likely doing an illegal operation and the GuC is
1324 	 * rejecting it. We have no way to inform the code that submitted the
1325 	 * H2G that the message was rejected, so we need to escalate the
1326 	 * failure to trigger a reset.
1327 	 */
1328 	if (fence & CT_SEQNO_UNTRACKED) {
1329 		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
1330 			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
1331 				  fence,
1332 				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
1333 				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
1334 		else
1335 			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
1336 				  type, fence);
1337 
1338 		fast_req_report(ct, fence);
1339 
1340 		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
1341 
1342 		return -EPROTO;
1343 	}
1344 
1345 	g2h_fence = xa_erase(&ct->fence_lookup, fence);
1346 	if (unlikely(!g2h_fence)) {
1347 		/* Don't tear down channel, as send could've timed out */
1348 		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
1349 		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
1350 		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1351 		return 0;
1352 	}
1353 
1354 	xe_gt_assert(gt, fence == g2h_fence->seqno);
1355 
1356 	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
1357 		g2h_fence->fail = true;
1358 		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
1359 		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
1360 	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
1361 		g2h_fence->retry = true;
1362 		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
1363 	} else if (g2h_fence->response_buffer) {
1364 		g2h_fence->response_len = hxg_len;
1365 		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
1366 	} else {
1367 		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
1368 	}
1369 
1370 	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1371 
1372 	g2h_fence->done = true;
1373 	smp_mb();
1374 
1375 	wake_up_all(&ct->g2h_fence_wq);
1376 
1377 	return 0;
1378 }
1379 
1380 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1381 {
1382 	struct xe_gt *gt = ct_to_gt(ct);
1383 	u32 *hxg = msg_to_hxg(msg);
1384 	u32 origin, type;
1385 	int ret;
1386 
1387 	lockdep_assert_held(&ct->lock);
1388 
1389 	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1390 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1391 		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
1392 			  origin);
1393 		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
1394 
1395 		return -EPROTO;
1396 	}
1397 
1398 	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1399 	switch (type) {
1400 	case GUC_HXG_TYPE_EVENT:
1401 		ret = parse_g2h_event(ct, msg, len);
1402 		break;
1403 	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1404 	case GUC_HXG_TYPE_RESPONSE_FAILURE:
1405 	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
1406 		ret = parse_g2h_response(ct, msg, len);
1407 		break;
1408 	default:
1409 		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
1410 			  type);
1411 		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
1412 
1413 		ret = -EOPNOTSUPP;
1414 	}
1415 
1416 	return ret;
1417 }
1418 
1419 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1420 {
1421 	struct xe_guc *guc = ct_to_guc(ct);
1422 	struct xe_gt *gt = ct_to_gt(ct);
1423 	u32 hxg_len = msg_len_to_hxg_len(len);
1424 	u32 *hxg = msg_to_hxg(msg);
1425 	u32 action, adj_len;
1426 	u32 *payload;
1427 	int ret = 0;
1428 
1429 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1430 		return 0;
1431 
1432 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1433 	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
1434 	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
1435 
1436 	switch (action) {
1437 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1438 		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
1439 		break;
1440 	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1441 		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
1442 		break;
1443 	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1444 		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
1445 		break;
1446 	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1447 		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
1448 							      adj_len);
1449 		break;
1450 	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1451 		/* Selftest only at the moment */
1452 		break;
1453 	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1454 		ret = xe_guc_error_capture_handler(guc, payload, adj_len);
1455 		break;
1456 	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1457 		/* FIXME: Handle this */
1458 		break;
1459 	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
1460 		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
1461 								 adj_len);
1462 		break;
1463 	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1464 		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1465 		break;
1466 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1467 		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1468 		break;
1469 	case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
1470 		ret = xe_guc_access_counter_notify_handler(guc, payload,
1471 							   adj_len);
1472 		break;
1473 	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
1474 		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
1475 		break;
1476 	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
1477 		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
1478 		break;
1479 	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
1480 		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
1481 		break;
1482 	case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
1483 		ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
1484 		break;
1485 	case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
1486 	case XE_GUC_ACTION_NOTIFY_EXCEPTION:
1487 		ret = guc_crash_process_msg(ct, action);
1488 		break;
1489 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1490 	case XE_GUC_ACTION_TEST_G2G_RECV:
1491 		ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
1492 		break;
1493 #endif
1494 	default:
1495 		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
1496 	}
1497 
1498 	if (ret) {
1499 		xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
1500 			  action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
1501 		CT_DEAD(ct, NULL, PROCESS_FAILED);
1502 	}
1503 
1504 	return 0;
1505 }
1506 
1507 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1508 {
1509 	struct xe_device *xe = ct_to_xe(ct);
1510 	struct xe_gt *gt = ct_to_gt(ct);
1511 	struct guc_ctb *g2h = &ct->ctbs.g2h;
1512 	u32 tail, head, len, desc_status;
1513 	s32 avail;
1514 	u32 action;
1515 	u32 *hxg;
1516 
1517 	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
1518 	lockdep_assert_held(&ct->fast_lock);
1519 
1520 	if (ct->state == XE_GUC_CT_STATE_DISABLED)
1521 		return -ENODEV;
1522 
1523 	if (ct->state == XE_GUC_CT_STATE_STOPPED)
1524 		return -ECANCELED;
1525 
1526 	if (g2h->info.broken)
1527 		return -EPIPE;
1528 
1529 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
1530 
1531 	desc_status = desc_read(xe, g2h, status);
1532 	if (desc_status) {
1533 		if (desc_status & GUC_CTB_STATUS_DISABLED) {
1534 			/*
1535 			 * Potentially valid if a CLIENT_RESET request resulted in
1536 			 * contexts/engines being reset. But should never happen as
1537 			 * no contexts should be active when CLIENT_RESET is sent.
1538 			 */
1539 			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
1540 			desc_status &= ~GUC_CTB_STATUS_DISABLED;
1541 		}
1542 
1543 		if (desc_status) {
1544 			xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
1545 			goto corrupted;
1546 		}
1547 	}
1548 
1549 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
1550 		u32 desc_tail = desc_read(xe, g2h, tail);
1551 		/*
1552 		 * info.head and desc_head are updated back-to-back at the end of
1553 		 * this function and nowhere else. Hence, they cannot be different
1554 		 * unless two g2h_read calls are running concurrently. Which is not
1555 		 * possible because it is guarded by ct->fast_lock. And yet, some
1556 		 * discrete platforms are regularly hitting this error :(.
1557 		 *
1558 		 * desc_head rolling backwards shouldn't cause any noticeable
1559 		 * problems - just a delay in GuC being allowed to proceed past that
1560 		 * point in the queue. So for now, just disable the error until it
1561 		 * can be root caused.
1562 		 *
1563 		u32 desc_head = desc_read(xe, g2h, head);
1564 
1565 		if (g2h->info.head != desc_head) {
1566 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
1567 			xe_gt_err(gt, "CT read: head was modified %u != %u\n",
1568 				  desc_head, g2h->info.head);
1569 			goto corrupted;
1570 		}
1571 		 */
1572 
1573 		if (g2h->info.head > g2h->info.size) {
1574 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1575 			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
1576 				  g2h->info.head, g2h->info.size);
1577 			goto corrupted;
1578 		}
1579 
1580 		if (desc_tail >= g2h->info.size) {
1581 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1582 			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
1583 				  desc_tail, g2h->info.size);
1584 			goto corrupted;
1585 		}
1586 	}
1587 
1588 	/* Calculate DW available to read */
1589 	tail = desc_read(xe, g2h, tail);
1590 	avail = tail - g2h->info.head;
1591 	if (unlikely(avail == 0))
1592 		return 0;
1593 
1594 	if (avail < 0)
1595 		avail += g2h->info.size;
1596 
1597 	/* Read header */
1598 	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
1599 			   sizeof(u32));
1600 	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1601 	if (len > avail) {
1602 		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
1603 			  avail, len);
1604 		goto corrupted;
1605 	}
1606 
1607 	head = (g2h->info.head + 1) % g2h->info.size;
1608 	avail = len - 1;
1609 
1610 	/* Read G2H message */
1611 	if (avail + head > g2h->info.size) {
1612 		u32 avail_til_wrap = g2h->info.size - head;
1613 
1614 		xe_map_memcpy_from(xe, msg + 1,
1615 				   &g2h->cmds, sizeof(u32) * head,
1616 				   avail_til_wrap * sizeof(u32));
1617 		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
1618 				   &g2h->cmds, 0,
1619 				   (avail - avail_til_wrap) * sizeof(u32));
1620 	} else {
1621 		xe_map_memcpy_from(xe, msg + 1,
1622 				   &g2h->cmds, sizeof(u32) * head,
1623 				   avail * sizeof(u32));
1624 	}
1625 
1626 	hxg = msg_to_hxg(msg);
1627 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1628 
1629 	if (fast_path) {
1630 		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1631 			return 0;
1632 
1633 		switch (action) {
1634 		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1635 		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1636 			break;	/* Process these in fast-path */
1637 		default:
1638 			return 0;
1639 		}
1640 	}
1641 
1642 	/* Update local / descriptor header */
1643 	g2h->info.head = (head + avail) % g2h->info.size;
1644 	desc_write(xe, g2h, head, g2h->info.head);
1645 
1646 	trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
1647 			     action, len, g2h->info.head, tail);
1648 
1649 	return len;
1650 
1651 corrupted:
1652 	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
1653 	return -EPROTO;
1654 }
1655 
1656 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1657 {
1658 	struct xe_gt *gt = ct_to_gt(ct);
1659 	struct xe_guc *guc = ct_to_guc(ct);
1660 	u32 hxg_len = msg_len_to_hxg_len(len);
1661 	u32 *hxg = msg_to_hxg(msg);
1662 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1663 	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
1664 	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
1665 	int ret = 0;
1666 
1667 	switch (action) {
1668 	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1669 		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1670 		break;
1671 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1672 		__g2h_release_space(ct, len);
1673 		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1674 		break;
1675 	default:
1676 		xe_gt_warn(gt, "NOT_POSSIBLE");
1677 	}
1678 
1679 	if (ret) {
1680 		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
1681 			  action, ERR_PTR(ret));
1682 		CT_DEAD(ct, NULL, FAST_G2H);
1683 	}
1684 }
1685 
1686 /**
1687  * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
1688  * @ct: GuC CT object
1689  *
1690  * Anything related to page faults is critical for performance, process these
1691  * critical G2H in the IRQ. This is safe as these handlers either just wake up
1692  * waiters or queue another worker.
1693  */
1694 void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1695 {
1696 	struct xe_device *xe = ct_to_xe(ct);
1697 	bool ongoing;
1698 	int len;
1699 
1700 	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1701 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1702 		return;
1703 
1704 	spin_lock(&ct->fast_lock);
1705 	do {
1706 		len = g2h_read(ct, ct->fast_msg, true);
1707 		if (len > 0)
1708 			g2h_fast_path(ct, ct->fast_msg, len);
1709 	} while (len > 0);
1710 	spin_unlock(&ct->fast_lock);
1711 
1712 	if (ongoing)
1713 		xe_pm_runtime_put(xe);
1714 }
1715 
1716 /* Returns less than zero on error, 0 on done, 1 on more available */
1717 static int dequeue_one_g2h(struct xe_guc_ct *ct)
1718 {
1719 	int len;
1720 	int ret;
1721 
1722 	lockdep_assert_held(&ct->lock);
1723 
1724 	spin_lock_irq(&ct->fast_lock);
1725 	len = g2h_read(ct, ct->msg, false);
1726 	spin_unlock_irq(&ct->fast_lock);
1727 	if (len <= 0)
1728 		return len;
1729 
1730 	ret = parse_g2h_msg(ct, ct->msg, len);
1731 	if (unlikely(ret < 0))
1732 		return ret;
1733 
1734 	ret = process_g2h_msg(ct, ct->msg, len);
1735 	if (unlikely(ret < 0))
1736 		return ret;
1737 
1738 	return 1;
1739 }
1740 
1741 static void receive_g2h(struct xe_guc_ct *ct)
1742 {
1743 	bool ongoing;
1744 	int ret;
1745 
1746 	/*
1747 	 * Normal users must always hold mem_access.ref around CT calls. However
1748 	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
1749 	 * at this stage we can't rely on mem_access.ref and even the
1750 	 * callback_task will be different than current.  For such cases we just
1751 	 * need to ensure we always process the responses from any blocking
1752 	 * ct_send requests or where we otherwise expect some response when
1753 	 * initiated from those callbacks (which will need to wait for the below
1754 	 * dequeue_one_g2h()).  The dequeue_one_g2h() will gracefully fail if
1755 	 * the device has suspended to the point that the CT communication has
1756 	 * been disabled.
1757 	 *
1758 	 * If we are inside the runtime pm callback, we can be the only task
1759 	 * still issuing CT requests (since that requires having the
1760 	 * mem_access.ref).  It seems like it might in theory be possible to
1761 	 * receive unsolicited events from the GuC just as we are
1762 	 * suspending-resuming, but those will currently anyway be lost when
1763 	 * eventually exiting from suspend, hence no need to wake up the device
1764  * here. If we ever need something stronger than xe_pm_runtime_get_if_active() then
1765 	 * we need to be careful with blocking the pm callbacks from getting CT
1766 	 * responses, if the worker here is blocked on those callbacks
1767 	 * completing, creating a deadlock.
1768 	 */
1769 	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1770 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1771 		return;
1772 
1773 	do {
1774 		mutex_lock(&ct->lock);
1775 		ret = dequeue_one_g2h(ct);
1776 		mutex_unlock(&ct->lock);
1777 
1778 		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
1779 			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
1780 			CT_DEAD(ct, NULL, G2H_RECV);
1781 			kick_reset(ct);
1782 		}
1783 	} while (ret == 1);
1784 
1785 	if (ongoing)
1786 		xe_pm_runtime_put(ct_to_xe(ct));
1787 }
1788 
1789 static void g2h_worker_func(struct work_struct *w)
1790 {
1791 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1792 
1793 	receive_g2h(ct);
1794 }
1795 
1796 static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds,
1797 				 u32 size, u32 idx, s64 shift)
1798 {
1799 	u32 hi, lo;
1800 	u64 offset;
1801 
1802 	lo = xe_map_rd_ring_u32(xe, cmds, idx, size);
1803 	hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size);
1804 	offset = make_u64(hi, lo);
1805 	offset += shift;
1806 	lo = lower_32_bits(offset);
1807 	hi = upper_32_bits(offset);
1808 	xe_map_wr_ring_u32(xe, cmds, idx, size, lo);
1809 	xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi);
1810 }
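
/*
 * Worked example for the helper above (numbers are illustrative): a GGTT
 * address is stored as two consecutive ring dwords, lo at @idx and hi at
 * @idx + 1. With lo = 0xffff0000, hi = 0x1 and shift = 0x20000, the 64-bit
 * value 0x1ffff0000 becomes 0x200010000 and is written back as
 * lo = 0x00010000, hi = 0x2. Wrapping around @size is handled by the
 * xe_map_rd_ring_u32()/xe_map_wr_ring_u32() ring accessors.
 */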
1811 
1812 /*
1813  * Shift any GGTT addresses within a single message that was left in the
1814  * CTB from before post-migration recovery.
1815  * @ct: pointer to CT struct of the target GuC
1816  * @cmds: iomap buffer containing CT messages
1817  * @head: start of the target message within the buffer
1818  * @len: length of the target message
1819  * @size: size of the commands buffer
1820  * @shift: the address shift to be added to each GGTT reference
1821  * Return: true if the message was fixed or needed no fixups, false on failure
1822  */
1823 static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct,
1824 				     struct iosys_map *cmds, u32 head,
1825 				     u32 len, u32 size, s64 shift)
1826 {
1827 	struct xe_gt *gt = ct_to_gt(ct);
1828 	struct xe_device *xe = ct_to_xe(ct);
1829 	u32 msg[GUC_HXG_MSG_MIN_LEN];
1830 	u32 action, i, n;
1831 
1832 	xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN);
1833 
1834 	msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size);
1835 	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
1836 
1837 	xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action);
1838 
1839 	switch (action) {
1840 	case XE_GUC_ACTION_REGISTER_CONTEXT:
1841 		if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN)
1842 			goto err_len;
1843 		xe_fixup_u64_in_cmds(xe, cmds, size, head +
1844 				     XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
1845 				     shift);
1846 		xe_fixup_u64_in_cmds(xe, cmds, size, head +
1847 				     XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
1848 				     shift);
1849 		xe_fixup_u64_in_cmds(xe, cmds, size, head +
1850 				     XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
1851 		break;
1852 	case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
1853 		if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN)
1854 			goto err_len;
1855 		n = xe_map_rd_ring_u32(xe, cmds, head +
1856 				       XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size);
1857 		if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n)
1858 			goto err_len;
1859 		xe_fixup_u64_in_cmds(xe, cmds, size, head +
1860 				     XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
1861 				     shift);
1862 		xe_fixup_u64_in_cmds(xe, cmds, size, head +
1863 				     XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
1864 				     shift);
1865 		for (i = 0; i < n; i++)
1866 			xe_fixup_u64_in_cmds(xe, cmds, size, head +
1867 					     XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
1868 					     + 2 * i, shift);
1869 		break;
1870 	default:
1871 		break;
1872 	}
1873 	return true;
1874 
1875 err_len:
1876 	xe_gt_err(gt, "Skipped H2G %#x message fixups, unexpected length (%u)\n", action, len);
1877 	return false;
1878 }
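
/*
 * Length check example for the multi-LRC case above: with n = 3 contexts the
 * message must be exactly XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 6
 * dwords long, and the HW LRC addresses at DATA_11 + 0, DATA_11 + 2 and
 * DATA_11 + 4 each get the GGTT shift applied as 64-bit fixups.
 */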
1879 
1880 /*
1881  * Apply fixups to the next outgoing CT message within the given CTB
1882  * @ct: the &xe_guc_ct struct instance representing the target GuC
1883  * @h2g: the &guc_ctb struct instance of the target buffer
1884  * @shift: shift to be added to all GGTT addresses within the CTB
1885  * @mhead: pointer to an integer storing message start position; the
1886  *   position is advanced to the next message before this function returns
1887  * @avail: size of the area available for parsing, that is, the length of
1888  *   all remaining messages stored within the CTB
1889  * Return: size of the area still available for parsing after one message
1890  *   has been parsed, that is, the length remaining from the updated mhead
1891  */
1892 static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g,
1893 				   s64 shift, u32 *mhead, s32 avail)
1894 {
1895 	struct xe_gt *gt = ct_to_gt(ct);
1896 	struct xe_device *xe = ct_to_xe(ct);
1897 	u32 msg[GUC_HXG_MSG_MIN_LEN];
1898 	u32 size = h2g->info.size;
1899 	u32 head = *mhead;
1900 	u32 len;
1901 
1902 	xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN);
1903 
1904 	/* Read header */
1905 	msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size);
1906 	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1907 
1908 	if (unlikely(len > (u32)avail)) {
1909 		xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
1910 			  avail, len);
1911 		return 0;
1912 	}
1913 
1914 	head = (head + GUC_CTB_MSG_MIN_LEN) % size;
1915 	if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift))
1916 		return 0;
1917 	*mhead = (head + msg_len_to_hxg_len(len)) % size;
1918 
1919 	return avail - len;
1920 }
1921 
1922 /**
1923  * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
1924  * @ct: pointer to CT struct of the target GuC
1925  * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
1926  *
1927  * Messages in GuC to Host CTB are owned by GuC and any fixups in them
1928  * are made by GuC. But content of the Host to GuC CTB is owned by the
1929  * KMD, so fixups to GGTT references in any pending messages need to be
1930  * applied here.
1931  * This function updates GGTT offsets in payloads of pending H2G CTB
1932  * messages (messages which were not consumed by GuC before the VF got
1933  * paused).
1934  */
1935 void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
1936 {
1937 	struct guc_ctb *h2g = &ct->ctbs.h2g;
1938 	struct xe_guc *guc = ct_to_guc(ct);
1939 	struct xe_gt *gt = guc_to_gt(guc);
1940 	u32 head, tail, size;
1941 	s32 avail;
1942 
1943 	if (unlikely(h2g->info.broken))
1944 		return;
1945 
1946 	h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
1947 	head = h2g->info.head;
1948 	tail = READ_ONCE(h2g->info.tail);
1949 	size = h2g->info.size;
1950 
1951 	if (unlikely(head > size))
1952 		goto corrupted;
1953 
1954 	if (unlikely(tail >= size))
1955 		goto corrupted;
1956 
1957 	avail = tail - head;
1958 
1959 	/* beware of buffer wrap case */
1960 	if (unlikely(avail < 0))
1961 		avail += size;
1962 	xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
1963 	xe_gt_assert(gt, avail >= 0);
1964 
1965 	while (avail > 0)
1966 		avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail);
1967 
1968 	return;
1969 
1970 corrupted:
1971 	xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
1972 		  head, tail, size);
1973 	h2g->info.broken = true;
1974 }
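
/*
 * Usage sketch (hypothetical caller, for illustration only): a VF
 * post-migration recovery path would compute the GGTT relocation delta and
 * then patch any H2G messages still pending in the CTB, e.g.:
 *
 *	s64 ggtt_shift = new_ggtt_base - old_ggtt_base;	// assumed bookkeeping
 *
 *	xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, ggtt_shift);
 *
 * The new_ggtt_base/old_ggtt_base bookkeeping and the local gt pointer are
 * assumptions; only the exported function and its s64 ggtt_shift parameter
 * come from this file.
 */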
1975 
1976 static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
1977 							bool want_ctb)
1978 {
1979 	struct xe_guc_ct_snapshot *snapshot;
1980 
1981 	snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
1982 	if (!snapshot)
1983 		return NULL;
1984 
1985 	if (ct->bo && want_ctb) {
1986 		snapshot->ctb_size = xe_bo_size(ct->bo);
1987 		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
1988 	}
1989 
1990 	return snapshot;
1991 }
1992 
1993 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
1994 				     struct guc_ctb_snapshot *snapshot)
1995 {
1996 	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
1997 			   sizeof(struct guc_ct_buffer_desc));
1998 	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
1999 }
2000 
2001 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
2002 				   struct drm_printer *p)
2003 {
2004 	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
2005 	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
2006 	drm_printf(p, "\thead: %d\n", snapshot->info.head);
2007 	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
2008 	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
2009 	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
2010 	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
2011 	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
2012 	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
2013 }
2014 
2015 static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
2016 							  bool want_ctb)
2017 {
2018 	struct xe_device *xe = ct_to_xe(ct);
2019 	struct xe_guc_ct_snapshot *snapshot;
2020 
2021 	snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
2022 	if (!snapshot) {
2023 		xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
2024 		return NULL;
2025 	}
2026 
2027 	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
2028 		snapshot->ct_enabled = true;
2029 		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
2030 		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
2031 		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
2032 	}
2033 
2034 	if (ct->bo && snapshot->ctb)
2035 		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
2036 
2037 	return snapshot;
2038 }
2039 
2040 /**
2041  * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
2042  * @ct: GuC CT object.
2043  *
2044  * The snapshot can be printed out at a later stage, e.g. during dev_coredump
2045  * analysis. This function is safe to call from atomic context.
2046  *
2047  * Returns: a GuC CT snapshot object that must be freed by the caller
2048  * by using `xe_guc_ct_snapshot_free`.
2049  */
2050 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
2051 {
2052 	return guc_ct_snapshot_capture(ct, true, true);
2053 }
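
/*
 * Lifecycle sketch: a caller such as the devcoredump path is expected to pair
 * the capture with the print and free helpers below, e.g.:
 *
 *	struct xe_guc_ct_snapshot *snapshot = xe_guc_ct_snapshot_capture(ct);
 *
 *	xe_guc_ct_snapshot_print(snapshot, p);	// p is a struct drm_printer *
 *	xe_guc_ct_snapshot_free(snapshot);
 *
 * Both helpers accept a NULL snapshot, so a failed capture does not need a
 * separate error path. xe_guc_ct_print() below follows the same pattern for
 * the non-atomic case.
 */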
2054 
2055 /**
2056  * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
2057  * @snapshot: GuC CT snapshot object.
2058  * @p: drm_printer where it will be printed out.
2059  *
2060  * This function prints out a given GuC CT snapshot object.
2061  */
2062 void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
2063 			      struct drm_printer *p)
2064 {
2065 	if (!snapshot)
2066 		return;
2067 
2068 	if (snapshot->ct_enabled) {
2069 		drm_puts(p, "H2G CTB (all sizes in DW):\n");
2070 		guc_ctb_snapshot_print(&snapshot->h2g, p);
2071 
2072 		drm_puts(p, "G2H CTB (all sizes in DW):\n");
2073 		guc_ctb_snapshot_print(&snapshot->g2h, p);
2074 		drm_printf(p, "\tg2h outstanding: %d\n",
2075 			   snapshot->g2h_outstanding);
2076 
2077 		if (snapshot->ctb) {
2078 			drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
2079 			xe_print_blob_ascii85(p, "[CTB].data", '\n',
2080 					      snapshot->ctb, 0, snapshot->ctb_size);
2081 		}
2082 	} else {
2083 		drm_puts(p, "CT disabled\n");
2084 	}
2085 }
2086 
2087 /**
2088  * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
2089  * @snapshot: GuC CT snapshot object.
2090  *
2091  * This function frees all the memory that was allocated at capture
2092  * time.
2093  */
2094 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
2095 {
2096 	if (!snapshot)
2097 		return;
2098 
2099 	kfree(snapshot->ctb);
2100 	kfree(snapshot);
2101 }
2102 
2103 /**
2104  * xe_guc_ct_print - GuC CT Print.
2105  * @ct: GuC CT.
2106  * @p: drm_printer where it will be printed out.
2107  * @want_ctb: Should the full CTB content be dumped (vs just the headers)
2108  *
2109  * This function will quickly capture a snapshot of the CT state
2110  * and immediately print it out.
2111  */
2112 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
2113 {
2114 	struct xe_guc_ct_snapshot *snapshot;
2115 
2116 	snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
2117 	xe_guc_ct_snapshot_print(snapshot, p);
2118 	xe_guc_ct_snapshot_free(snapshot);
2119 }
2120 
2121 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
2122 
2123 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
2124 /*
2125  * This is a helper function which assists the driver in identifying if a fault
2126  * injection test is currently active, allowing it to reduce unnecessary debug
2127  * output. Typically, the function returns zero, but the fault injection
2128  * framework can alter this to return an error. Since faults are injected
2129  * through this function, it's important to ensure the compiler doesn't optimize
2130  * it into an inline function. To avoid such optimization, the 'noinline'
2131  * attribute is applied; otherwise the compiler could inline the call and
2132  * the injected fault would never take effect.
2133  */
2134 noinline int xe_is_injection_active(void) { return 0; }
2135 ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
2136 #else
2137 int xe_is_injection_active(void) { return 0; }
2138 #endif
2139 
2140 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
2141 {
2142 	struct xe_guc_log_snapshot *snapshot_log;
2143 	struct xe_guc_ct_snapshot *snapshot_ct;
2144 	struct xe_guc *guc = ct_to_guc(ct);
2145 	unsigned long flags;
2146 	bool have_capture;
2147 
2148 	if (ctb)
2149 		ctb->info.broken = true;
2150 	/*
2151 	 * A huge dump gets generated when errors are injected into the GuC
2152 	 * CT/MMIO functions, so suppress the dump while a fault is injected.
2153 	 */
2154 	if (xe_is_injection_active())
2155 		return;
2156 
2157 	/* Ignore further errors after the first dump until a reset */
2158 	if (ct->dead.reported)
2159 		return;
2160 
2161 	spin_lock_irqsave(&ct->dead.lock, flags);
2162 
2163 	/* And only capture one dump at a time */
2164 	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
2165 	ct->dead.reason |= (1 << reason_code) |
2166 			   (1 << CT_DEAD_STATE_CAPTURE);
2167 
2168 	spin_unlock_irqrestore(&ct->dead.lock, flags);
2169 
2170 	if (have_capture)
2171 		return;
2172 
2173 	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
2174 	snapshot_ct = xe_guc_ct_snapshot_capture(ct);
2175 
2176 	spin_lock_irqsave(&ct->dead.lock, flags);
2177 
2178 	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
2179 		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
2180 		xe_guc_log_snapshot_free(snapshot_log);
2181 		xe_guc_ct_snapshot_free(snapshot_ct);
2182 	} else {
2183 		ct->dead.snapshot_log = snapshot_log;
2184 		ct->dead.snapshot_ct = snapshot_ct;
2185 	}
2186 
2187 	spin_unlock_irqrestore(&ct->dead.lock, flags);
2188 
2189 	queue_work(system_unbound_wq, &ct->dead.worker);
2190 }
2191 
2192 static void ct_dead_print(struct xe_dead_ct *dead)
2193 {
2194 	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
2195 	struct xe_device *xe = ct_to_xe(ct);
2196 	struct xe_gt *gt = ct_to_gt(ct);
2197 	static int g_count;
2198 	struct drm_printer ip = xe_gt_info_printer(gt);
2199 	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
2200 
2201 	if (!dead->reason) {
2202 		xe_gt_err(gt, "CTB is dead for no reason!?\n");
2203 		return;
2204 	}
2205 
2206 	/* Can't generate a genuine core dump at this point, so just do the good bits */
2207 	drm_puts(&lp, "**** Xe Device Coredump ****\n");
2208 	drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
2209 	xe_device_snapshot_print(xe, &lp);
2210 
2211 	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
2212 	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
2213 
2214 	drm_puts(&lp, "**** GuC Log ****\n");
2215 	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
2216 
2217 	drm_puts(&lp, "**** GuC CT ****\n");
2218 	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
2219 
2220 	drm_puts(&lp, "Done.\n");
2221 }
2222 
2223 static void ct_dead_worker_func(struct work_struct *w)
2224 {
2225 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
2226 
2227 	if (!ct->dead.reported) {
2228 		ct->dead.reported = true;
2229 		ct_dead_print(&ct->dead);
2230 	}
2231 
2232 	spin_lock_irq(&ct->dead.lock);
2233 
2234 	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
2235 	ct->dead.snapshot_log = NULL;
2236 	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
2237 	ct->dead.snapshot_ct = NULL;
2238 
2239 	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
2240 		/* A reset has occurred so re-arm the error reporting */
2241 		ct->dead.reason = 0;
2242 		ct->dead.reported = false;
2243 	}
2244 
2245 	spin_unlock_irq(&ct->dead.lock);
2246 }
2247 #endif
2248