xref: /linux/drivers/gpu/drm/xe/xe_guc_ct.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_ct.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/circ_buf.h>
10 #include <linux/delay.h>
11 #include <linux/fault-inject.h>
12 
13 #include <kunit/static_stub.h>
14 
15 #include <drm/drm_managed.h>
16 
17 #include "abi/guc_actions_abi.h"
18 #include "abi/guc_actions_sriov_abi.h"
19 #include "abi/guc_klvs_abi.h"
20 #include "xe_bo.h"
21 #include "xe_devcoredump.h"
22 #include "xe_device.h"
23 #include "xe_gt.h"
24 #include "xe_gt_printk.h"
25 #include "xe_gt_sriov_pf_control.h"
26 #include "xe_gt_sriov_pf_monitor.h"
27 #include "xe_guc.h"
28 #include "xe_guc_log.h"
29 #include "xe_guc_pagefault.h"
30 #include "xe_guc_relay.h"
31 #include "xe_guc_submit.h"
32 #include "xe_guc_tlb_inval.h"
33 #include "xe_map.h"
34 #include "xe_pm.h"
35 #include "xe_sriov_vf.h"
36 #include "xe_trace_guc.h"
37 
38 static void receive_g2h(struct xe_guc_ct *ct);
39 static void g2h_worker_func(struct work_struct *w);
40 static void safe_mode_worker_func(struct work_struct *w);
41 static void ct_exit_safe_mode(struct xe_guc_ct *ct);
42 static void guc_ct_change_state(struct xe_guc_ct *ct,
43 				enum xe_guc_ct_state state);
44 
45 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
46 enum {
47 	/* Internal states, not error conditions */
48 	CT_DEAD_STATE_REARM,			/* 0x0001 */
49 	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */
50 
51 	/* Error conditions */
52 	CT_DEAD_SETUP,				/* 0x0004 */
53 	CT_DEAD_H2G_WRITE,			/* 0x0008 */
54 	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
55 	CT_DEAD_G2H_READ,			/* 0x0020 */
56 	CT_DEAD_G2H_RECV,			/* 0x0040 */
57 	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
58 	CT_DEAD_DEADLOCK,			/* 0x0100 */
59 	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
60 	CT_DEAD_FAST_G2H,			/* 0x0400 */
61 	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
62 	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
63 	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
64 	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
65 	CT_DEAD_CRASH,				/* 0x8000 */
66 };
67 
68 static void ct_dead_worker_func(struct work_struct *w);
69 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
70 
71 #define CT_DEAD(ct, ctb, reason_code)		ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
72 #else
73 #define CT_DEAD(ct, ctb, reason)			\
74 	do {						\
75 		struct guc_ctb *_ctb = (ctb);		\
76 		if (_ctb)				\
77 			_ctb->info.broken = true;	\
78 	} while (0)
79 #endif
80 
81 /* Used when a CT send wants to block and / or receive data */
82 struct g2h_fence {
83 	u32 *response_buffer;
84 	u32 seqno;
85 	u32 response_data;
86 	u16 response_len;
87 	u16 error;
88 	u16 hint;
89 	u16 reason;
90 	bool cancel;
91 	bool retry;
92 	bool fail;
93 	bool done;
94 };
95 
96 static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
97 {
98 	memset(g2h_fence, 0, sizeof(*g2h_fence));
99 	g2h_fence->response_buffer = response_buffer;
100 	g2h_fence->seqno = ~0x0;
101 }
102 
103 static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
104 {
105 	g2h_fence->cancel = true;
106 	g2h_fence->fail = true;
107 	g2h_fence->done = true;
108 }
109 
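/*
 * g2h_fence_init() sets seqno to ~0 to mark that no real seqno has been
 * assigned yet and that the fence is not yet stored in ct->fence_lookup;
 * __guc_ct_send_locked() allocates the seqno on first use.
 */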
110 static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
111 {
112 	return g2h_fence->seqno == ~0x0;
113 }
114 
115 static struct xe_guc *
116 ct_to_guc(struct xe_guc_ct *ct)
117 {
118 	return container_of(ct, struct xe_guc, ct);
119 }
120 
121 static struct xe_gt *
122 ct_to_gt(struct xe_guc_ct *ct)
123 {
124 	return container_of(ct, struct xe_gt, uc.guc.ct);
125 }
126 
127 static struct xe_device *
128 ct_to_xe(struct xe_guc_ct *ct)
129 {
130 	return gt_to_xe(ct_to_gt(ct));
131 }
132 
133 /**
134  * DOC: GuC CTB Blob
135  *
136  * We allocate single blob to hold both CTB descriptors and buffers:
137  *
138  *      +--------+-----------------------------------------------+------+
139  *      | offset | contents                                      | size |
140  *      +========+===============================================+======+
141  *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
142  *      +--------+-----------------------------------------------+  4K  |
143  *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
144  *      +--------+-----------------------------------------------+------+
145  *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
146  *      |        |                                               |      |
147  *      +--------+-----------------------------------------------+------+
148  *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
149  *      | + n*4K |                                               |      |
150  *      +--------+-----------------------------------------------+------+
151  *
152  * Size of each ``CT Buffer`` must be a multiple of 4K.
153  * We don't expect too many messages in flight at any time, unless we are
154  * using GuC submission. In that case each request requires a minimum of
155  * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
156  * is enough space to avoid backpressure on the driver. We increase the size
157  * of the receive buffer (relative to the send buffer) to ensure a G2H
158  * response always has a landing spot.
159  *
160  * In addition to submissions, the G2H buffer needs to be able to hold
161  * enough space for recoverable page fault notifications. The number of
162  * page faults is interrupt driven and can be as much as the number of
163  * compute resources available. However, most of the actual work for these
164  * is in a separate page fault worker thread. Therefore we only need to
165  * make sure the queue has enough space to handle all of the submissions
166  * and responses and an extra buffer for incoming page faults.
167  */
168 
169 #define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
170 #define CTB_H2G_BUFFER_OFFSET	(CTB_DESC_SIZE * 2)
171 #define CTB_H2G_BUFFER_SIZE	(SZ_4K)
172 #define CTB_G2H_BUFFER_SIZE	(SZ_128K)
173 #define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)
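/*
 * Rough layout with the sizes above (derived from the defines, see the
 * diagram in the DOC section): each descriptor slot is 2K (CTB_DESC_SIZE),
 * so the H2G buffer starts at offset 4K (CTB_H2G_BUFFER_OFFSET) and is 4K
 * long, the G2H buffer follows at offset 8K and is 128K long, for a total
 * allocation of 136K (guc_ct_size()). Half of the G2H buffer
 * (G2H_ROOM_BUFFER_SIZE) is held back from the credit accounting so that
 * unsolicited G2H messages, e.g. page faults, always have room.
 */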
174 
175 /**
176  * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
177  * CT command queue
178  * @ct: the &xe_guc_ct. Unused at the moment but will be used in the future.
179  *
180  * Observation is that a 4KiB buffer full of commands takes a little over a
181  * second to process. Use that to calculate maximum time to process a full CT
182  * command queue.
183  *
184  * Return: Maximum time to process a full CT queue in jiffies.
185  */
186 long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
187 {
188 	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
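	/* With the current 4K CTB_H2G_BUFFER_SIZE this works out to 1 * HZ, i.e. one second. */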
189 	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
190 }
191 
192 static size_t guc_ct_size(void)
193 {
194 	return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
195 		CTB_G2H_BUFFER_SIZE;
196 }
197 
198 static void guc_ct_fini(struct drm_device *drm, void *arg)
199 {
200 	struct xe_guc_ct *ct = arg;
201 
202 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
203 	cancel_work_sync(&ct->dead.worker);
204 #endif
205 	ct_exit_safe_mode(ct);
206 	destroy_workqueue(ct->g2h_wq);
207 	xa_destroy(&ct->fence_lookup);
208 }
209 
210 static void primelockdep(struct xe_guc_ct *ct)
211 {
212 	if (!IS_ENABLED(CONFIG_LOCKDEP))
213 		return;
214 
215 	fs_reclaim_acquire(GFP_KERNEL);
216 	might_lock(&ct->lock);
217 	fs_reclaim_release(GFP_KERNEL);
218 }
219 
220 int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
221 {
222 	struct xe_device *xe = ct_to_xe(ct);
223 	struct xe_gt *gt = ct_to_gt(ct);
224 	int err;
225 
226 	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
227 
228 	err = drmm_mutex_init(&xe->drm, &ct->lock);
229 	if (err)
230 		return err;
231 
232 	primelockdep(ct);
233 
234 	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
235 	if (!ct->g2h_wq)
236 		return -ENOMEM;
237 
238 	spin_lock_init(&ct->fast_lock);
239 	xa_init(&ct->fence_lookup);
240 	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
241 	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
242 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
243 	spin_lock_init(&ct->dead.lock);
244 	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
245 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
246 	stack_depot_init();
247 #endif
248 #endif
249 	init_waitqueue_head(&ct->wq);
250 	init_waitqueue_head(&ct->g2h_fence_wq);
251 
252 	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
253 	if (err)
254 		return err;
255 
256 	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
257 	ct->state = XE_GUC_CT_STATE_DISABLED;
258 	return 0;
259 }
260 ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
261 
262 static void guc_action_disable_ct(void *arg)
263 {
264 	struct xe_guc_ct *ct = arg;
265 
266 	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
267 }
268 
269 int xe_guc_ct_init(struct xe_guc_ct *ct)
270 {
271 	struct xe_device *xe = ct_to_xe(ct);
272 	struct xe_gt *gt = ct_to_gt(ct);
273 	struct xe_tile *tile = gt_to_tile(gt);
274 	struct xe_bo *bo;
275 
276 	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
277 					  XE_BO_FLAG_SYSTEM |
278 					  XE_BO_FLAG_GGTT |
279 					  XE_BO_FLAG_GGTT_INVALIDATE |
280 					  XE_BO_FLAG_PINNED_NORESTORE);
281 	if (IS_ERR(bo))
282 		return PTR_ERR(bo);
283 
284 	ct->bo = bo;
285 
286 	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
287 }
288 ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
289 
290 /**
291  * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
292  * @ct: the &xe_guc_ct
293  *
294  * Allocate a new BO in VRAM and free the previous BO that was allocated
295  * in system memory (SMEM). Applicable only for DGFX products.
296  *
297  * Return: 0 on success, or a negative errno on failure.
298  */
299 int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
300 {
301 	struct xe_device *xe = ct_to_xe(ct);
302 	struct xe_gt *gt = ct_to_gt(ct);
303 	struct xe_tile *tile = gt_to_tile(gt);
304 	int ret;
305 
306 	xe_assert(xe, !xe_guc_ct_enabled(ct));
307 
308 	if (IS_DGFX(xe)) {
309 		ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
310 		if (ret)
311 			return ret;
312 	}
313 
314 	devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
315 	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
316 }
317 
318 #define desc_read(xe_, guc_ctb__, field_)			\
319 	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
320 			struct guc_ct_buffer_desc, field_)
321 
322 #define desc_write(xe_, guc_ctb__, field_, val_)		\
323 	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
324 			struct guc_ct_buffer_desc, field_, val_)
325 
326 static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
327 				struct iosys_map *map)
328 {
329 	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
330 	h2g->info.resv_space = 0;
331 	h2g->info.tail = 0;
332 	h2g->info.head = 0;
333 	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
334 				     h2g->info.size) -
335 			  h2g->info.resv_space;
336 	h2g->info.broken = false;
337 
338 	h2g->desc = *map;
339 	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
340 
341 	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
342 }
343 
344 static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
345 				struct iosys_map *map)
346 {
347 	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
348 	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
349 	g2h->info.head = 0;
350 	g2h->info.tail = 0;
351 	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
352 				     g2h->info.size) -
353 			  g2h->info.resv_space;
354 	g2h->info.broken = false;
355 
356 	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
357 	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
358 
359 	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
360 					    CTB_H2G_BUFFER_SIZE);
361 }
362 
363 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
364 {
365 	struct xe_guc *guc = ct_to_guc(ct);
366 	u32 desc_addr, ctb_addr, size;
367 	int err;
368 
369 	desc_addr = xe_bo_ggtt_addr(ct->bo);
370 	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
371 	size = ct->ctbs.h2g.info.size * sizeof(u32);
372 
373 	err = xe_guc_self_cfg64(guc,
374 				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
375 				desc_addr);
376 	if (err)
377 		return err;
378 
379 	err = xe_guc_self_cfg64(guc,
380 				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
381 				ctb_addr);
382 	if (err)
383 		return err;
384 
385 	return xe_guc_self_cfg32(guc,
386 				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
387 				 size);
388 }
389 
390 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
391 {
392 	struct xe_guc *guc = ct_to_guc(ct);
393 	u32 desc_addr, ctb_addr, size;
394 	int err;
395 
396 	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
397 	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
398 		CTB_H2G_BUFFER_SIZE;
399 	size = ct->ctbs.g2h.info.size * sizeof(u32);
400 
401 	err = xe_guc_self_cfg64(guc,
402 				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
403 				desc_addr);
404 	if (err)
405 		return err;
406 
407 	err = xe_guc_self_cfg64(guc,
408 				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
409 				ctb_addr);
410 	if (err)
411 		return err;
412 
413 	return xe_guc_self_cfg32(guc,
414 				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
415 				 size);
416 }
417 
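/*
 * Enable/disable the CTBs on the GuC side. This request goes over MMIO
 * (xe_guc_mmio_send()) because the CT channel cannot be used to enable or
 * disable itself.
 */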
418 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
419 {
420 	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
421 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
422 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
423 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
424 			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
425 		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
426 			   enable ? GUC_CTB_CONTROL_ENABLE :
427 			   GUC_CTB_CONTROL_DISABLE),
428 	};
429 	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
430 
431 	return ret > 0 ? -EPROTO : ret;
432 }
433 
434 static void guc_ct_change_state(struct xe_guc_ct *ct,
435 				enum xe_guc_ct_state state)
436 {
437 	struct xe_gt *gt = ct_to_gt(ct);
438 	struct g2h_fence *g2h_fence;
439 	unsigned long idx;
440 
441 	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
442 	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */
443 
444 	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
445 		     state == XE_GUC_CT_STATE_STOPPED);
446 
447 	if (ct->g2h_outstanding)
448 		xe_pm_runtime_put(ct_to_xe(ct));
449 	ct->g2h_outstanding = 0;
450 	ct->state = state;
451 
452 	xe_gt_dbg(gt, "GuC CT communication channel %s\n",
453 		  state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
454 		  str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));
455 
456 	spin_unlock_irq(&ct->fast_lock);
457 
458 	/* cancel all in-flight send-recv requests */
459 	xa_for_each(&ct->fence_lookup, idx, g2h_fence)
460 		g2h_fence_cancel(g2h_fence);
461 
462 	/* make sure guc_ct_send_recv() will see g2h_fence changes */
463 	smp_mb();
464 	wake_up_all(&ct->g2h_fence_wq);
465 
466 	/*
467 	 * Lockdep doesn't like this under the fast lock and the destroy only
468 	 * needs to be serialized with the send path, which the ct lock provides.
469 	 */
470 	xa_destroy(&ct->fence_lookup);
471 
472 	mutex_unlock(&ct->lock);
473 }
474 
475 static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
476 {
477 	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
478 }
479 
480 static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
481 {
482 	if (!ct_needs_safe_mode(ct))
483 		return false;
484 
485 	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
486 	return true;
487 }
488 
489 static void safe_mode_worker_func(struct work_struct *w)
490 {
491 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
492 
493 	receive_g2h(ct);
494 
495 	if (!ct_restart_safe_mode_worker(ct))
496 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
497 }
498 
499 static void ct_enter_safe_mode(struct xe_guc_ct *ct)
500 {
501 	if (ct_restart_safe_mode_worker(ct))
502 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
503 }
504 
505 static void ct_exit_safe_mode(struct xe_guc_ct *ct)
506 {
507 	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
508 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
509 }
510 
511 static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
512 {
513 	struct xe_device *xe = ct_to_xe(ct);
514 	struct xe_gt *gt = ct_to_gt(ct);
515 	int err;
516 
517 	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
518 
519 	if (needs_register) {
520 		xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
521 		guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
522 		guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
523 
524 		err = guc_ct_ctb_h2g_register(ct);
525 		if (err)
526 			goto err_out;
527 
528 		err = guc_ct_ctb_g2h_register(ct);
529 		if (err)
530 			goto err_out;
531 
532 		err = guc_ct_control_toggle(ct, true);
533 		if (err)
534 			goto err_out;
535 	} else {
536 		ct->ctbs.h2g.info.broken = false;
537 		ct->ctbs.g2h.info.broken = false;
538 		/* Skip everything in H2G buffer */
539 		xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
540 			      CTB_H2G_BUFFER_SIZE);
541 	}
542 
543 	guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
544 
545 	smp_mb();
546 	wake_up_all(&ct->wq);
547 
548 	if (ct_needs_safe_mode(ct))
549 		ct_enter_safe_mode(ct);
550 
551 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
552 	/*
553 	 * The CT has now been reset so the dumper can be re-armed
554 	 * after any existing dead state has been dumped.
555 	 */
556 	spin_lock_irq(&ct->dead.lock);
557 	if (ct->dead.reason) {
558 		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
559 		queue_work(system_unbound_wq, &ct->dead.worker);
560 	}
561 	spin_unlock_irq(&ct->dead.lock);
562 #endif
563 
564 	return 0;
565 
566 err_out:
567 	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
568 	CT_DEAD(ct, NULL, SETUP);
569 
570 	return err;
571 }
572 
573 /**
574  * xe_guc_ct_restart() - Restart GuC CT
575  * @ct: the &xe_guc_ct
576  *
577  * Restart GuC CT to an empty state without issuing a CT register MMIO command.
578  *
579  * Return: 0 on success, or a negative errno on failure.
580  */
581 int xe_guc_ct_restart(struct xe_guc_ct *ct)
582 {
583 	return __xe_guc_ct_start(ct, false);
584 }
585 
586 /**
587  * xe_guc_ct_enable() - Enable GuC CT
588  * @ct: the &xe_guc_ct
589  *
590  * Enable GuC CT to an empty state and issue a CT register MMIO command.
591  *
592  * Return: 0 on success, or a negative errno on failure.
593  */
594 int xe_guc_ct_enable(struct xe_guc_ct *ct)
595 {
596 	return __xe_guc_ct_start(ct, true);
597 }
598 
599 static void stop_g2h_handler(struct xe_guc_ct *ct)
600 {
601 	cancel_work_sync(&ct->g2h_worker);
602 }
603 
604 /**
605  * xe_guc_ct_disable - Set GuC to disabled state
606  * @ct: the &xe_guc_ct
607  *
608  * Set GuC CT to disabled state and stop the g2h handler. No outstanding g2h
609  * is expected during this transition.
610  */
611 void xe_guc_ct_disable(struct xe_guc_ct *ct)
612 {
613 	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
614 	ct_exit_safe_mode(ct);
615 	stop_g2h_handler(ct);
616 }
617 
618 /**
619  * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G
620  * @ct: the &xe_guc_ct
621  */
622 void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct)
623 {
624 	receive_g2h(ct);
625 	xe_guc_ct_stop(ct);
626 }
627 
628 /**
629  * xe_guc_ct_stop - Set GuC to stopped state
630  * @ct: the &xe_guc_ct
631  *
632  * Set GuC CT to stopped state, stop the g2h handler, and clear any outstanding g2h.
633  */
634 void xe_guc_ct_stop(struct xe_guc_ct *ct)
635 {
636 	if (!xe_guc_ct_initialized(ct))
637 		return;
638 
639 	guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
640 	stop_g2h_handler(ct);
641 }
642 
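/*
 * Flow control helpers: H2G space is tracked locally in dwords and only
 * refreshed from the descriptor head once a send does not fit, while G2H
 * space is handed out as credits for the replies an H2G is expected to
 * trigger (__g2h_reserve_space()) and returned once those replies are
 * processed (__g2h_release_space()). The reserved half of the G2H buffer
 * is excluded from this accounting, see the DOC section above.
 */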
643 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
644 {
645 	struct guc_ctb *h2g = &ct->ctbs.h2g;
646 
647 	lockdep_assert_held(&ct->lock);
648 
649 	if (cmd_len > h2g->info.space) {
650 		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
651 
652 		if (h2g->info.head > h2g->info.size) {
653 			struct xe_device *xe = ct_to_xe(ct);
654 			u32 desc_status = desc_read(xe, h2g, status);
655 
656 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
657 
658 			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n",
659 				  h2g->info.head, h2g->info.size);
660 			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
661 			return false;
662 		}
663 
664 		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
665 					     h2g->info.size) -
666 				  h2g->info.resv_space;
667 		if (cmd_len > h2g->info.space)
668 			return false;
669 	}
670 
671 	return true;
672 }
673 
674 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
675 {
676 	if (!g2h_len)
677 		return true;
678 
679 	lockdep_assert_held(&ct->fast_lock);
680 
681 	return ct->ctbs.g2h.info.space > g2h_len;
682 }
683 
684 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
685 {
686 	lockdep_assert_held(&ct->lock);
687 
688 	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
689 		return -EBUSY;
690 
691 	return 0;
692 }
693 
694 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
695 {
696 	lockdep_assert_held(&ct->lock);
697 	ct->ctbs.h2g.info.space -= cmd_len;
698 }
699 
700 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
701 {
702 	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
703 	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
704 		     (g2h_len && num_g2h));
705 
706 	if (g2h_len) {
707 		lockdep_assert_held(&ct->fast_lock);
708 
709 		if (!ct->g2h_outstanding)
710 			xe_pm_runtime_get_noresume(ct_to_xe(ct));
711 
712 		ct->ctbs.g2h.info.space -= g2h_len;
713 		ct->g2h_outstanding += num_g2h;
714 	}
715 }
716 
717 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
718 {
719 	bool bad = false;
720 
721 	lockdep_assert_held(&ct->fast_lock);
722 
723 	bad = ct->ctbs.g2h.info.space + g2h_len >
724 		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
725 	bad |= !ct->g2h_outstanding;
726 
727 	if (bad) {
728 		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
729 			  ct->ctbs.g2h.info.space, g2h_len,
730 			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
731 			  ct->ctbs.g2h.info.space + g2h_len,
732 			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
733 			  ct->g2h_outstanding);
734 		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
735 		return;
736 	}
737 
738 	ct->ctbs.g2h.info.space += g2h_len;
739 	if (!--ct->g2h_outstanding)
740 		xe_pm_runtime_put(ct_to_xe(ct));
741 }
742 
743 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
744 {
745 	spin_lock_irq(&ct->fast_lock);
746 	__g2h_release_space(ct, g2h_len);
747 	spin_unlock_irq(&ct->fast_lock);
748 }
749 
750 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
751 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
752 {
753 	unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
754 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
755 	unsigned long entries[SZ_32];
756 	unsigned int n;
757 
758 	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
759 
760 	/* May be called under spinlock, so avoid sleeping */
761 	ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
762 #endif
763 	ct->fast_req[slot].fence = fence;
764 	ct->fast_req[slot].action = action;
765 }
766 #else
767 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
768 {
769 }
770 #endif
771 
772 /*
773  * The CT protocol accepts a 16-bit fence. This field is fully owned by the
774  * driver; the GuC will just copy it to the reply message. Since we need to
775  * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
776  * we use one bit of the seqno as an indicator for that and a rolling counter
777  * for the remaining 15 bits.
778  */
779 #define CT_SEQNO_MASK GENMASK(14, 0)
780 #define CT_SEQNO_UNTRACKED BIT(15)
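/*
 * For example (illustrative values): with fence_seqno at 0x8003, a send
 * using a g2h_fence gets seqno 0x0003, while an untracked send at the same
 * counter value gets 0x8003 (CT_SEQNO_UNTRACKED set), which lets
 * parse_g2h_response() tell the two apart without a fence_lookup access.
 */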
781 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
782 {
783 	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
784 
785 	if (!is_g2h_fence)
786 		seqno |= CT_SEQNO_UNTRACKED;
787 
788 	return seqno;
789 }
790 
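/*
 * Build the HXG header dword written as cmd[1] by h2g_write(): the given
 * HXG type (REQUEST, FAST_REQUEST or EVENT) plus the action code taken
 * from action[0].
 */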
791 #define MAKE_ACTION(type, __action)				\
792 ({								\
793 	FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |			\
794 	FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |			\
795 		   GUC_HXG_EVENT_MSG_0_DATA0, __action);	\
796 })
797 
798 static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
799 {
800 	/*
801 	 * When resuming a VF, we can't reliably track whether context
802 	 * registration has completed in the GuC state machine. It is harmless
803 	 * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
804 	 * is used. Additionally, if there is an H2G protocol issue on a VF,
805 	 * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
806 	 * fail.
807 	 */
808 	return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
809 		(action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
810 		 action == XE_GUC_ACTION_REGISTER_CONTEXT);
811 }
812 
813 #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
814 
815 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
816 		     u32 ct_fence_value, bool want_response)
817 {
818 	struct xe_device *xe = ct_to_xe(ct);
819 	struct xe_gt *gt = ct_to_gt(ct);
820 	struct guc_ctb *h2g = &ct->ctbs.h2g;
821 	u32 cmd[H2G_CT_HEADERS];
822 	u32 tail = h2g->info.tail;
823 	u32 full_len;
824 	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
825 							 tail * sizeof(u32));
826 	u32 desc_status;
827 
828 	full_len = len + GUC_CTB_HDR_LEN;
829 
830 	lockdep_assert_held(&ct->lock);
831 	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
832 
833 	desc_status = desc_read(xe, h2g, status);
834 	if (desc_status) {
835 		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
836 		goto corrupted;
837 	}
838 
839 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
840 		u32 desc_tail = desc_read(xe, h2g, tail);
841 		u32 desc_head = desc_read(xe, h2g, head);
842 
843 		if (tail != desc_tail) {
844 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
845 			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
846 			goto corrupted;
847 		}
848 
849 		if (tail > h2g->info.size) {
850 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
851 			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
852 				  tail, h2g->info.size);
853 			goto corrupted;
854 		}
855 
856 		if (desc_head >= h2g->info.size) {
857 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
858 			xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n",
859 				  desc_head, h2g->info.size);
860 			goto corrupted;
861 		}
862 	}
863 
864 	/* Command will wrap, zero fill (NOPs), return and check credits again */
865 	if (tail + full_len > h2g->info.size) {
866 		xe_map_memset(xe, &map, 0, 0,
867 			      (h2g->info.size - tail) * sizeof(u32));
868 		h2g_reserve_space(ct, (h2g->info.size - tail));
869 		h2g->info.tail = 0;
870 		desc_write(xe, h2g, tail, h2g->info.tail);
871 
872 		return -EAGAIN;
873 	}
874 
875 	/*
876 	 * dw0: CT header (including fence)
877 	 * dw1: HXG header (including action code)
878 	 * dw2+: action data
879 	 */
880 	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
881 		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
882 		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
883 	if (want_response) {
884 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
885 	} else if (vf_action_can_safely_fail(xe, action[0])) {
886 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
887 	} else {
888 		fast_req_track(ct, ct_fence_value,
889 			       FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
890 
891 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
892 	}
893 
894 	/* H2G header in cmd[1] replaces action[0] so: */
895 	--len;
896 	++action;
897 
898 	/* Write H2G ensuring visible before descriptor update */
899 	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
900 	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
901 	xe_device_wmb(xe);
902 
903 	/* Update local copies */
904 	h2g->info.tail = (tail + full_len) % h2g->info.size;
905 	h2g_reserve_space(ct, full_len);
906 
907 	/* Update descriptor */
908 	desc_write(xe, h2g, tail, h2g->info.tail);
909 
910 	trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
911 			     desc_read(xe, h2g, head), h2g->info.tail);
912 
913 	return 0;
914 
915 corrupted:
916 	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
917 	return -EPIPE;
918 }
919 
920 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
921 				u32 len, u32 g2h_len, u32 num_g2h,
922 				struct g2h_fence *g2h_fence)
923 {
924 	struct xe_gt *gt = ct_to_gt(ct);
925 	u16 seqno;
926 	int ret;
927 
928 	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
929 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
930 	xe_gt_assert(gt, !num_g2h || !g2h_fence);
931 	xe_gt_assert(gt, !g2h_len || num_g2h);
932 	xe_gt_assert(gt, g2h_len || !num_g2h);
933 	lockdep_assert_held(&ct->lock);
934 
935 	if (unlikely(ct->ctbs.h2g.info.broken)) {
936 		ret = -EPIPE;
937 		goto out;
938 	}
939 
940 	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
941 		ret = -ENODEV;
942 		goto out;
943 	}
944 
945 	if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) {
946 		ret = -ECANCELED;
947 		goto out;
948 	}
949 
950 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
951 
952 	if (g2h_fence) {
953 		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
954 		num_g2h = 1;
955 
956 		if (g2h_fence_needs_alloc(g2h_fence)) {
957 			g2h_fence->seqno = next_ct_seqno(ct, true);
958 			ret = xa_err(xa_store(&ct->fence_lookup,
959 					      g2h_fence->seqno, g2h_fence,
960 					      GFP_ATOMIC));
961 			if (ret)
962 				goto out;
963 		}
964 
965 		seqno = g2h_fence->seqno;
966 	} else {
967 		seqno = next_ct_seqno(ct, false);
968 	}
969 
970 	if (g2h_len)
971 		spin_lock_irq(&ct->fast_lock);
972 retry:
973 	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
974 	if (unlikely(ret))
975 		goto out_unlock;
976 
977 	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
978 	if (unlikely(ret)) {
979 		if (ret == -EAGAIN)
980 			goto retry;
981 		goto out_unlock;
982 	}
983 
984 	__g2h_reserve_space(ct, g2h_len, num_g2h);
985 	xe_guc_notify(ct_to_guc(ct));
986 out_unlock:
987 	if (g2h_len)
988 		spin_unlock_irq(&ct->fast_lock);
989 out:
990 	return ret;
991 }
992 
993 static void kick_reset(struct xe_guc_ct *ct)
994 {
995 	xe_gt_reset_async(ct_to_gt(ct));
996 }
997 
998 static int dequeue_one_g2h(struct xe_guc_ct *ct);
999 
1000 /*
1001  * Wait before retrying to send an H2G message.
1002  * Return: true if ready for retry, false if the wait timed out
1003  */
1004 static bool guc_ct_send_wait_for_retry(struct xe_guc_ct *ct, u32 len,
1005 				       u32 g2h_len, struct g2h_fence *g2h_fence,
1006 				       unsigned int *sleep_period_ms)
1007 {
1008 	struct xe_device *xe = ct_to_xe(ct);
1009 
1010 	/*
1011 	 * We wait to try to restore credits for about 1 second before bailing.
1012 	 * In the case of H2G credits we have no choice but just to wait for the
1013 	 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
1014 	 * the case of G2H we process any G2H in the channel, hopefully freeing
1015 	 * credits as we consume the G2H messages.
1016 	 */
1017 	if (!h2g_has_room(ct, len + GUC_CTB_HDR_LEN)) {
1018 		struct guc_ctb *h2g = &ct->ctbs.h2g;
1019 
1020 		if (*sleep_period_ms == 1024)
1021 			return false;
1022 
1023 		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
1024 						 h2g->info.size,
1025 						 h2g->info.space,
1026 						 len + GUC_CTB_HDR_LEN);
1027 		msleep(*sleep_period_ms);
1028 		*sleep_period_ms <<= 1;
1029 	} else {
1030 		struct xe_device *xe = ct_to_xe(ct);
1031 		struct guc_ctb *g2h = &ct->ctbs.g2h;
1032 		int ret;
1033 
1034 		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
1035 						 desc_read(xe, g2h, tail),
1036 						 g2h->info.size,
1037 						 g2h->info.space,
1038 						 g2h_fence ?
1039 						 GUC_CTB_HXG_MSG_MAX_LEN :
1040 						 g2h_len);
1041 
1042 #define g2h_avail(ct)	\
1043 	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
1044 		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
1045 					g2h_avail(ct), HZ))
1046 			return false;
1047 #undef g2h_avail
1048 
1049 		ret = dequeue_one_g2h(ct);
1050 		if (ret < 0) {
1051 			if (ret != -ECANCELED)
1052 				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
1053 					  ERR_PTR(ret));
1054 			return false;
1055 		}
1056 	}
1057 	return true;
1058 }
1059 
1060 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1061 			      u32 g2h_len, u32 num_g2h,
1062 			      struct g2h_fence *g2h_fence)
1063 {
1064 	struct xe_gt *gt = ct_to_gt(ct);
1065 	unsigned int sleep_period_ms = 1;
1066 	int ret;
1067 
1068 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
1069 	lockdep_assert_held(&ct->lock);
1070 	xe_device_assert_mem_access(ct_to_xe(ct));
1071 
1072 try_again:
1073 	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
1074 				   g2h_fence);
1075 
1076 	if (unlikely(ret == -EBUSY)) {
1077 		if (!guc_ct_send_wait_for_retry(ct, len, g2h_len, g2h_fence,
1078 						&sleep_period_ms))
1079 			goto broken;
1080 		goto try_again;
1081 	}
1082 
1083 	return ret;
1084 
1085 broken:
1086 	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
1087 	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
1088 
1089 	return -EDEADLK;
1090 }
1091 
1092 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1093 		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
1094 {
1095 	int ret;
1096 
1097 	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
1098 
1099 	mutex_lock(&ct->lock);
1100 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
1101 	mutex_unlock(&ct->lock);
1102 
1103 	return ret;
1104 }
1105 
1106 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1107 		   u32 g2h_len, u32 num_g2h)
1108 {
1109 	int ret;
1110 
1111 	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
1112 	if (ret == -EDEADLK)
1113 		kick_reset(ct);
1114 
1115 	return ret;
1116 }
1117 
1118 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1119 			  u32 g2h_len, u32 num_g2h)
1120 {
1121 	int ret;
1122 
1123 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
1124 	if (ret == -EDEADLK)
1125 		kick_reset(ct);
1126 
1127 	return ret;
1128 }
1129 
1130 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
1131 {
1132 	int ret;
1133 
1134 	lockdep_assert_held(&ct->lock);
1135 
1136 	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
1137 	if (ret == -EDEADLK)
1138 		kick_reset(ct);
1139 
1140 	return ret;
1141 }
1142 
1143 /*
1144  * Check if a GT reset is in progress or will occur and if the GT reset brought
1145  * the CT back up. Randomly picking 5 seconds as an upper limit for a GT reset.
1146  */
1147 static bool retry_failure(struct xe_guc_ct *ct, int ret)
1148 {
1149 	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
1150 		return false;
1151 
1152 #define ct_alive(ct)	\
1153 	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
1154 	 !ct->ctbs.g2h.info.broken)
1155 	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
1156 		return false;
1157 #undef ct_alive
1158 
1159 	return true;
1160 }
1161 
1162 #define GUC_SEND_RETRY_LIMIT	50
1163 #define GUC_SEND_RETRY_MSLEEP	5
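/*
 * The retry path below backs off linearly (5 ms, 10 ms, ..., 250 ms), so
 * reaching GUC_SEND_RETRY_LIMIT adds up to roughly 6.4 seconds of sleeping
 * before guc_ct_send_recv() gives up with -ELOOP.
 */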
1164 
1165 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1166 			    u32 *response_buffer, bool no_fail)
1167 {
1168 	struct xe_gt *gt = ct_to_gt(ct);
1169 	struct g2h_fence g2h_fence;
1170 	unsigned int retries = 0;
1171 	int ret = 0;
1172 
1173 	/*
1174 	 * We use a fence to implement blocking sends / receiving response data.
1175 	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
1176 	 * an xarray is used as the storage medium, with the seqno being the key.
1177 	 * Fields in the fence hold success, failure, retry status and the
1178 	 * response data. Safe to allocate on the stack as the xarray is the
1179 	 * only reference and it cannot be present after this function exits.
1180 	 */
1181 retry:
1182 	g2h_fence_init(&g2h_fence, response_buffer);
1183 retry_same_fence:
1184 	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
1185 	if (unlikely(ret == -ENOMEM)) {
1186 		/* Retry allocation with GFP_KERNEL */
1187 		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
1188 				      &g2h_fence, GFP_KERNEL));
1189 		if (ret)
1190 			return ret;
1191 
1192 		goto retry_same_fence;
1193 	} else if (unlikely(ret)) {
1194 		if (ret == -EDEADLK)
1195 			kick_reset(ct);
1196 
1197 		if (no_fail && retry_failure(ct, ret))
1198 			goto retry_same_fence;
1199 
1200 		if (!g2h_fence_needs_alloc(&g2h_fence))
1201 			xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1202 
1203 		return ret;
1204 	}
1205 
1206 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
1207 	if (!ret) {
1208 		LNL_FLUSH_WORK(&ct->g2h_worker);
1209 		if (g2h_fence.done) {
1210 			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
1211 				   g2h_fence.seqno, action[0]);
1212 			ret = 1;
1213 		}
1214 	}
1215 
1216 	/*
1217 	 * Ensure we serialize with the completion side to prevent a UAF with the fence going out of
1218 	 * scope on the stack, since we have no clue whether it will fire after the timeout but before
1219 	 * we can erase it from the xa. Also we have some dependent loads and stores below for which
1220 	 * we need the correct ordering, and we lack the needed barriers.
1221 	 */
1222 	mutex_lock(&ct->lock);
1223 	if (!ret) {
1224 		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
1225 			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
1226 		xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1227 		mutex_unlock(&ct->lock);
1228 		return -ETIME;
1229 	}
1230 
1231 	if (g2h_fence.retry) {
1232 		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
1233 			  action[0], g2h_fence.reason);
1234 		mutex_unlock(&ct->lock);
1235 		if (++retries > GUC_SEND_RETRY_LIMIT) {
1236 			xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
1237 				  action[0], GUC_SEND_RETRY_LIMIT);
1238 			return -ELOOP;
1239 		}
1240 		msleep(GUC_SEND_RETRY_MSLEEP * retries);
1241 		goto retry;
1242 	}
1243 	if (g2h_fence.fail) {
1244 		if (g2h_fence.cancel) {
1245 			xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
1246 			ret = -ECANCELED;
1247 			goto unlock;
1248 		}
1249 		xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
1250 			  action[0], g2h_fence.error, g2h_fence.hint);
1251 		ret = -EIO;
1252 	}
1253 
1254 	if (ret > 0)
1255 		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
1256 
1257 unlock:
1258 	mutex_unlock(&ct->lock);
1259 
1260 	return ret;
1261 }
1262 
1263 /**
1264  * xe_guc_ct_send_recv - Send and receive HXG to the GuC
1265  * @ct: the &xe_guc_ct
1266  * @action: the dword array with `HXG Request`_ message (can't be NULL)
1267  * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
1268  * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
1269  *
1270  * Send a `HXG Request`_ message to the GuC over CT communication channel and
1271  * blocks until GuC replies with a `HXG Response`_ message.
1272  *
1273  * For non-blocking communication with GuC use xe_guc_ct_send().
1274  *
1275  * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
1276  *
1277  * Return: response length (in dwords) if &response_buffer was not NULL, or
1278  *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
1279  *         a negative error code on failure.
1280  */
1281 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1282 			u32 *response_buffer)
1283 {
1284 	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
1285 	return guc_ct_send_recv(ct, action, len, response_buffer, false);
1286 }
1287 ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
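/*
 * Illustrative use only (GUC_ACTION_HOST2GUC_EXAMPLE_QUERY is a made-up
 * action code): a blocking query whose result fits in DATA0 of the
 * `HXG Response`_ needs no response buffer:
 *
 *	u32 action[] = { GUC_ACTION_HOST2GUC_EXAMPLE_QUERY, param };
 *	int ret = xe_guc_ct_send_recv(&guc->ct, action, ARRAY_SIZE(action), NULL);
 *
 * A negative return is an errno; otherwise ret holds DATA0 from the reply.
 */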
1288 
1289 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
1290 				u32 len, u32 *response_buffer)
1291 {
1292 	return guc_ct_send_recv(ct, action, len, response_buffer, true);
1293 }
1294 
1295 static u32 *msg_to_hxg(u32 *msg)
1296 {
1297 	return msg + GUC_CTB_MSG_MIN_LEN;
1298 }
1299 
1300 static u32 msg_len_to_hxg_len(u32 len)
1301 {
1302 	return len - GUC_CTB_MSG_MIN_LEN;
1303 }
1304 
1305 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
1306 {
1307 	u32 *hxg = msg_to_hxg(msg);
1308 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1309 
1310 	lockdep_assert_held(&ct->lock);
1311 
1312 	switch (action) {
1313 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1314 	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1315 	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1316 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1317 		g2h_release_space(ct, len);
1318 	}
1319 
1320 	return 0;
1321 }
1322 
1323 static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
1324 {
1325 	struct xe_gt *gt = ct_to_gt(ct);
1326 
1327 	if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
1328 		xe_gt_err(gt, "GuC Crash dump notification\n");
1329 	else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
1330 		xe_gt_err(gt, "GuC Exception notification\n");
1331 	else
1332 		xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
1333 
1334 	CT_DEAD(ct, NULL, CRASH);
1335 
1336 	kick_reset(ct);
1337 
1338 	return 0;
1339 }
1340 
1341 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
1342 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1343 {
1344 	u16 fence_min = U16_MAX, fence_max = 0;
1345 	struct xe_gt *gt = ct_to_gt(ct);
1346 	bool found = false;
1347 	unsigned int n;
1348 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1349 	char *buf;
1350 #endif
1351 
1352 	lockdep_assert_held(&ct->lock);
1353 
1354 	for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
1355 		if (ct->fast_req[n].fence < fence_min)
1356 			fence_min = ct->fast_req[n].fence;
1357 		if (ct->fast_req[n].fence > fence_max)
1358 			fence_max = ct->fast_req[n].fence;
1359 
1360 		if (ct->fast_req[n].fence != fence)
1361 			continue;
1362 		found = true;
1363 
1364 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1365 		buf = kmalloc(SZ_4K, GFP_NOWAIT);
1366 		if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
1367 			xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
1368 				  fence, ct->fast_req[n].action, buf);
1369 		else
1370 			xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
1371 				  fence, ct->fast_req[n].action);
1372 		kfree(buf);
1373 #else
1374 		xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
1375 			  fence, ct->fast_req[n].action);
1376 #endif
1377 		break;
1378 	}
1379 
1380 	if (!found)
1381 		xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
1382 			   fence, fence_min, fence_max, ct->fence_seqno);
1383 }
1384 #else
1385 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1386 {
1387 }
1388 #endif
1389 
1390 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
1391 {
1392 	struct xe_gt *gt =  ct_to_gt(ct);
1393 	u32 *hxg = msg_to_hxg(msg);
1394 	u32 hxg_len = msg_len_to_hxg_len(len);
1395 	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
1396 	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1397 	struct g2h_fence *g2h_fence;
1398 
1399 	lockdep_assert_held(&ct->lock);
1400 
1401 	/*
1402 	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
1403 	 * Those messages should never fail, so if we do get an error back it
1404 	 * means we're likely doing an illegal operation and the GuC is
1405 	 * rejecting it. We have no way to inform the code that submitted the
1406 	 * H2G that the message was rejected, so we need to escalate the
1407 	 * failure to trigger a reset.
1408 	 */
1409 	if (fence & CT_SEQNO_UNTRACKED) {
1410 		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
1411 			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
1412 				  fence,
1413 				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
1414 				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
1415 		else
1416 			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
1417 				  type, fence);
1418 
1419 		fast_req_report(ct, fence);
1420 
1421 		/* FIXME: W/A race in the GuC, will get in firmware soon */
1422 		if (xe_gt_recovery_pending(gt))
1423 			return 0;
1424 
1425 		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
1426 
1427 		return -EPROTO;
1428 	}
1429 
1430 	g2h_fence = xa_erase(&ct->fence_lookup, fence);
1431 	if (unlikely(!g2h_fence)) {
1432 		/* Don't tear down channel, as send could've timed out */
1433 		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
1434 		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
1435 		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1436 		return 0;
1437 	}
1438 
1439 	xe_gt_assert(gt, fence == g2h_fence->seqno);
1440 
1441 	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
1442 		g2h_fence->fail = true;
1443 		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
1444 		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
1445 	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
1446 		g2h_fence->retry = true;
1447 		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
1448 	} else if (g2h_fence->response_buffer) {
1449 		g2h_fence->response_len = hxg_len;
1450 		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
1451 	} else {
1452 		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
1453 	}
1454 
1455 	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1456 
1457 	g2h_fence->done = true;
1458 	smp_mb();
1459 
1460 	wake_up_all(&ct->g2h_fence_wq);
1461 
1462 	return 0;
1463 }
1464 
1465 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1466 {
1467 	struct xe_gt *gt = ct_to_gt(ct);
1468 	u32 *hxg = msg_to_hxg(msg);
1469 	u32 origin, type;
1470 	int ret;
1471 
1472 	lockdep_assert_held(&ct->lock);
1473 
1474 	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1475 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1476 		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
1477 			  origin);
1478 		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
1479 
1480 		return -EPROTO;
1481 	}
1482 
1483 	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1484 	switch (type) {
1485 	case GUC_HXG_TYPE_EVENT:
1486 		ret = parse_g2h_event(ct, msg, len);
1487 		break;
1488 	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1489 	case GUC_HXG_TYPE_RESPONSE_FAILURE:
1490 	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
1491 		ret = parse_g2h_response(ct, msg, len);
1492 		break;
1493 	default:
1494 		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
1495 			  type);
1496 		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
1497 
1498 		ret = -EOPNOTSUPP;
1499 	}
1500 
1501 	return ret;
1502 }
1503 
1504 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1505 {
1506 	struct xe_guc *guc = ct_to_guc(ct);
1507 	struct xe_gt *gt = ct_to_gt(ct);
1508 	u32 hxg_len = msg_len_to_hxg_len(len);
1509 	u32 *hxg = msg_to_hxg(msg);
1510 	u32 action, adj_len;
1511 	u32 *payload;
1512 	int ret = 0;
1513 
1514 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1515 		return 0;
1516 
1517 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1518 	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
1519 	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
1520 
1521 	switch (action) {
1522 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1523 		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
1524 		break;
1525 	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1526 		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
1527 		break;
1528 	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1529 		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
1530 		break;
1531 	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1532 		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
1533 							      adj_len);
1534 		break;
1535 	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1536 		/* Selftest only at the moment */
1537 		break;
1538 	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1539 		ret = xe_guc_error_capture_handler(guc, payload, adj_len);
1540 		break;
1541 	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1542 		/* FIXME: Handle this */
1543 		break;
1544 	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
1545 		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
1546 								 adj_len);
1547 		break;
1548 	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1549 		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1550 		break;
1551 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1552 		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1553 		break;
1554 	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
1555 		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
1556 		break;
1557 	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
1558 		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
1559 		break;
1560 	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
1561 		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
1562 		break;
1563 	case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
1564 		ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
1565 		break;
1566 	case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
1567 	case XE_GUC_ACTION_NOTIFY_EXCEPTION:
1568 		ret = guc_crash_process_msg(ct, action);
1569 		break;
1570 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1571 	case XE_GUC_ACTION_TEST_G2G_RECV:
1572 		ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
1573 		break;
1574 #endif
1575 	default:
1576 		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
1577 	}
1578 
1579 	if (ret) {
1580 		xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
1581 			  action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
1582 		CT_DEAD(ct, NULL, PROCESS_FAILED);
1583 	}
1584 
1585 	return 0;
1586 }
1587 
1588 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1589 {
1590 	struct xe_device *xe = ct_to_xe(ct);
1591 	struct xe_gt *gt = ct_to_gt(ct);
1592 	struct guc_ctb *g2h = &ct->ctbs.g2h;
1593 	u32 tail, head, len, desc_status;
1594 	s32 avail;
1595 	u32 action;
1596 	u32 *hxg;
1597 
1598 	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
1599 	lockdep_assert_held(&ct->fast_lock);
1600 
1601 	if (ct->state == XE_GUC_CT_STATE_DISABLED)
1602 		return -ENODEV;
1603 
1604 	if (ct->state == XE_GUC_CT_STATE_STOPPED)
1605 		return -ECANCELED;
1606 
1607 	if (g2h->info.broken)
1608 		return -EPIPE;
1609 
1610 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
1611 
1612 	desc_status = desc_read(xe, g2h, status);
1613 	if (desc_status) {
1614 		if (desc_status & GUC_CTB_STATUS_DISABLED) {
1615 			/*
1616 			 * Potentially valid if a CLIENT_RESET request resulted in
1617 			 * contexts/engines being reset. But should never happen as
1618 			 * no contexts should be active when CLIENT_RESET is sent.
1619 			 */
1620 			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
1621 			desc_status &= ~GUC_CTB_STATUS_DISABLED;
1622 		}
1623 
1624 		if (desc_status) {
1625 			xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
1626 			goto corrupted;
1627 		}
1628 	}
1629 
1630 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
1631 		u32 desc_tail = desc_read(xe, g2h, tail);
1632 		/*
1633 		u32 desc_head = desc_read(xe, g2h, head);
1634 
1635 		 * info.head and desc_head are updated back-to-back at the end of
1636 		 * this function and nowhere else. Hence, they cannot be different
1637 		 * unless two g2h_read calls are running concurrently. Which is not
1638 		 * possible because it is guarded by ct->fast_lock. And yet, some
1639 		 * discrete platforms are regularly hitting this error :(.
1640 		 *
1641 		 * desc_head rolling backwards shouldn't cause any noticeable
1642 		 * problems - just a delay in GuC being allowed to proceed past that
1643 		 * point in the queue. So for now, just disable the error until it
1644 		 * can be root caused.
1645 		 *
1646 		if (g2h->info.head != desc_head) {
1647 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
1648 			xe_gt_err(gt, "CT read: head was modified %u != %u\n",
1649 				  desc_head, g2h->info.head);
1650 			goto corrupted;
1651 		}
1652 		 */
1653 
1654 		if (g2h->info.head > g2h->info.size) {
1655 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1656 			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
1657 				  g2h->info.head, g2h->info.size);
1658 			goto corrupted;
1659 		}
1660 
1661 		if (desc_tail >= g2h->info.size) {
1662 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1663 			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u)\n",
1664 				  desc_tail, g2h->info.size);
1665 			goto corrupted;
1666 		}
1667 	}
1668 
1669 	/* Calculate DW available to read */
1670 	tail = desc_read(xe, g2h, tail);
1671 	avail = tail - g2h->info.head;
1672 	if (unlikely(avail == 0))
1673 		return 0;
1674 
1675 	if (avail < 0)
1676 		avail += g2h->info.size;
1677 
1678 	/* Read header */
1679 	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
1680 			   sizeof(u32));
1681 	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1682 	if (len > avail) {
1683 		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
1684 			  avail, len);
1685 		goto corrupted;
1686 	}
1687 
1688 	head = (g2h->info.head + 1) % g2h->info.size;
1689 	avail = len - 1;
1690 
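	/*
	 * Worked example (info.size is 32768 dwords for the 128K G2H buffer):
	 * with info.head at 32766 and a 4 dword message, the header is read at
	 * offset 32766, head becomes 32767 and avail 3, so one dword is copied
	 * from the end of the ring and the remaining two from offset 0;
	 * info.head then ends up at 2.
	 */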
1691 	/* Read G2H message */
1692 	if (avail + head > g2h->info.size) {
1693 		u32 avail_til_wrap = g2h->info.size - head;
1694 
1695 		xe_map_memcpy_from(xe, msg + 1,
1696 				   &g2h->cmds, sizeof(u32) * head,
1697 				   avail_til_wrap * sizeof(u32));
1698 		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
1699 				   &g2h->cmds, 0,
1700 				   (avail - avail_til_wrap) * sizeof(u32));
1701 	} else {
1702 		xe_map_memcpy_from(xe, msg + 1,
1703 				   &g2h->cmds, sizeof(u32) * head,
1704 				   avail * sizeof(u32));
1705 	}
1706 
1707 	hxg = msg_to_hxg(msg);
1708 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1709 
1710 	if (fast_path) {
1711 		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1712 			return 0;
1713 
1714 		switch (action) {
1715 		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1716 		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1717 			break;	/* Process these in fast-path */
1718 		default:
1719 			return 0;
1720 		}
1721 	}
1722 
1723 	/* Update local / descriptor header */
1724 	g2h->info.head = (head + avail) % g2h->info.size;
1725 	desc_write(xe, g2h, head, g2h->info.head);
1726 
1727 	trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
1728 			     action, len, g2h->info.head, tail);
1729 
1730 	return len;
1731 
1732 corrupted:
1733 	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
1734 	return -EPROTO;
1735 }
1736 
1737 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1738 {
1739 	struct xe_gt *gt = ct_to_gt(ct);
1740 	struct xe_guc *guc = ct_to_guc(ct);
1741 	u32 hxg_len = msg_len_to_hxg_len(len);
1742 	u32 *hxg = msg_to_hxg(msg);
1743 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1744 	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
1745 	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
1746 	int ret = 0;
1747 
1748 	switch (action) {
1749 	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1750 		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1751 		break;
1752 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1753 		__g2h_release_space(ct, len);
1754 		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1755 		break;
1756 	default:
1757 		xe_gt_warn(gt, "NOT_POSSIBLE");
1758 	}
1759 
1760 	if (ret) {
1761 		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
1762 			  action, ERR_PTR(ret));
1763 		CT_DEAD(ct, NULL, FAST_G2H);
1764 	}
1765 }
1766 
1767 /**
1768  * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
1769  * @ct: GuC CT object
1770  *
1771  * Anything related to page faults is critical for performance, so process
1772  * these critical G2H messages in the IRQ handler. This is safe as these
1773  * handlers either just wake up waiters or queue another worker.
1774  */
1775 void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1776 {
1777 	struct xe_device *xe = ct_to_xe(ct);
1778 	bool ongoing;
1779 	int len;
1780 
1781 	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1782 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1783 		return;
1784 
1785 	spin_lock(&ct->fast_lock);
1786 	do {
1787 		len = g2h_read(ct, ct->fast_msg, true);
1788 		if (len > 0)
1789 			g2h_fast_path(ct, ct->fast_msg, len);
1790 	} while (len > 0);
1791 	spin_unlock(&ct->fast_lock);
1792 
1793 	if (ongoing)
1794 		xe_pm_runtime_put(xe);
1795 }
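/*
 * Illustrative sketch only: how an interrupt handler might let the fast path
 * consume the critical G2H messages immediately and leave everything else in
 * the buffer for the G2H worker. The handler name and the g2h_wq workqueue
 * reference below are assumptions for the example, not part of this file:
 *
 *	static void example_guc_irq_handler(struct xe_guc *guc)
 *	{
 *		xe_guc_ct_fast_path(&guc->ct);
 *		queue_work(guc->ct.g2h_wq, &guc->ct.g2h_worker);
 *	}
 */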
1796 
1797 /* Returns less than zero on error, 0 on done, 1 on more available */
1798 static int dequeue_one_g2h(struct xe_guc_ct *ct)
1799 {
1800 	int len;
1801 	int ret;
1802 
1803 	lockdep_assert_held(&ct->lock);
1804 
1805 	spin_lock_irq(&ct->fast_lock);
1806 	len = g2h_read(ct, ct->msg, false);
1807 	spin_unlock_irq(&ct->fast_lock);
1808 	if (len <= 0)
1809 		return len;
1810 
1811 	ret = parse_g2h_msg(ct, ct->msg, len);
1812 	if (unlikely(ret < 0))
1813 		return ret;
1814 
1815 	ret = process_g2h_msg(ct, ct->msg, len);
1816 	if (unlikely(ret < 0))
1817 		return ret;
1818 
1819 	return 1;
1820 }
1821 
1822 static void receive_g2h(struct xe_guc_ct *ct)
1823 {
1824 	bool ongoing;
1825 	int ret;
1826 
1827 	/*
1828 	 * Normal users must always hold mem_access.ref around CT calls. However
1829 	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
1830 	 * at this stage we can't rely on mem_access.ref and even the
1831 	 * callback_task will be different from current.  For such cases we just
1832 	 * need to ensure we always process the responses from any blocking
1833 	 * ct_send requests or where we otherwise expect some response when
1834 	 * initiated from those callbacks (which will need to wait for the below
1835 	 * dequeue_one_g2h()).  The dequeue_one_g2h() will gracefully fail if
1836 	 * the device has suspended to the point that the CT communication has
1837 	 * been disabled.
1838 	 *
1839 	 * If we are inside the runtime pm callback, we can be the only task
1840 	 * still issuing CT requests (since that requires having the
1841 	 * mem_access.ref).  It seems like it might in theory be possible to
1842 	 * receive unsolicited events from the GuC just as we are
1843 	 * suspending-resuming, but those will currently anyway be lost when
1844 	 * eventually exiting from suspend, hence no need to wake up the device
1845 	 * here. If we ever need something stronger than
1846 	 * xe_pm_runtime_get_if_active() then we need to be careful with
1847 	 * blocking the pm callbacks from getting CT responses, if the worker
1848 	 * here is blocked on those callbacks completing, creating a deadlock.
1849 	 */
1850 	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1851 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1852 		return;
1853 
1854 	do {
1855 		mutex_lock(&ct->lock);
1856 		ret = dequeue_one_g2h(ct);
1857 		mutex_unlock(&ct->lock);
1858 
1859 		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
1860 			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d\n", ret);
1861 			CT_DEAD(ct, NULL, G2H_RECV);
1862 			kick_reset(ct);
1863 		}
1864 	} while (ret == 1);
1865 
1866 	if (ongoing)
1867 		xe_pm_runtime_put(ct_to_xe(ct));
1868 }
1869 
1870 static void g2h_worker_func(struct work_struct *w)
1871 {
1872 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1873 
1874 	receive_g2h(ct);
1875 }
1876 
1877 static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
1878 							bool want_ctb)
1879 {
1880 	struct xe_guc_ct_snapshot *snapshot;
1881 
1882 	snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
1883 	if (!snapshot)
1884 		return NULL;
1885 
1886 	if (ct->bo && want_ctb) {
1887 		snapshot->ctb_size = xe_bo_size(ct->bo);
1888 		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
1889 	}
1890 
1891 	return snapshot;
1892 }
1893 
1894 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
1895 				     struct guc_ctb_snapshot *snapshot)
1896 {
1897 	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
1898 			   sizeof(struct guc_ct_buffer_desc));
1899 	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
1900 }
1901 
1902 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
1903 				   struct drm_printer *p)
1904 {
1905 	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
1906 	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
1907 	drm_printf(p, "\thead: %d\n", snapshot->info.head);
1908 	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
1909 	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
1910 	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
1911 	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
1912 	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
1913 	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
1914 }
1915 
1916 static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
1917 							  bool want_ctb)
1918 {
1919 	struct xe_device *xe = ct_to_xe(ct);
1920 	struct xe_guc_ct_snapshot *snapshot;
1921 
1922 	snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
1923 	if (!snapshot) {
1924 		xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
1925 		return NULL;
1926 	}
1927 
1928 	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
1929 		snapshot->ct_enabled = true;
1930 		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
1931 		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
1932 		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
1933 	}
1934 
1935 	if (ct->bo && snapshot->ctb)
1936 		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
1937 
1938 	return snapshot;
1939 }
1940 
1941 /**
1942  * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
1943  * @ct: GuC CT object.
1944  *
1945  * The snapshot can be printed out at a later stage, for example during
1946  * devcoredump analysis. This function is safe to call from atomic context.
1947  *
1948  * Returns: a GuC CT snapshot object that must be freed by the caller
1949  * with xe_guc_ct_snapshot_free().
1950  */
1951 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
1952 {
1953 	return guc_ct_snapshot_capture(ct, true, true);
1954 }
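/*
 * Illustrative only: the expected snapshot lifecycle, assuming a valid @ct and
 * a drm_printer @p, is capture (possibly in atomic context), print later, then
 * free. xe_guc_ct_print() below follows this same pattern.
 *
 *	struct xe_guc_ct_snapshot *snapshot = xe_guc_ct_snapshot_capture(ct);
 *
 *	... later, e.g. when the devcoredump is read out ...
 *	xe_guc_ct_snapshot_print(snapshot, p);
 *	xe_guc_ct_snapshot_free(snapshot);
 */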
1955 
1956 /**
1957  * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
1958  * @snapshot: GuC CT snapshot object.
1959  * @p: drm_printer where it will be printed out.
1960  *
1961  * This function prints out a given GuC CT snapshot object.
1962  */
1963 void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
1964 			      struct drm_printer *p)
1965 {
1966 	if (!snapshot)
1967 		return;
1968 
1969 	if (snapshot->ct_enabled) {
1970 		drm_puts(p, "H2G CTB (all sizes in DW):\n");
1971 		guc_ctb_snapshot_print(&snapshot->h2g, p);
1972 
1973 		drm_puts(p, "G2H CTB (all sizes in DW):\n");
1974 		guc_ctb_snapshot_print(&snapshot->g2h, p);
1975 		drm_printf(p, "\tg2h outstanding: %d\n",
1976 			   snapshot->g2h_outstanding);
1977 
1978 		if (snapshot->ctb) {
1979 			drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
1980 			xe_print_blob_ascii85(p, "[CTB].data", '\n',
1981 					      snapshot->ctb, 0, snapshot->ctb_size);
1982 		}
1983 	} else {
1984 		drm_puts(p, "CT disabled\n");
1985 	}
1986 }
1987 
1988 /**
1989  * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
1990  * @snapshot: GuC CT snapshot object.
1991  *
1992  * This function frees all of the memory that was allocated for the given
1993  * snapshot at capture time.
1994  */
1995 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
1996 {
1997 	if (!snapshot)
1998 		return;
1999 
2000 	kfree(snapshot->ctb);
2001 	kfree(snapshot);
2002 }
2003 
2004 /**
2005  * xe_guc_ct_print - GuC CT Print.
2006  * @ct: GuC CT.
2007  * @p: drm_printer where it will be printed out.
2008  * @want_ctb: Should the full CTB content be dumped (vs just the headers)
2009  *
2010  * This function will quickly capture a snapshot of the CT state
2011  * and immediately print it out.
2012  */
2013 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
2014 {
2015 	struct xe_guc_ct_snapshot *snapshot;
2016 
2017 	snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
2018 	xe_guc_ct_snapshot_print(snapshot, p);
2019 	xe_guc_ct_snapshot_free(snapshot);
2020 }
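/*
 * Illustrative only: dumping the CT state, including the raw CTB contents,
 * through a debugfs-style show callback. The callback and the way @ct is
 * recovered from the seq_file private data are hypothetical:
 *
 *	static int ct_show(struct seq_file *m, void *data)
 *	{
 *		struct xe_guc_ct *ct = m->private;
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		xe_guc_ct_print(ct, &p, true);
 *		return 0;
 *	}
 */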
2021 
2022 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
2023 
2024 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
2025 /*
2026  * This helper lets the driver detect whether a fault injection test is
2027  * currently active, so that unnecessary debug output can be suppressed.
2028  * Typically the function returns zero, but the fault injection framework
2029  * can alter this to return an error. Since faults are injected through
2030  * this function, the compiler must be prevented from optimizing it into
2031  * an inline call; a static function defined in a header file would be
2032  * inlined, so the function is defined here and marked 'noinline' to
2033  * avoid that optimization.
2034  */
2035 noinline int xe_is_injection_active(void) { return 0; }
2036 ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
2037 #else
2038 int xe_is_injection_active(void) { return 0; }
2039 #endif
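/*
 * With CONFIG_FUNCTION_ERROR_INJECTION enabled, a test selects
 * xe_is_injection_active() through the kernel's fail_function fault-injection
 * interface and configures a non-zero return value for it; see
 * Documentation/fault-injection/ for the available knobs. ct_dead_capture()
 * below then skips the expensive dump while such an injection is active.
 */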
2040 
2041 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
2042 {
2043 	struct xe_guc_log_snapshot *snapshot_log;
2044 	struct xe_guc_ct_snapshot *snapshot_ct;
2045 	struct xe_guc *guc = ct_to_guc(ct);
2046 	unsigned long flags;
2047 	bool have_capture;
2048 
2049 	if (ctb)
2050 		ctb->info.broken = true;
2051 	/*
2052 	 * A huge dump gets generated when an error is injected into the GuC
2053 	 * CT/MMIO functions, so suppress the dump while fault injection is active.
2054 	 */
2055 	if (xe_is_injection_active())
2056 		return;
2057 
2058 	/* Ignore further errors after the first dump until a reset */
2059 	if (ct->dead.reported)
2060 		return;
2061 
2062 	spin_lock_irqsave(&ct->dead.lock, flags);
2063 
2064 	/* And only capture one dump at a time */
2065 	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
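	/*
	 * The reason is tracked as a bit mask of CT_DEAD_* values, so a single
	 * G2H read failure, for example, is reported as
	 * (1 << CT_DEAD_G2H_READ) | (1 << CT_DEAD_STATE_CAPTURE) in the
	 * "CTB is dead" message printed by ct_dead_print().
	 */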
2066 	ct->dead.reason |= (1 << reason_code) |
2067 			   (1 << CT_DEAD_STATE_CAPTURE);
2068 
2069 	spin_unlock_irqrestore(&ct->dead.lock, flags);
2070 
2071 	if (have_capture)
2072 		return;
2073 
2074 	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
2075 	snapshot_ct = xe_guc_ct_snapshot_capture(ct);
2076 
2077 	spin_lock_irqsave(&ct->dead.lock, flags);
2078 
2079 	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
2080 		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
2081 		xe_guc_log_snapshot_free(snapshot_log);
2082 		xe_guc_ct_snapshot_free(snapshot_ct);
2083 	} else {
2084 		ct->dead.snapshot_log = snapshot_log;
2085 		ct->dead.snapshot_ct = snapshot_ct;
2086 	}
2087 
2088 	spin_unlock_irqrestore(&ct->dead.lock, flags);
2089 
2090 	queue_work(system_unbound_wq, &ct->dead.worker);
2091 }
2092 
2093 static void ct_dead_print(struct xe_dead_ct *dead)
2094 {
2095 	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
2096 	struct xe_device *xe = ct_to_xe(ct);
2097 	struct xe_gt *gt = ct_to_gt(ct);
2098 	static int g_count;
2099 	struct drm_printer ip = xe_gt_info_printer(gt);
2100 	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
2101 
2102 	if (!dead->reason) {
2103 		xe_gt_err(gt, "CTB is dead for no reason!?\n");
2104 		return;
2105 	}
2106 
2107 	/* Can't generate a genuine core dump at this point, so just do the good bits */
2108 	drm_puts(&lp, "**** Xe Device Coredump ****\n");
2109 	drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
2110 	xe_device_snapshot_print(xe, &lp);
2111 
2112 	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
2113 	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
2114 
2115 	drm_puts(&lp, "**** GuC Log ****\n");
2116 	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
2117 
2118 	drm_puts(&lp, "**** GuC CT ****\n");
2119 	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
2120 
2121 	drm_puts(&lp, "Done.\n");
2122 }
2123 
2124 static void ct_dead_worker_func(struct work_struct *w)
2125 {
2126 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
2127 
2128 	if (!ct->dead.reported) {
2129 		ct->dead.reported = true;
2130 		ct_dead_print(&ct->dead);
2131 	}
2132 
2133 	spin_lock_irq(&ct->dead.lock);
2134 
2135 	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
2136 	ct->dead.snapshot_log = NULL;
2137 	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
2138 	ct->dead.snapshot_ct = NULL;
2139 
2140 	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
2141 		/* A reset has occurred so re-arm the error reporting */
2142 		ct->dead.reason = 0;
2143 		ct->dead.reported = false;
2144 	}
2145 
2146 	spin_unlock_irq(&ct->dead.lock);
2147 }
2148 #endif
2149