xref: /linux/drivers/gpu/drm/xe/xe_guc_ct.c (revision 00e08fb2e7ce88e2ae366cbc79997d71d014b0ac)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_ct.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/circ_buf.h>
10 #include <linux/delay.h>
11 #include <linux/fault-inject.h>
12 
13 #include <kunit/static_stub.h>
14 
15 #include <drm/drm_managed.h>
16 
17 #include "abi/guc_actions_abi.h"
18 #include "abi/guc_actions_sriov_abi.h"
19 #include "abi/guc_klvs_abi.h"
20 #include "xe_bo.h"
21 #include "xe_devcoredump.h"
22 #include "xe_device.h"
23 #include "xe_gt.h"
24 #include "xe_gt_printk.h"
25 #include "xe_gt_sriov_pf_control.h"
26 #include "xe_gt_sriov_pf_monitor.h"
27 #include "xe_guc.h"
28 #include "xe_guc_log.h"
29 #include "xe_guc_pagefault.h"
30 #include "xe_guc_relay.h"
31 #include "xe_guc_submit.h"
32 #include "xe_guc_tlb_inval.h"
33 #include "xe_map.h"
34 #include "xe_pm.h"
35 #include "xe_sriov_vf.h"
36 #include "xe_trace_guc.h"
37 
38 static void receive_g2h(struct xe_guc_ct *ct);
39 static void g2h_worker_func(struct work_struct *w);
40 static void safe_mode_worker_func(struct work_struct *w);
41 static void ct_exit_safe_mode(struct xe_guc_ct *ct);
42 static void guc_ct_change_state(struct xe_guc_ct *ct,
43 				enum xe_guc_ct_state state);
44 
45 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
46 enum {
47 	/* Internal states, not error conditions */
48 	CT_DEAD_STATE_REARM,			/* 0x0001 */
49 	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */
50 
51 	/* Error conditions */
52 	CT_DEAD_SETUP,				/* 0x0004 */
53 	CT_DEAD_H2G_WRITE,			/* 0x0008 */
54 	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
55 	CT_DEAD_G2H_READ,			/* 0x0020 */
56 	CT_DEAD_G2H_RECV,			/* 0x0040 */
57 	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
58 	CT_DEAD_DEADLOCK,			/* 0x0100 */
59 	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
60 	CT_DEAD_FAST_G2H,			/* 0x0400 */
61 	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
62 	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
63 	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
64 	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
65 	CT_DEAD_CRASH,				/* 0x8000 */
66 };
67 
68 static void ct_dead_worker_func(struct work_struct *w);
69 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
70 
71 #define CT_DEAD(ct, ctb, reason_code)		ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
72 #else
73 #define CT_DEAD(ct, ctb, reason)			\
74 	do {						\
75 		struct guc_ctb *_ctb = (ctb);		\
76 		if (_ctb)				\
77 			_ctb->info.broken = true;	\
78 	} while (0)
79 #endif
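/*
 * Editorial note (illustrative, added for clarity): the CT_DEAD_* values
 * above are bit positions, not masks, so multiple reasons can accumulate in
 * a single reason mask - the hex values in the comments are the
 * corresponding single-bit masks. A sketch of how the re-arm path below
 * combines them:
 *
 *	ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);	// adds 0x0001
 *	ct->dead.reason |= (1 << CT_DEAD_SETUP);	// adds 0x0004
 */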
80 
81 /* Used when a CT send wants to block and / or receive data */
82 struct g2h_fence {
83 	u32 *response_buffer;
84 	u32 seqno;
85 	u32 response_data;
86 	u16 response_len;
87 	u16 error;
88 	u16 hint;
89 	u16 reason;
90 	bool cancel;
91 	bool retry;
92 	bool fail;
93 	bool done;
94 };
95 
96 static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
97 {
98 	memset(g2h_fence, 0, sizeof(*g2h_fence));
99 	g2h_fence->response_buffer = response_buffer;
100 	g2h_fence->seqno = ~0x0;
101 }
102 
103 static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
104 {
105 	g2h_fence->cancel = true;
106 	g2h_fence->fail = true;
107 	g2h_fence->done = true;
108 }
109 
110 static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
111 {
112 	return g2h_fence->seqno == ~0x0;
113 }
114 
115 static struct xe_guc *
116 ct_to_guc(struct xe_guc_ct *ct)
117 {
118 	return container_of(ct, struct xe_guc, ct);
119 }
120 
121 static struct xe_gt *
122 ct_to_gt(struct xe_guc_ct *ct)
123 {
124 	return container_of(ct, struct xe_gt, uc.guc.ct);
125 }
126 
127 static struct xe_device *
128 ct_to_xe(struct xe_guc_ct *ct)
129 {
130 	return gt_to_xe(ct_to_gt(ct));
131 }
132 
133 /**
134  * DOC: GuC CTB Blob
135  *
136  * We allocate single blob to hold both CTB descriptors and buffers:
137  *
138  *      +--------+-----------------------------------------------+------+
139  *      | offset | contents                                      | size |
140  *      +========+===============================================+======+
141  *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
142  *      +--------+-----------------------------------------------+  4K  |
143  *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
144  *      +--------+-----------------------------------------------+------+
145  *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
146  *      |        |                                               |      |
147  *      +--------+-----------------------------------------------+------+
148  *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
149  *      | + n*4K |                                               |      |
150  *      +--------+-----------------------------------------------+------+
151  *
152  * The size of each ``CT Buffer`` must be a multiple of 4K.
153  * We don't expect too many messages in flight at any time, unless we are
154  * using GuC submission. In that case each request requires a minimum of
155  * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
156  * is enough space to avoid backpressure on the driver. We increase the size
157  * of the receive buffer (relative to the send) to ensure a G2H response
158  * CTB has a landing spot.
159  *
160  * In addition to submissions, the G2H buffer needs to be able to hold
161  * enough space for recoverable page fault notifications. The number of
162  * page faults is interrupt driven and can be as much as the number of
163  * compute resources available. However, most of the actual work for these
164  * is in a separate page fault worker thread. Therefore we only need to
165  * make sure the queue has enough space to handle all of the submissions
166  * and responses and an extra buffer for incoming page faults.
167  */
168 
169 #define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
170 #define CTB_H2G_BUFFER_OFFSET	(CTB_DESC_SIZE * 2)
171 #define CTB_H2G_BUFFER_SIZE	(SZ_4K)
172 #define CTB_G2H_BUFFER_SIZE	(SZ_128K)
173 #define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)
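/*
 * Worked example (follows directly from the definitions above):
 * sizeof(struct guc_ct_buffer_desc) is well under 2K, so CTB_DESC_SIZE is
 * 2K and the two descriptors occupy the first 4K (CTB_H2G_BUFFER_OFFSET).
 * The H2G buffer then sits at 0x1000 with n = 1 (4K) and the G2H buffer at
 * 0x2000 with m = 32 (128K), for a 136K blob total - matching the
 * "GuC CTB Blob" diagram above.
 */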
174 
175 /**
176  * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
177  * CT command queue
178  * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
179  *
180  * Observation is that a 4KiB buffer full of commands takes a little over a
181  * second to process. Use that to calculate maximum time to process a full CT
182  * command queue.
183  *
184  * Return: Maximum time to process a full CT queue in jiffies.
185  */
186 long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
187 {
188 	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4K));
189 	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
190 }
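/*
 * Usage sketch (hypothetical caller, for illustration only): callers can pad
 * a timeout for work that may be queued behind a full H2G buffer. With the
 * current 4K CTB_H2G_BUFFER_SIZE the function evaluates to exactly HZ:
 *
 *	long timeout = HZ + xe_guc_ct_queue_proc_time_jiffies(ct);
 *
 *	if (!wait_event_timeout(wq, condition, timeout))
 *		ret = -ETIME;
 */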
191 
192 static size_t guc_ct_size(void)
193 {
194 	return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
195 		CTB_G2H_BUFFER_SIZE;
196 }
197 
198 static void guc_ct_fini(struct drm_device *drm, void *arg)
199 {
200 	struct xe_guc_ct *ct = arg;
201 
202 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
203 	cancel_work_sync(&ct->dead.worker);
204 #endif
205 	ct_exit_safe_mode(ct);
206 	destroy_workqueue(ct->g2h_wq);
207 	xa_destroy(&ct->fence_lookup);
208 }
209 
210 static void primelockdep(struct xe_guc_ct *ct)
211 {
212 	if (!IS_ENABLED(CONFIG_LOCKDEP))
213 		return;
214 
215 	fs_reclaim_acquire(GFP_KERNEL);
216 	might_lock(&ct->lock);
217 	fs_reclaim_release(GFP_KERNEL);
218 }
219 
220 int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
221 {
222 	struct xe_device *xe = ct_to_xe(ct);
223 	struct xe_gt *gt = ct_to_gt(ct);
224 	int err;
225 
226 	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
227 
228 	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
229 	if (!ct->g2h_wq)
230 		return -ENOMEM;
231 
232 	spin_lock_init(&ct->fast_lock);
233 	xa_init(&ct->fence_lookup);
234 	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
235 	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
236 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
237 	spin_lock_init(&ct->dead.lock);
238 	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
239 #endif
240 	init_waitqueue_head(&ct->wq);
241 	init_waitqueue_head(&ct->g2h_fence_wq);
242 
243 	err = drmm_mutex_init(&xe->drm, &ct->lock);
244 	if (err)
245 		return err;
246 
247 	primelockdep(ct);
248 
249 	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
250 	if (err)
251 		return err;
252 
253 	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
254 	ct->state = XE_GUC_CT_STATE_DISABLED;
255 	return 0;
256 }
257 ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
258 
259 static void guc_action_disable_ct(void *arg)
260 {
261 	struct xe_guc_ct *ct = arg;
262 
263 	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
264 }
265 
266 int xe_guc_ct_init(struct xe_guc_ct *ct)
267 {
268 	struct xe_device *xe = ct_to_xe(ct);
269 	struct xe_gt *gt = ct_to_gt(ct);
270 	struct xe_tile *tile = gt_to_tile(gt);
271 	struct xe_bo *bo;
272 
273 	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
274 					  XE_BO_FLAG_SYSTEM |
275 					  XE_BO_FLAG_GGTT |
276 					  XE_BO_FLAG_GGTT_INVALIDATE |
277 					  XE_BO_FLAG_PINNED_NORESTORE);
278 	if (IS_ERR(bo))
279 		return PTR_ERR(bo);
280 
281 	ct->bo = bo;
282 
283 	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
284 }
285 ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
286 
287 /**
288  * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
289  * @ct: the &xe_guc_ct
290  *
291  * Allocate a new BO in VRAM and free the previous BO that was allocated
292  * in system memory (SMEM). Applicable only for DGFX products.
293  *
294  * Return: 0 on success, or a negative errno on failure.
295  */
296 int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
297 {
298 	struct xe_device *xe = ct_to_xe(ct);
299 	struct xe_gt *gt = ct_to_gt(ct);
300 	struct xe_tile *tile = gt_to_tile(gt);
301 	int ret;
302 
303 	xe_assert(xe, !xe_guc_ct_enabled(ct));
304 
305 	if (IS_DGFX(xe)) {
306 		ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
307 		if (ret)
308 			return ret;
309 	}
310 
311 	devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
312 	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
313 }
314 
315 #define desc_read(xe_, guc_ctb__, field_)			\
316 	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
317 			struct guc_ct_buffer_desc, field_)
318 
319 #define desc_write(xe_, guc_ctb__, field_, val_)		\
320 	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
321 			struct guc_ct_buffer_desc, field_, val_)
322 
323 static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
324 				struct iosys_map *map)
325 {
326 	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
327 	h2g->info.resv_space = 0;
328 	h2g->info.tail = 0;
329 	h2g->info.head = 0;
330 	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
331 				     h2g->info.size) -
332 			  h2g->info.resv_space;
333 	h2g->info.broken = false;
334 
335 	h2g->desc = *map;
336 	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
337 
338 	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
339 }
340 
341 static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
342 				struct iosys_map *map)
343 {
344 	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
345 	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
346 	g2h->info.head = 0;
347 	g2h->info.tail = 0;
348 	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
349 				     g2h->info.size) -
350 			  g2h->info.resv_space;
351 	g2h->info.broken = false;
352 
353 	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
354 	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
355 
356 	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
357 					    CTB_H2G_BUFFER_SIZE);
358 }
359 
360 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
361 {
362 	struct xe_guc *guc = ct_to_guc(ct);
363 	u32 desc_addr, ctb_addr, size;
364 	int err;
365 
366 	desc_addr = xe_bo_ggtt_addr(ct->bo);
367 	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
368 	size = ct->ctbs.h2g.info.size * sizeof(u32);
369 
370 	err = xe_guc_self_cfg64(guc,
371 				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
372 				desc_addr);
373 	if (err)
374 		return err;
375 
376 	err = xe_guc_self_cfg64(guc,
377 				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
378 				ctb_addr);
379 	if (err)
380 		return err;
381 
382 	return xe_guc_self_cfg32(guc,
383 				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
384 				 size);
385 }
386 
387 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
388 {
389 	struct xe_guc *guc = ct_to_guc(ct);
390 	u32 desc_addr, ctb_addr, size;
391 	int err;
392 
393 	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
394 	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
395 		CTB_H2G_BUFFER_SIZE;
396 	size = ct->ctbs.g2h.info.size * sizeof(u32);
397 
398 	err = xe_guc_self_cfg64(guc,
399 				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
400 				desc_addr);
401 	if (err)
402 		return err;
403 
404 	err = xe_guc_self_cfg64(guc,
405 				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
406 				ctb_addr);
407 	if (err)
408 		return err;
409 
410 	return xe_guc_self_cfg32(guc,
411 				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
412 				 size);
413 }
414 
415 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
416 {
417 	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
418 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
419 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
420 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
421 			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
422 		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
423 			   enable ? GUC_CTB_CONTROL_ENABLE :
424 			   GUC_CTB_CONTROL_DISABLE),
425 	};
426 	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
427 
428 	return ret > 0 ? -EPROTO : ret;
429 }
430 
431 static void guc_ct_change_state(struct xe_guc_ct *ct,
432 				enum xe_guc_ct_state state)
433 {
434 	struct xe_gt *gt = ct_to_gt(ct);
435 	struct g2h_fence *g2h_fence;
436 	unsigned long idx;
437 
438 	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
439 	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */
440 
441 	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
442 		     state == XE_GUC_CT_STATE_STOPPED);
443 
444 	if (ct->g2h_outstanding)
445 		xe_pm_runtime_put(ct_to_xe(ct));
446 	ct->g2h_outstanding = 0;
447 	ct->state = state;
448 
449 	xe_gt_dbg(gt, "GuC CT communication channel %s\n",
450 		  state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
451 		  str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));
452 
453 	spin_unlock_irq(&ct->fast_lock);
454 
455 	/* cancel all in-flight send-recv requests */
456 	xa_for_each(&ct->fence_lookup, idx, g2h_fence)
457 		g2h_fence_cancel(g2h_fence);
458 
459 	/* make sure guc_ct_send_recv() will see g2h_fence changes */
460 	smp_mb();
461 	wake_up_all(&ct->g2h_fence_wq);
462 
463 	/*
464 	 * Lockdep doesn't like this under the fast lock and the destroy only
465 	 * needs to be serialized with the send path, which the ct lock provides.
466 	 */
467 	xa_destroy(&ct->fence_lookup);
468 
469 	mutex_unlock(&ct->lock);
470 }
471 
472 static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
473 {
474 	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
475 }
476 
477 static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
478 {
479 	if (!ct_needs_safe_mode(ct))
480 		return false;
481 
482 	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
483 	return true;
484 }
485 
486 static void safe_mode_worker_func(struct work_struct *w)
487 {
488 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
489 
490 	receive_g2h(ct);
491 
492 	if (!ct_restart_safe_mode_worker(ct))
493 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
494 }
495 
496 static void ct_enter_safe_mode(struct xe_guc_ct *ct)
497 {
498 	if (ct_restart_safe_mode_worker(ct))
499 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
500 }
501 
502 static void ct_exit_safe_mode(struct xe_guc_ct *ct)
503 {
504 	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
505 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
506 }
507 
508 static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
509 {
510 	struct xe_device *xe = ct_to_xe(ct);
511 	struct xe_gt *gt = ct_to_gt(ct);
512 	int err;
513 
514 	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
515 
516 	if (needs_register) {
517 		xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
518 		guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
519 		guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
520 
521 		err = guc_ct_ctb_h2g_register(ct);
522 		if (err)
523 			goto err_out;
524 
525 		err = guc_ct_ctb_g2h_register(ct);
526 		if (err)
527 			goto err_out;
528 
529 		err = guc_ct_control_toggle(ct, true);
530 		if (err)
531 			goto err_out;
532 	} else {
533 		ct->ctbs.h2g.info.broken = false;
534 		ct->ctbs.g2h.info.broken = false;
535 		/* Skip everything in H2G buffer */
536 		xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
537 			      CTB_H2G_BUFFER_SIZE);
538 	}
539 
540 	guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
541 
542 	smp_mb();
543 	wake_up_all(&ct->wq);
544 
545 	if (ct_needs_safe_mode(ct))
546 		ct_enter_safe_mode(ct);
547 
548 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
549 	/*
550 	 * The CT has now been reset so the dumper can be re-armed
551 	 * after any existing dead state has been dumped.
552 	 */
553 	spin_lock_irq(&ct->dead.lock);
554 	if (ct->dead.reason) {
555 		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
556 		queue_work(system_unbound_wq, &ct->dead.worker);
557 	}
558 	spin_unlock_irq(&ct->dead.lock);
559 #endif
560 
561 	return 0;
562 
563 err_out:
564 	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
565 	CT_DEAD(ct, NULL, SETUP);
566 
567 	return err;
568 }
569 
570 /**
571  * xe_guc_ct_restart() - Restart GuC CT
572  * @ct: the &xe_guc_ct
573  *
574  * Restart GuC CT to an empty state without issuing a CT register MMIO command.
575  *
576  * Return: 0 on success, or a negative errno on failure.
577  */
578 int xe_guc_ct_restart(struct xe_guc_ct *ct)
579 {
580 	return __xe_guc_ct_start(ct, false);
581 }
582 
583 /**
584  * xe_guc_ct_enable() - Enable GuC CT
585  * @ct: the &xe_guc_ct
586  *
587  * Enable GuC CT to an empty state and issue a CT register MMIO command.
588  *
589  * Return: 0 on success, or a negative errno on failure.
590  */
591 int xe_guc_ct_enable(struct xe_guc_ct *ct)
592 {
593 	return __xe_guc_ct_start(ct, true);
594 }
595 
596 static void stop_g2h_handler(struct xe_guc_ct *ct)
597 {
598 	cancel_work_sync(&ct->g2h_worker);
599 }
600 
601 /**
602  * xe_guc_ct_disable - Set GuC to disabled state
603  * @ct: the &xe_guc_ct
604  *
605  * Set GuC CT to the disabled state and stop the g2h handler. No outstanding
606  * g2h is expected in this transition.
607  */
608 void xe_guc_ct_disable(struct xe_guc_ct *ct)
609 {
610 	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
611 	ct_exit_safe_mode(ct);
612 	stop_g2h_handler(ct);
613 }
614 
615 /**
616  * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G
617  * @ct: the &xe_guc_ct
618  */
619 void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct)
620 {
621 	receive_g2h(ct);
622 	xe_guc_ct_stop(ct);
623 }
624 
625 /**
626  * xe_guc_ct_stop - Set GuC to stopped state
627  * @ct: the &xe_guc_ct
628  *
629  * Set GuC CT to the stopped state, stop the g2h handler, and clear any outstanding g2h.
630  */
631 void xe_guc_ct_stop(struct xe_guc_ct *ct)
632 {
633 	if (!xe_guc_ct_initialized(ct))
634 		return;
635 
636 	guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
637 	stop_g2h_handler(ct);
638 }
639 
640 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
641 {
642 	struct guc_ctb *h2g = &ct->ctbs.h2g;
643 
644 	lockdep_assert_held(&ct->lock);
645 
646 	if (cmd_len > h2g->info.space) {
647 		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
648 
649 		if (h2g->info.head > h2g->info.size) {
650 			struct xe_device *xe = ct_to_xe(ct);
651 			u32 desc_status = desc_read(xe, h2g, status);
652 
653 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
654 
655 			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
656 				  h2g->info.head, h2g->info.size);
657 			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
658 			return false;
659 		}
660 
661 		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
662 					     h2g->info.size) -
663 				  h2g->info.resv_space;
664 		if (cmd_len > h2g->info.space)
665 			return false;
666 	}
667 
668 	return true;
669 }
670 
671 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
672 {
673 	if (!g2h_len)
674 		return true;
675 
676 	lockdep_assert_held(&ct->fast_lock);
677 
678 	return ct->ctbs.g2h.info.space > g2h_len;
679 }
680 
681 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
682 {
683 	lockdep_assert_held(&ct->lock);
684 
685 	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
686 		return -EBUSY;
687 
688 	return 0;
689 }
690 
691 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
692 {
693 	lockdep_assert_held(&ct->lock);
694 	ct->ctbs.h2g.info.space -= cmd_len;
695 }
696 
697 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
698 {
699 	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
700 	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
701 		     (g2h_len && num_g2h));
702 
703 	if (g2h_len) {
704 		lockdep_assert_held(&ct->fast_lock);
705 
706 		if (!ct->g2h_outstanding)
707 			xe_pm_runtime_get_noresume(ct_to_xe(ct));
708 
709 		ct->ctbs.g2h.info.space -= g2h_len;
710 		ct->g2h_outstanding += num_g2h;
711 	}
712 }
713 
714 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
715 {
716 	bool bad = false;
717 
718 	lockdep_assert_held(&ct->fast_lock);
719 
720 	bad = ct->ctbs.g2h.info.space + g2h_len >
721 		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
722 	bad |= !ct->g2h_outstanding;
723 
724 	if (bad) {
725 		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
726 			  ct->ctbs.g2h.info.space, g2h_len,
727 			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
728 			  ct->ctbs.g2h.info.space + g2h_len,
729 			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
730 			  ct->g2h_outstanding);
731 		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
732 		return;
733 	}
734 
735 	ct->ctbs.g2h.info.space += g2h_len;
736 	if (!--ct->g2h_outstanding)
737 		xe_pm_runtime_put(ct_to_xe(ct));
738 }
739 
740 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
741 {
742 	spin_lock_irq(&ct->fast_lock);
743 	__g2h_release_space(ct, g2h_len);
744 	spin_unlock_irq(&ct->fast_lock);
745 }
746 
747 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
748 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
749 {
750 	unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
751 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
752 	unsigned long entries[SZ_32];
753 	unsigned int n;
754 
755 	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
756 
757 	/* May be called under spinlock, so avoid sleeping */
758 	ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
759 #endif
760 	ct->fast_req[slot].fence = fence;
761 	ct->fast_req[slot].action = action;
762 }
763 #else
764 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
765 {
766 }
767 #endif
768 
769 /*
770  * The CT protocol accepts a 16-bit fence. This field is fully owned by the
771  * driver; the GuC will just copy it to the reply message. Since we need to
772  * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
773  * we use one bit of the seqno as an indicator for that and a rolling counter
774  * for the remaining 15 bits.
775  */
776 #define CT_SEQNO_MASK GENMASK(14, 0)
777 #define CT_SEQNO_UNTRACKED BIT(15)
778 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
779 {
780 	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
781 
782 	if (!is_g2h_fence)
783 		seqno |= CT_SEQNO_UNTRACKED;
784 
785 	return seqno;
786 }
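/*
 * Example (illustrative): if ct->fence_seqno is currently 0x0005, a tracked
 * send-recv (is_g2h_fence == true) gets seqno 0x0005, while an untracked
 * FAST_REQUEST would get 0x8005 (CT_SEQNO_UNTRACKED set). The G2H response
 * parser uses that top bit to decide whether to look the fence up in
 * ct->fence_lookup.
 */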
787 
788 #define MAKE_ACTION(type, __action)				\
789 ({								\
790 	FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |			\
791 	FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |			\
792 		   GUC_HXG_EVENT_MSG_0_DATA0, __action);	\
793 })
794 
795 static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
796 {
797 	/*
798 	 * When resuming a VF, we can't reliably track whether context
799 	 * registration has completed in the GuC state machine. It is harmless
800 	 * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
801 	 * is used. Additionally, if there is an H2G protocol issue on a VF,
802 	 * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
803 	 * fail.
804 	 */
805 	return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
806 		(action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
807 		 action == XE_GUC_ACTION_REGISTER_CONTEXT);
808 }
809 
810 #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
811 
812 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
813 		     u32 ct_fence_value, bool want_response)
814 {
815 	struct xe_device *xe = ct_to_xe(ct);
816 	struct xe_gt *gt = ct_to_gt(ct);
817 	struct guc_ctb *h2g = &ct->ctbs.h2g;
818 	u32 cmd[H2G_CT_HEADERS];
819 	u32 tail = h2g->info.tail;
820 	u32 full_len;
821 	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
822 							 tail * sizeof(u32));
823 	u32 desc_status;
824 
825 	full_len = len + GUC_CTB_HDR_LEN;
826 
827 	lockdep_assert_held(&ct->lock);
828 	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
829 
830 	desc_status = desc_read(xe, h2g, status);
831 	if (desc_status) {
832 		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
833 		goto corrupted;
834 	}
835 
836 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
837 		u32 desc_tail = desc_read(xe, h2g, tail);
838 		u32 desc_head = desc_read(xe, h2g, head);
839 
840 		if (tail != desc_tail) {
841 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
842 			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
843 			goto corrupted;
844 		}
845 
846 		if (tail > h2g->info.size) {
847 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
848 			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
849 				  tail, h2g->info.size);
850 			goto corrupted;
851 		}
852 
853 		if (desc_head >= h2g->info.size) {
854 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
855 			xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
856 				  desc_head, h2g->info.size);
857 			goto corrupted;
858 		}
859 	}
860 
861 	/* Command will wrap, zero fill (NOPs), return and check credits again */
862 	if (tail + full_len > h2g->info.size) {
863 		xe_map_memset(xe, &map, 0, 0,
864 			      (h2g->info.size - tail) * sizeof(u32));
865 		h2g_reserve_space(ct, (h2g->info.size - tail));
866 		h2g->info.tail = 0;
867 		desc_write(xe, h2g, tail, h2g->info.tail);
868 
869 		return -EAGAIN;
870 	}
871 
872 	/*
873 	 * dw0: CT header (including fence)
874 	 * dw1: HXG header (including action code)
875 	 * dw2+: action data
876 	 */
877 	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
878 		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
879 		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
880 	if (want_response) {
881 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
882 	} else if (vf_action_can_safely_fail(xe, action[0])) {
883 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
884 	} else {
885 		fast_req_track(ct, ct_fence_value,
886 			       FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
887 
888 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
889 	}
890 
891 	/* H2G header in cmd[1] replaces action[0] so: */
892 	--len;
893 	++action;
894 
895 	/* Write H2G ensuring visible before descriptor update */
896 	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
897 	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
898 	xe_device_wmb(xe);
899 
900 	/* Update local copies */
901 	h2g->info.tail = (tail + full_len) % h2g->info.size;
902 	h2g_reserve_space(ct, full_len);
903 
904 	/* Update descriptor */
905 	desc_write(xe, h2g, tail, h2g->info.tail);
906 
907 	trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
908 			     desc_read(xe, h2g, head), h2g->info.tail);
909 
910 	return 0;
911 
912 corrupted:
913 	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
914 	return -EPIPE;
915 }
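/*
 * Worked example (hypothetical values): writing a 3-dword action
 * { ACTION_X, a, b } with fence 0x8001 and want_response == false lands in
 * the H2G buffer as full_len = len + GUC_CTB_HDR_LEN = 4 dwords:
 *
 *	dw0: FORMAT=HXG | NUM_DWORDS=3 | FENCE=0x8001	(CTB header)
 *	dw1: TYPE=FAST_REQUEST | ACTION=ACTION_X	(HXG header)
 *	dw2: a
 *	dw3: b
 *
 * action[0] is folded into the HXG header in dw1, which is why len is
 * decremented and the action pointer advanced before the second memcpy.
 */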
916 
917 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
918 				u32 len, u32 g2h_len, u32 num_g2h,
919 				struct g2h_fence *g2h_fence)
920 {
921 	struct xe_gt *gt = ct_to_gt(ct);
922 	u16 seqno;
923 	int ret;
924 
925 	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
926 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
927 	xe_gt_assert(gt, !num_g2h || !g2h_fence);
928 	xe_gt_assert(gt, !g2h_len || num_g2h);
929 	xe_gt_assert(gt, g2h_len || !num_g2h);
930 	lockdep_assert_held(&ct->lock);
931 
932 	if (unlikely(ct->ctbs.h2g.info.broken)) {
933 		ret = -EPIPE;
934 		goto out;
935 	}
936 
937 	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
938 		ret = -ENODEV;
939 		goto out;
940 	}
941 
942 	if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) {
943 		ret = -ECANCELED;
944 		goto out;
945 	}
946 
947 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
948 
949 	if (g2h_fence) {
950 		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
951 		num_g2h = 1;
952 
953 		if (g2h_fence_needs_alloc(g2h_fence)) {
954 			g2h_fence->seqno = next_ct_seqno(ct, true);
955 			ret = xa_err(xa_store(&ct->fence_lookup,
956 					      g2h_fence->seqno, g2h_fence,
957 					      GFP_ATOMIC));
958 			if (ret)
959 				goto out;
960 		}
961 
962 		seqno = g2h_fence->seqno;
963 	} else {
964 		seqno = next_ct_seqno(ct, false);
965 	}
966 
967 	if (g2h_len)
968 		spin_lock_irq(&ct->fast_lock);
969 retry:
970 	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
971 	if (unlikely(ret))
972 		goto out_unlock;
973 
974 	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
975 	if (unlikely(ret)) {
976 		if (ret == -EAGAIN)
977 			goto retry;
978 		goto out_unlock;
979 	}
980 
981 	__g2h_reserve_space(ct, g2h_len, num_g2h);
982 	xe_guc_notify(ct_to_guc(ct));
983 out_unlock:
984 	if (g2h_len)
985 		spin_unlock_irq(&ct->fast_lock);
986 out:
987 	return ret;
988 }
989 
990 static void kick_reset(struct xe_guc_ct *ct)
991 {
992 	xe_gt_reset_async(ct_to_gt(ct));
993 }
994 
995 static int dequeue_one_g2h(struct xe_guc_ct *ct);
996 
997 /*
998  * Wait before retrying an H2G message send.
999  * Return: true if ready for retry, false if the wait timed out
1000  */
1001 static bool guc_ct_send_wait_for_retry(struct xe_guc_ct *ct, u32 len,
1002 				       u32 g2h_len, struct g2h_fence *g2h_fence,
1003 				       unsigned int *sleep_period_ms)
1004 {
1005 	struct xe_device *xe = ct_to_xe(ct);
1006 
1007 	/*
1008 	 * We wait to try to restore credits for about 1 second before bailing.
1009 	 * In the case of H2G credits we have no choice but just to wait for the
1010 	 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
1011 	 * the case of G2H we process any G2H in the channel, hopefully freeing
1012 	 * credits as we consume the G2H messages.
1013 	 */
1014 	if (!h2g_has_room(ct, len + GUC_CTB_HDR_LEN)) {
1015 		struct guc_ctb *h2g = &ct->ctbs.h2g;
1016 
1017 		if (*sleep_period_ms == 1024)
1018 			return false;
1019 
1020 		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
1021 						 h2g->info.size,
1022 						 h2g->info.space,
1023 						 len + GUC_CTB_HDR_LEN);
1024 		msleep(*sleep_period_ms);
1025 		*sleep_period_ms <<= 1;
1026 	} else {
1027 		struct xe_device *xe = ct_to_xe(ct);
1028 		struct guc_ctb *g2h = &ct->ctbs.g2h;
1029 		int ret;
1030 
1031 		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
1032 						 desc_read(xe, g2h, tail),
1033 						 g2h->info.size,
1034 						 g2h->info.space,
1035 						 g2h_fence ?
1036 						 GUC_CTB_HXG_MSG_MAX_LEN :
1037 						 g2h_len);
1038 
1039 #define g2h_avail(ct)	\
1040 	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
1041 		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
1042 					g2h_avail(ct), HZ))
1043 			return false;
1044 #undef g2h_avail
1045 
1046 		ret = dequeue_one_g2h(ct);
1047 		if (ret < 0) {
1048 			if (ret != -ECANCELED)
1049 				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
1050 					  ERR_PTR(ret));
1051 			return false;
1052 		}
1053 	}
1054 	return true;
1055 }
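/*
 * Worked arithmetic for the "about 1 second" claim above: the H2G branch
 * sleeps 1, 2, 4, ... 512 ms on successive calls (doubling each time) and
 * bails out once *sleep_period_ms reaches 1024, i.e. after roughly
 * 1 + 2 + ... + 512 = 1023 ms of accumulated sleep.
 */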
1056 
1057 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1058 			      u32 g2h_len, u32 num_g2h,
1059 			      struct g2h_fence *g2h_fence)
1060 {
1061 	struct xe_gt *gt = ct_to_gt(ct);
1062 	unsigned int sleep_period_ms = 1;
1063 	int ret;
1064 
1065 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
1066 	lockdep_assert_held(&ct->lock);
1067 	xe_device_assert_mem_access(ct_to_xe(ct));
1068 
1069 try_again:
1070 	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
1071 				   g2h_fence);
1072 
1073 	if (unlikely(ret == -EBUSY)) {
1074 		if (!guc_ct_send_wait_for_retry(ct, len, g2h_len, g2h_fence,
1075 						&sleep_period_ms))
1076 			goto broken;
1077 		goto try_again;
1078 	}
1079 
1080 	return ret;
1081 
1082 broken:
1083 	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
1084 	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
1085 
1086 	return -EDEADLK;
1087 }
1088 
1089 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1090 		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
1091 {
1092 	int ret;
1093 
1094 	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
1095 
1096 	mutex_lock(&ct->lock);
1097 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
1098 	mutex_unlock(&ct->lock);
1099 
1100 	return ret;
1101 }
1102 
1103 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1104 		   u32 g2h_len, u32 num_g2h)
1105 {
1106 	int ret;
1107 
1108 	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
1109 	if (ret == -EDEADLK)
1110 		kick_reset(ct);
1111 
1112 	return ret;
1113 }
1114 
1115 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1116 			  u32 g2h_len, u32 num_g2h)
1117 {
1118 	int ret;
1119 
1120 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
1121 	if (ret == -EDEADLK)
1122 		kick_reset(ct);
1123 
1124 	return ret;
1125 }
1126 
1127 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
1128 {
1129 	int ret;
1130 
1131 	lockdep_assert_held(&ct->lock);
1132 
1133 	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
1134 	if (ret == -EDEADLK)
1135 		kick_reset(ct);
1136 
1137 	return ret;
1138 }
1139 
1140 /*
1141  * Check if a GT reset is in progress or will occur, and if the GT reset brought
1142  * the CT back up. 5 seconds is a somewhat arbitrary upper limit for a GT reset.
1143  */
1144 static bool retry_failure(struct xe_guc_ct *ct, int ret)
1145 {
1146 	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
1147 		return false;
1148 
1149 #define ct_alive(ct)	\
1150 	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
1151 	 !ct->ctbs.g2h.info.broken)
1152 	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
1153 		return false;
1154 #undef ct_alive
1155 
1156 	return true;
1157 }
1158 
1159 #define GUC_SEND_RETRY_LIMIT	50
1160 #define GUC_SEND_RETRY_MSLEEP	5
1161 
1162 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1163 			    u32 *response_buffer, bool no_fail)
1164 {
1165 	struct xe_gt *gt = ct_to_gt(ct);
1166 	struct g2h_fence g2h_fence;
1167 	unsigned int retries = 0;
1168 	int ret = 0;
1169 
1170 	/*
1171 	 * We use a fence to implement blocking sends / receiving response data.
1172 	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
1173 	 * an xarray is used as the storage medium with the seqno as the key.
1174 	 * Fields in the fence hold success, failure, retry status and the
1175 	 * response data. Safe to allocate on the stack as the xarray is the
1176 	 * only reference and it cannot be present after this function exits.
1177 	 */
1178 retry:
1179 	g2h_fence_init(&g2h_fence, response_buffer);
1180 retry_same_fence:
1181 	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
1182 	if (unlikely(ret == -ENOMEM)) {
1183 		/* Retry allocation w/ GFP_KERNEL */
1184 		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
1185 				      &g2h_fence, GFP_KERNEL));
1186 		if (ret)
1187 			return ret;
1188 
1189 		goto retry_same_fence;
1190 	} else if (unlikely(ret)) {
1191 		if (ret == -EDEADLK)
1192 			kick_reset(ct);
1193 
1194 		if (no_fail && retry_failure(ct, ret))
1195 			goto retry_same_fence;
1196 
1197 		if (!g2h_fence_needs_alloc(&g2h_fence))
1198 			xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1199 
1200 		return ret;
1201 	}
1202 
1203 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
1204 	if (!ret) {
1205 		LNL_FLUSH_WORK(&ct->g2h_worker);
1206 		if (g2h_fence.done) {
1207 			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
1208 				   g2h_fence.seqno, action[0]);
1209 			ret = 1;
1210 		}
1211 	}
1212 
1213 	/*
1214 	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
1215 	 * the stack, since we have no clue if it will fire after the timeout before we can erase
1216 	 * from the xa. Also we have some dependent loads and stores below for which we need the
1217 	 * correct ordering, and we lack the needed barriers.
1218 	 */
1219 	mutex_lock(&ct->lock);
1220 	if (!ret) {
1221 		xe_gt_err(gt, "Timed out waiting for G2H, fence %u, action %04x, done %s",
1222 			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
1223 		xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1224 		mutex_unlock(&ct->lock);
1225 		return -ETIME;
1226 	}
1227 
1228 	if (g2h_fence.retry) {
1229 		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
1230 			  action[0], g2h_fence.reason);
1231 		mutex_unlock(&ct->lock);
1232 		if (++retries > GUC_SEND_RETRY_LIMIT) {
1233 			xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
1234 				  action[0], GUC_SEND_RETRY_LIMIT);
1235 			return -ELOOP;
1236 		}
1237 		msleep(GUC_SEND_RETRY_MSLEEP * retries);
1238 		goto retry;
1239 	}
1240 	if (g2h_fence.fail) {
1241 		if (g2h_fence.cancel) {
1242 			xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
1243 			ret = -ECANCELED;
1244 			goto unlock;
1245 		}
1246 		xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
1247 			  action[0], g2h_fence.error, g2h_fence.hint);
1248 		ret = -EIO;
1249 	}
1250 
1251 	if (ret > 0)
1252 		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
1253 
1254 unlock:
1255 	mutex_unlock(&ct->lock);
1256 
1257 	return ret;
1258 }
1259 
1260 /**
1261  * xe_guc_ct_send_recv - Send and receive HXG to the GuC
1262  * @ct: the &xe_guc_ct
1263  * @action: the dword array with `HXG Request`_ message (can't be NULL)
1264  * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
1265  * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
1266  *
1267  * Send a `HXG Request`_ message to the GuC over CT communication channel and
1268  * blocks until GuC replies with a `HXG Response`_ message.
1269  *
1270  * For non-blocking communication with GuC use xe_guc_ct_send().
1271  *
1272  * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
1273  *
1274  * Return: response length (in dwords) if &response_buffer was not NULL, or
1275  *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
1276  *         a negative error code on failure.
1277  */
1278 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1279 			u32 *response_buffer)
1280 {
1281 	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
1282 	return guc_ct_send_recv(ct, action, len, response_buffer, false);
1283 }
1284 ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
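/*
 * Caller sketch (hypothetical action and data, for illustration only):
 *
 *	u32 action[] = { SOME_GUC_ACTION, data0, data1 };
 *	int ret;
 *
 *	ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *	if (ret < 0)
 *		return ret;	// send failed or GuC replied with failure
 *	// with a NULL response_buffer, ret holds DATA0 of the HXG response
 */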
1285 
1286 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
1287 				u32 len, u32 *response_buffer)
1288 {
1289 	return guc_ct_send_recv(ct, action, len, response_buffer, true);
1290 }
1291 
1292 static u32 *msg_to_hxg(u32 *msg)
1293 {
1294 	return msg + GUC_CTB_MSG_MIN_LEN;
1295 }
1296 
1297 static u32 msg_len_to_hxg_len(u32 len)
1298 {
1299 	return len - GUC_CTB_MSG_MIN_LEN;
1300 }
1301 
1302 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
1303 {
1304 	u32 *hxg = msg_to_hxg(msg);
1305 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1306 
1307 	lockdep_assert_held(&ct->lock);
1308 
1309 	switch (action) {
1310 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1311 	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1312 	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1313 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1314 		g2h_release_space(ct, len);
1315 	}
1316 
1317 	return 0;
1318 }
1319 
1320 static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
1321 {
1322 	struct xe_gt *gt = ct_to_gt(ct);
1323 
1324 	if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
1325 		xe_gt_err(gt, "GuC Crash dump notification\n");
1326 	else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
1327 		xe_gt_err(gt, "GuC Exception notification\n");
1328 	else
1329 		xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
1330 
1331 	CT_DEAD(ct, NULL, CRASH);
1332 
1333 	kick_reset(ct);
1334 
1335 	return 0;
1336 }
1337 
1338 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
1339 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1340 {
1341 	u16 fence_min = U16_MAX, fence_max = 0;
1342 	struct xe_gt *gt = ct_to_gt(ct);
1343 	bool found = false;
1344 	unsigned int n;
1345 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1346 	char *buf;
1347 #endif
1348 
1349 	lockdep_assert_held(&ct->lock);
1350 
1351 	for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
1352 		if (ct->fast_req[n].fence < fence_min)
1353 			fence_min = ct->fast_req[n].fence;
1354 		if (ct->fast_req[n].fence > fence_max)
1355 			fence_max = ct->fast_req[n].fence;
1356 
1357 		if (ct->fast_req[n].fence != fence)
1358 			continue;
1359 		found = true;
1360 
1361 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1362 		buf = kmalloc(SZ_4K, GFP_NOWAIT);
1363 		if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
1364 			xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
1365 				  fence, ct->fast_req[n].action, buf);
1366 		else
1367 			xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
1368 				  fence, ct->fast_req[n].action);
1369 		kfree(buf);
1370 #else
1371 		xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
1372 			  fence, ct->fast_req[n].action);
1373 #endif
1374 		break;
1375 	}
1376 
1377 	if (!found)
1378 		xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
1379 			   fence, fence_min, fence_max, ct->fence_seqno);
1380 }
1381 #else
1382 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1383 {
1384 }
1385 #endif
1386 
1387 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
1388 {
1389 	struct xe_gt *gt =  ct_to_gt(ct);
1390 	u32 *hxg = msg_to_hxg(msg);
1391 	u32 hxg_len = msg_len_to_hxg_len(len);
1392 	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
1393 	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1394 	struct g2h_fence *g2h_fence;
1395 
1396 	lockdep_assert_held(&ct->lock);
1397 
1398 	/*
1399 	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
1400 	 * Those messages should never fail, so if we do get an error back it
1401 	 * means we're likely doing an illegal operation and the GuC is
1402 	 * rejecting it. We have no way to inform the code that submitted the
1403 	 * H2G that the message was rejected, so we need to escalate the
1404 	 * failure to trigger a reset.
1405 	 */
1406 	if (fence & CT_SEQNO_UNTRACKED) {
1407 		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
1408 			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
1409 				  fence,
1410 				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
1411 				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
1412 		else
1413 			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
1414 				  type, fence);
1415 
1416 		fast_req_report(ct, fence);
1417 
1418 		/* FIXME: W/A for a race in the GuC; the fix will land in firmware soon */
1419 		if (xe_gt_recovery_pending(gt))
1420 			return 0;
1421 
1422 		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
1423 
1424 		return -EPROTO;
1425 	}
1426 
1427 	g2h_fence = xa_erase(&ct->fence_lookup, fence);
1428 	if (unlikely(!g2h_fence)) {
1429 		/* Don't tear down channel, as send could've timed out */
1430 		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
1431 		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
1432 		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1433 		return 0;
1434 	}
1435 
1436 	xe_gt_assert(gt, fence == g2h_fence->seqno);
1437 
1438 	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
1439 		g2h_fence->fail = true;
1440 		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
1441 		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
1442 	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
1443 		g2h_fence->retry = true;
1444 		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
1445 	} else if (g2h_fence->response_buffer) {
1446 		g2h_fence->response_len = hxg_len;
1447 		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
1448 	} else {
1449 		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
1450 	}
1451 
1452 	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1453 
1454 	g2h_fence->done = true;
1455 	smp_mb();
1456 
1457 	wake_up_all(&ct->g2h_fence_wq);
1458 
1459 	return 0;
1460 }
1461 
1462 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1463 {
1464 	struct xe_gt *gt = ct_to_gt(ct);
1465 	u32 *hxg = msg_to_hxg(msg);
1466 	u32 origin, type;
1467 	int ret;
1468 
1469 	lockdep_assert_held(&ct->lock);
1470 
1471 	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1472 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1473 		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
1474 			  origin);
1475 		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
1476 
1477 		return -EPROTO;
1478 	}
1479 
1480 	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1481 	switch (type) {
1482 	case GUC_HXG_TYPE_EVENT:
1483 		ret = parse_g2h_event(ct, msg, len);
1484 		break;
1485 	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1486 	case GUC_HXG_TYPE_RESPONSE_FAILURE:
1487 	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
1488 		ret = parse_g2h_response(ct, msg, len);
1489 		break;
1490 	default:
1491 		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
1492 			  type);
1493 		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
1494 
1495 		ret = -EOPNOTSUPP;
1496 	}
1497 
1498 	return ret;
1499 }
1500 
1501 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1502 {
1503 	struct xe_guc *guc = ct_to_guc(ct);
1504 	struct xe_gt *gt = ct_to_gt(ct);
1505 	u32 hxg_len = msg_len_to_hxg_len(len);
1506 	u32 *hxg = msg_to_hxg(msg);
1507 	u32 action, adj_len;
1508 	u32 *payload;
1509 	int ret = 0;
1510 
1511 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1512 		return 0;
1513 
1514 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1515 	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
1516 	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
1517 
1518 	switch (action) {
1519 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1520 		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
1521 		break;
1522 	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1523 		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
1524 		break;
1525 	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1526 		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
1527 		break;
1528 	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1529 		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
1530 							      adj_len);
1531 		break;
1532 	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1533 		/* Selftest only at the moment */
1534 		break;
1535 	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1536 		ret = xe_guc_error_capture_handler(guc, payload, adj_len);
1537 		break;
1538 	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1539 		/* FIXME: Handle this */
1540 		break;
1541 	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
1542 		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
1543 								 adj_len);
1544 		break;
1545 	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1546 		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1547 		break;
1548 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1549 		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1550 		break;
1551 	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
1552 		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
1553 		break;
1554 	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
1555 		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
1556 		break;
1557 	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
1558 		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
1559 		break;
1560 	case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
1561 		ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
1562 		break;
1563 	case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
1564 	case XE_GUC_ACTION_NOTIFY_EXCEPTION:
1565 		ret = guc_crash_process_msg(ct, action);
1566 		break;
1567 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1568 	case XE_GUC_ACTION_TEST_G2G_RECV:
1569 		ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
1570 		break;
1571 #endif
1572 	default:
1573 		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
1574 	}
1575 
1576 	if (ret) {
1577 		xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
1578 			  action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
1579 		CT_DEAD(ct, NULL, PROCESS_FAILED);
1580 	}
1581 
1582 	return 0;
1583 }
1584 
1585 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1586 {
1587 	struct xe_device *xe = ct_to_xe(ct);
1588 	struct xe_gt *gt = ct_to_gt(ct);
1589 	struct guc_ctb *g2h = &ct->ctbs.g2h;
1590 	u32 tail, head, len, desc_status;
1591 	s32 avail;
1592 	u32 action;
1593 	u32 *hxg;
1594 
1595 	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
1596 	lockdep_assert_held(&ct->fast_lock);
1597 
1598 	if (ct->state == XE_GUC_CT_STATE_DISABLED)
1599 		return -ENODEV;
1600 
1601 	if (ct->state == XE_GUC_CT_STATE_STOPPED)
1602 		return -ECANCELED;
1603 
1604 	if (g2h->info.broken)
1605 		return -EPIPE;
1606 
1607 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
1608 
1609 	desc_status = desc_read(xe, g2h, status);
1610 	if (desc_status) {
1611 		if (desc_status & GUC_CTB_STATUS_DISABLED) {
1612 			/*
1613 			 * Potentially valid if a CLIENT_RESET request resulted in
1614 			 * contexts/engines being reset. But should never happen as
1615 			 * no contexts should be active when CLIENT_RESET is sent.
1616 			 */
1617 			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
1618 			desc_status &= ~GUC_CTB_STATUS_DISABLED;
1619 		}
1620 
1621 		if (desc_status) {
1622 			xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
1623 			goto corrupted;
1624 		}
1625 	}
1626 
1627 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
1628 		u32 desc_tail = desc_read(xe, g2h, tail);
1629 		/*
1630 		 * info.head and desc_head are updated back-to-back at the end of
1631 		 * this function and nowhere else. Hence, they cannot be different
1632 		 * unless two g2h_read calls are running concurrently. Which is not
1633 		 * possible because it is guarded by ct->fast_lock. And yet, some
1634 		 * discrete platforms are regularly hitting this error :(.
1635 		 *
1636 		 * desc_head rolling backwards shouldn't cause any noticeable
1637 		 * problems - just a delay in GuC being allowed to proceed past that
1638 		 * point in the queue. So for now, just disable the error until it
1639 		 * can be root caused. The check below is kept for reference:
1640 		 *
1641 		u32 desc_head = desc_read(xe, g2h, head);
1642 
1643 		if (g2h->info.head != desc_head) {
1644 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
1645 			xe_gt_err(gt, "CT read: head was modified %u != %u\n",
1646 				  desc_head, g2h->info.head);
1647 			goto corrupted;
1648 		}
1649 		 */
1650 
1651 		if (g2h->info.head > g2h->info.size) {
1652 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1653 			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
1654 				  g2h->info.head, g2h->info.size);
1655 			goto corrupted;
1656 		}
1657 
1658 		if (desc_tail >= g2h->info.size) {
1659 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1660 			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
1661 				  desc_tail, g2h->info.size);
1662 			goto corrupted;
1663 		}
1664 	}
1665 
1666 	/* Calculate DW available to read */
1667 	tail = desc_read(xe, g2h, tail);
1668 	avail = tail - g2h->info.head;
1669 	if (unlikely(avail == 0))
1670 		return 0;
1671 
1672 	if (avail < 0)
1673 		avail += g2h->info.size;
1674 
1675 	/* Read header */
1676 	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
1677 			   sizeof(u32));
1678 	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1679 	if (len > avail) {
1680 		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
1681 			  avail, len);
1682 		goto corrupted;
1683 	}
1684 
1685 	head = (g2h->info.head + 1) % g2h->info.size;
1686 	avail = len - 1;
1687 
1688 	/* Read G2H message */
1689 	if (avail + head > g2h->info.size) {
1690 		u32 avail_til_wrap = g2h->info.size - head;
1691 
1692 		xe_map_memcpy_from(xe, msg + 1,
1693 				   &g2h->cmds, sizeof(u32) * head,
1694 				   avail_til_wrap * sizeof(u32));
1695 		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
1696 				   &g2h->cmds, 0,
1697 				   (avail - avail_til_wrap) * sizeof(u32));
1698 	} else {
1699 		xe_map_memcpy_from(xe, msg + 1,
1700 				   &g2h->cmds, sizeof(u32) * head,
1701 				   avail * sizeof(u32));
1702 	}
1703 
1704 	hxg = msg_to_hxg(msg);
1705 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1706 
1707 	if (fast_path) {
1708 		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1709 			return 0;
1710 
1711 		switch (action) {
1712 		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1713 		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1714 			break;	/* Process these in fast-path */
1715 		default:
1716 			return 0;
1717 		}
1718 	}
1719 
1720 	/* Update local / descriptor header */
1721 	g2h->info.head = (head + avail) % g2h->info.size;
1722 	desc_write(xe, g2h, head, g2h->info.head);
1723 
1724 	trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
1725 			     action, len, g2h->info.head, tail);
1726 
1727 	return len;
1728 
1729 corrupted:
1730 	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
1731 	return -EPROTO;
1732 }
1733 
1734 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1735 {
1736 	struct xe_gt *gt = ct_to_gt(ct);
1737 	struct xe_guc *guc = ct_to_guc(ct);
1738 	u32 hxg_len = msg_len_to_hxg_len(len);
1739 	u32 *hxg = msg_to_hxg(msg);
1740 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1741 	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
1742 	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
1743 	int ret = 0;
1744 
1745 	switch (action) {
1746 	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1747 		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1748 		break;
1749 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1750 		__g2h_release_space(ct, len);
1751 		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1752 		break;
1753 	default:
1754 		xe_gt_warn(gt, "NOT_POSSIBLE");
1755 	}
1756 
1757 	if (ret) {
1758 		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
1759 			  action, ERR_PTR(ret));
1760 		CT_DEAD(ct, NULL, FAST_G2H);
1761 	}
1762 }
1763 
1764 /**
1765  * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
1766  * @ct: GuC CT object
1767  *
 * Anything related to page faults is critical for performance, so process
 * these critical G2H messages directly in the IRQ handler. This is safe
 * because these handlers either just wake up waiters or queue another worker.
1771  */
1772 void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1773 {
1774 	struct xe_device *xe = ct_to_xe(ct);
1775 	bool ongoing;
1776 	int len;
1777 
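	/*
	 * See receive_g2h() for why it is safe to proceed without a runtime pm
	 * reference when called from within the pm callback task itself.
	 */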
	ongoing = xe_pm_runtime_get_if_active(xe);
	if (!ongoing && xe_pm_read_callback_task(xe) == NULL)
		return;
1781 
1782 	spin_lock(&ct->fast_lock);
1783 	do {
1784 		len = g2h_read(ct, ct->fast_msg, true);
1785 		if (len > 0)
1786 			g2h_fast_path(ct, ct->fast_msg, len);
1787 	} while (len > 0);
1788 	spin_unlock(&ct->fast_lock);
1789 
1790 	if (ongoing)
1791 		xe_pm_runtime_put(xe);
1792 }
1793 
1794 /* Returns less than zero on error, 0 on done, 1 on more available */
1795 static int dequeue_one_g2h(struct xe_guc_ct *ct)
1796 {
1797 	int len;
1798 	int ret;
1799 
1800 	lockdep_assert_held(&ct->lock);
1801 
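	/* ct->fast_lock serializes ring reads against the IRQ fast path */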
1802 	spin_lock_irq(&ct->fast_lock);
1803 	len = g2h_read(ct, ct->msg, false);
1804 	spin_unlock_irq(&ct->fast_lock);
1805 	if (len <= 0)
1806 		return len;
1807 
1808 	ret = parse_g2h_msg(ct, ct->msg, len);
1809 	if (unlikely(ret < 0))
1810 		return ret;
1811 
1812 	ret = process_g2h_msg(ct, ct->msg, len);
1813 	if (unlikely(ret < 0))
1814 		return ret;
1815 
1816 	return 1;
1817 }
1818 
1819 static void receive_g2h(struct xe_guc_ct *ct)
1820 {
1821 	bool ongoing;
1822 	int ret;
1823 
1824 	/*
	 * Normal users must always hold mem_access.ref around CT calls.
	 * However, during the runtime pm callbacks we rely on CT to talk to
	 * the GuC, and at this stage we can't rely on mem_access.ref, and even
	 * the callback_task will be different from current. For such cases we
	 * just need to ensure we always process the responses from any
	 * blocking ct_send requests, or wherever we otherwise expect some
	 * response when initiated from those callbacks (which will need to
	 * wait for the below dequeue_one_g2h()). The dequeue_one_g2h() will
	 * gracefully fail if the device has suspended to the point that the
	 * CT communication has been disabled.
	 *
	 * If we are inside the runtime pm callback, we can be the only task
	 * still issuing CT requests (since that requires holding the
	 * mem_access.ref). It might in theory be possible to receive
	 * unsolicited events from the GuC just as we are suspending-resuming,
	 * but those will currently be lost anyway when eventually exiting
	 * from suspend, hence there is no need to wake up the device here. If
	 * we ever need something stronger than xe_pm_runtime_get_if_active()
	 * then we need to be careful about blocking the pm callbacks from
	 * getting CT responses: if the worker here is blocked on those
	 * callbacks completing, we have created a deadlock.
1846 	 */
1847 	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1848 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1849 		return;
1850 
1851 	do {
1852 		mutex_lock(&ct->lock);
1853 		ret = dequeue_one_g2h(ct);
1854 		mutex_unlock(&ct->lock);
1855 
1856 		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d\n", ret);
1858 			CT_DEAD(ct, NULL, G2H_RECV);
1859 			kick_reset(ct);
1860 		}
1861 	} while (ret == 1);
1862 
1863 	if (ongoing)
1864 		xe_pm_runtime_put(ct_to_xe(ct));
1865 }
1866 
1867 static void g2h_worker_func(struct work_struct *w)
1868 {
1869 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1870 
1871 	receive_g2h(ct);
1872 }
1873 
1874 static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
1875 							bool want_ctb)
1876 {
1877 	struct xe_guc_ct_snapshot *snapshot;
1878 
1879 	snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
1880 	if (!snapshot)
1881 		return NULL;
1882 
1883 	if (ct->bo && want_ctb) {
1884 		snapshot->ctb_size = xe_bo_size(ct->bo);
1885 		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
1886 	}
1887 
1888 	return snapshot;
1889 }
1890 
1891 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
1892 				     struct guc_ctb_snapshot *snapshot)
1893 {
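	/*
	 * Capture both the GuC-visible descriptor (from the shared buffer)
	 * and the driver's local bookkeeping so the two views can be compared.
	 */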
1894 	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
1895 			   sizeof(struct guc_ct_buffer_desc));
1896 	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
1897 }
1898 
1899 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
1900 				   struct drm_printer *p)
1901 {
1902 	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
1903 	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
1904 	drm_printf(p, "\thead: %d\n", snapshot->info.head);
1905 	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
1906 	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
1907 	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
1908 	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
1909 	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
1910 	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
1911 }
1912 
1913 static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
1914 							  bool want_ctb)
1915 {
1916 	struct xe_device *xe = ct_to_xe(ct);
1917 	struct xe_guc_ct_snapshot *snapshot;
1918 
1919 	snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
1920 	if (!snapshot) {
1921 		xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
1922 		return NULL;
1923 	}
1924 
1925 	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
1926 		snapshot->ct_enabled = true;
1927 		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
1928 		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
1929 		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
1930 	}
1931 
1932 	if (ct->bo && snapshot->ctb)
1933 		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
1934 
1935 	return snapshot;
1936 }
1937 
1938 /**
1939  * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
1940  * @ct: GuC CT object.
1941  *
 * The snapshot can be printed out at a later stage, e.g. during dev_coredump
 * analysis. It is safe to call this function from atomic context.
1944  *
1945  * Returns: a GuC CT snapshot object that must be freed by the caller
1946  * by using `xe_guc_ct_snapshot_free`.
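 *
 * A minimal usage sketch (illustrative only; assumes a valid &struct
 * xe_guc_ct pointer "ct" and an initialized &struct drm_printer "p"):
 *
 *	struct xe_guc_ct_snapshot *snap = xe_guc_ct_snapshot_capture(ct);
 *
 *	if (snap) {
 *		xe_guc_ct_snapshot_print(snap, &p);
 *		xe_guc_ct_snapshot_free(snap);
 *	}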
1947  */
1948 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
1949 {
1950 	return guc_ct_snapshot_capture(ct, true, true);
1951 }
1952 
1953 /**
1954  * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
1955  * @snapshot: GuC CT snapshot object.
1956  * @p: drm_printer where it will be printed out.
1957  *
1958  * This function prints out a given GuC CT snapshot object.
1959  */
1960 void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
1961 			      struct drm_printer *p)
1962 {
1963 	if (!snapshot)
1964 		return;
1965 
1966 	if (snapshot->ct_enabled) {
1967 		drm_puts(p, "H2G CTB (all sizes in DW):\n");
1968 		guc_ctb_snapshot_print(&snapshot->h2g, p);
1969 
1970 		drm_puts(p, "G2H CTB (all sizes in DW):\n");
1971 		guc_ctb_snapshot_print(&snapshot->g2h, p);
1972 		drm_printf(p, "\tg2h outstanding: %d\n",
1973 			   snapshot->g2h_outstanding);
1974 
1975 		if (snapshot->ctb) {
1976 			drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
1977 			xe_print_blob_ascii85(p, "[CTB].data", '\n',
1978 					      snapshot->ctb, 0, snapshot->ctb_size);
1979 		}
1980 	} else {
1981 		drm_puts(p, "CT disabled\n");
1982 	}
1983 }
1984 
1985 /**
1986  * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
1987  * @snapshot: GuC CT snapshot object.
1988  *
 * This function frees all the memory that was allocated at capture time.
1991  */
1992 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
1993 {
1994 	if (!snapshot)
1995 		return;
1996 
1997 	kfree(snapshot->ctb);
1998 	kfree(snapshot);
1999 }
2000 
2001 /**
2002  * xe_guc_ct_print - GuC CT Print.
2003  * @ct: GuC CT.
2004  * @p: drm_printer where it will be printed out.
2005  * @want_ctb: Should the full CTB content be dumped (vs just the headers)
2006  *
2007  * This function will quickly capture a snapshot of the CT state
2008  * and immediately print it out.
2009  */
2010 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
2011 {
2012 	struct xe_guc_ct_snapshot *snapshot;
2013 
2014 	snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
2015 	xe_guc_ct_snapshot_print(snapshot, p);
2016 	xe_guc_ct_snapshot_free(snapshot);
2017 }
2018 
2019 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
2020 
2021 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
2022 /*
 * This is a helper function which assists the driver in identifying if a fault
 * injection test is currently active, allowing it to reduce unnecessary debug
 * output. Typically, the function returns zero, but the fault injection
 * framework can alter this to return an error. Since faults are injected
 * through this function, it's important to ensure the compiler doesn't
 * optimize it into an inline function. The 'noinline' attribute is applied to
 * prevent that, as the compiler is otherwise free to inline a small static
 * function such as this one.
2031  */
2032 noinline int xe_is_injection_active(void) { return 0; }
2033 ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
2034 #else
2035 int xe_is_injection_active(void) { return 0; }
2036 #endif
2037 
2038 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
2039 {
2040 	struct xe_guc_log_snapshot *snapshot_log;
2041 	struct xe_guc_ct_snapshot *snapshot_ct;
2042 	struct xe_guc *guc = ct_to_guc(ct);
2043 	unsigned long flags;
2044 	bool have_capture;
2045 
2046 	if (ctb)
2047 		ctb->info.broken = true;
2048 	/*
	 * A huge dump gets generated when injecting errors into the GuC
	 * CT/MMIO functions, so suppress the dump while a fault is being
	 * injected.
2051 	 */
2052 	if (xe_is_injection_active())
2053 		return;
2054 
2055 	/* Ignore further errors after the first dump until a reset */
2056 	if (ct->dead.reported)
2057 		return;
2058 
2059 	spin_lock_irqsave(&ct->dead.lock, flags);
2060 
2061 	/* And only capture one dump at a time */
2062 	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
2063 	ct->dead.reason |= (1 << reason_code) |
2064 			   (1 << CT_DEAD_STATE_CAPTURE);
2065 
2066 	spin_unlock_irqrestore(&ct->dead.lock, flags);
2067 
2068 	if (have_capture)
2069 		return;
2070 
2071 	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
	snapshot_ct = xe_guc_ct_snapshot_capture(ct);
2073 
2074 	spin_lock_irqsave(&ct->dead.lock, flags);
2075 
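	/*
	 * Only one capture should be in flight at a time (the CAPTURE bit is
	 * set above), so finding snapshots already stored here indicates a
	 * bug; free the just-taken snapshots and keep the existing ones.
	 */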
2076 	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
2077 		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
2078 		xe_guc_log_snapshot_free(snapshot_log);
2079 		xe_guc_ct_snapshot_free(snapshot_ct);
2080 	} else {
2081 		ct->dead.snapshot_log = snapshot_log;
2082 		ct->dead.snapshot_ct = snapshot_ct;
2083 	}
2084 
2085 	spin_unlock_irqrestore(&ct->dead.lock, flags);
2086 
	queue_work(system_unbound_wq, &ct->dead.worker);
2088 }
2089 
2090 static void ct_dead_print(struct xe_dead_ct *dead)
2091 {
2092 	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
2093 	struct xe_device *xe = ct_to_xe(ct);
2094 	struct xe_gt *gt = ct_to_gt(ct);
2095 	static int g_count;
2096 	struct drm_printer ip = xe_gt_info_printer(gt);
2097 	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
2098 
2099 	if (!dead->reason) {
2100 		xe_gt_err(gt, "CTB is dead for no reason!?\n");
2101 		return;
2102 	}
2103 
2104 	/* Can't generate a genuine core dump at this point, so just do the good bits */
2105 	drm_puts(&lp, "**** Xe Device Coredump ****\n");
2106 	drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
2107 	xe_device_snapshot_print(xe, &lp);
2108 
2109 	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
2110 	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
2111 
2112 	drm_puts(&lp, "**** GuC Log ****\n");
2113 	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
2114 
2115 	drm_puts(&lp, "**** GuC CT ****\n");
2116 	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
2117 
2118 	drm_puts(&lp, "Done.\n");
2119 }
2120 
2121 static void ct_dead_worker_func(struct work_struct *w)
2122 {
2123 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
2124 
2125 	if (!ct->dead.reported) {
2126 		ct->dead.reported = true;
2127 		ct_dead_print(&ct->dead);
2128 	}
2129 
2130 	spin_lock_irq(&ct->dead.lock);
2131 
2132 	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
2133 	ct->dead.snapshot_log = NULL;
2134 	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
2135 	ct->dead.snapshot_ct = NULL;
2136 
2137 	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
2138 		/* A reset has occurred so re-arm the error reporting */
2139 		ct->dead.reason = 0;
2140 		ct->dead.reported = false;
2141 	}
2142 
2143 	spin_unlock_irq(&ct->dead.lock);
2144 }
2145 #endif
2146