xref: /linux/drivers/gpu/drm/xe/xe_guc_ct.c (revision 8934827db5403eae57d4537114a9ff88b0a8460f)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_ct.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/circ_buf.h>
10 #include <linux/delay.h>
11 #include <linux/fault-inject.h>
12 
13 #include <kunit/static_stub.h>
14 
15 #include <drm/drm_managed.h>
16 
17 #include "abi/guc_actions_abi.h"
18 #include "abi/guc_actions_sriov_abi.h"
19 #include "abi/guc_klvs_abi.h"
20 #include "xe_bo.h"
21 #include "xe_devcoredump.h"
22 #include "xe_device.h"
23 #include "xe_gt.h"
24 #include "xe_gt_printk.h"
25 #include "xe_gt_sriov_pf_control.h"
26 #include "xe_gt_sriov_pf_monitor.h"
27 #include "xe_guc.h"
28 #include "xe_guc_log.h"
29 #include "xe_guc_pagefault.h"
30 #include "xe_guc_relay.h"
31 #include "xe_guc_submit.h"
32 #include "xe_guc_tlb_inval.h"
33 #include "xe_map.h"
34 #include "xe_pm.h"
35 #include "xe_sriov_vf.h"
36 #include "xe_trace_guc.h"
37 
38 static void receive_g2h(struct xe_guc_ct *ct);
39 static void g2h_worker_func(struct work_struct *w);
40 static void safe_mode_worker_func(struct work_struct *w);
41 static void ct_exit_safe_mode(struct xe_guc_ct *ct);
42 static void guc_ct_change_state(struct xe_guc_ct *ct,
43 				enum xe_guc_ct_state state);
44 
45 static struct xe_guc *ct_to_guc(struct xe_guc_ct *ct)
46 {
47 	return container_of(ct, struct xe_guc, ct);
48 }
49 
50 static struct xe_gt *ct_to_gt(struct xe_guc_ct *ct)
51 {
52 	return container_of(ct, struct xe_gt, uc.guc.ct);
53 }
54 
55 static struct xe_device *ct_to_xe(struct xe_guc_ct *ct)
56 {
57 	return gt_to_xe(ct_to_gt(ct));
58 }
59 
60 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
61 enum {
62 	/* Internal states, not error conditions */
63 	CT_DEAD_STATE_REARM,			/* 0x0001 */
64 	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */
65 
66 	/* Error conditions */
67 	CT_DEAD_SETUP,				/* 0x0004 */
68 	CT_DEAD_H2G_WRITE,			/* 0x0008 */
69 	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
70 	CT_DEAD_G2H_READ,			/* 0x0020 */
71 	CT_DEAD_G2H_RECV,			/* 0x0040 */
72 	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
73 	CT_DEAD_DEADLOCK,			/* 0x0100 */
74 	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
75 	CT_DEAD_FAST_G2H,			/* 0x0400 */
76 	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
77 	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
78 	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
79 	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
80 	CT_DEAD_CRASH,				/* 0x8000 */
81 };
82 
83 static void ct_dead_worker_func(struct work_struct *w);
84 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
85 
86 static void ct_dead_fini(struct xe_guc_ct *ct)
87 {
88 	cancel_work_sync(&ct->dead.worker);
89 }
90 
91 static void ct_dead_init(struct xe_guc_ct *ct)
92 {
93 	spin_lock_init(&ct->dead.lock);
94 	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
95 
96 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
97 	stack_depot_init();
98 #endif
99 }
100 
101 static void fast_req_stack_save(struct xe_guc_ct *ct, unsigned int slot)
102 {
103 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
104 	unsigned long entries[SZ_32];
105 	unsigned int n;
106 
107 	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
108 	/* May be called under spinlock, so avoid sleeping */
109 	ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
110 #endif
111 }
112 
113 static void fast_req_dump(struct xe_guc_ct *ct, u16 fence, unsigned int slot)
114 {
115 	struct xe_gt *gt = ct_to_gt(ct);
116 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
117 	char *buf __cleanup(kfree) = kmalloc(SZ_4K, GFP_NOWAIT);
118 
119 	if (buf && stack_depot_snprint(ct->fast_req[slot].stack, buf, SZ_4K, 0))
120 		xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s\n",
121 			  fence, ct->fast_req[slot].action, buf);
122 	else
123 		xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
124 			  fence, ct->fast_req[slot].action);
125 #else
126 	xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
127 		  fence, ct->fast_req[slot].action);
128 #endif
129 }
130 
131 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
132 {
133 	u16 fence_min = U16_MAX, fence_max = 0;
134 	struct xe_gt *gt = ct_to_gt(ct);
135 	unsigned int n;
136 
137 	lockdep_assert_held(&ct->lock);
138 
139 	for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
140 		if (ct->fast_req[n].fence < fence_min)
141 			fence_min = ct->fast_req[n].fence;
142 		if (ct->fast_req[n].fence > fence_max)
143 			fence_max = ct->fast_req[n].fence;
144 
145 		if (ct->fast_req[n].fence != fence)
146 			continue;
147 
148 		return fast_req_dump(ct, fence, n);
149 	}
150 
151 	xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
152 		   fence, fence_min, fence_max, ct->fence_seqno);
153 }
154 
155 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
156 {
157 	unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
158 
159 	fast_req_stack_save(ct, slot);
160 	ct->fast_req[slot].fence = fence;
161 	ct->fast_req[slot].action = action;
162 }
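
To make the tracking above concrete, here is an illustrative sketch; the ring depth of 32 is an assumption based on the current definition of ct->fast_req in the CT headers, not something this file guarantees:

/*
 * Illustrative example (assuming a 32-entry fast_req ring): fast_req_track()
 * files each FAST_REQUEST under fence % 32, so fences 0x0012 and 0x0032 map
 * to the same slot and the older record is overwritten. This is why
 * fast_req_report() can only warn about a likely wrap when the offending
 * fence is no longer present in the table.
 */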
163 
164 #define CT_DEAD(ct, ctb, reason_code)	ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
165 
166 #else
167 
168 static void ct_dead_fini(struct xe_guc_ct *ct) { }
169 static void ct_dead_init(struct xe_guc_ct *ct) { }
170 
171 static void fast_req_report(struct xe_guc_ct *ct, u16 fence) { }
172 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action) { }
173 
174 #define CT_DEAD(ct, ctb, reason)			\
175 	do {						\
176 		struct guc_ctb *_ctb = (ctb);		\
177 		if (_ctb)				\
178 			_ctb->info.broken = true;	\
179 	} while (0)
180 
181 #endif
182 
183 /* Used when a CT send wants to block and / or receive data */
184 struct g2h_fence {
185 	u32 *response_buffer;
186 	u32 seqno;
187 	u32 response_data;
188 	u16 response_len;
189 	u16 error;
190 	u16 hint;
191 	u16 reason;
192 	bool cancel;
193 	bool retry;
194 	bool fail;
195 	bool done;
196 };
197 
198 static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
199 {
200 	memset(g2h_fence, 0, sizeof(*g2h_fence));
201 	g2h_fence->response_buffer = response_buffer;
202 	g2h_fence->seqno = ~0x0;
203 }
204 
205 static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
206 {
207 	g2h_fence->cancel = true;
208 	g2h_fence->fail = true;
209 
210 	/* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
211 	WRITE_ONCE(g2h_fence->done, true);
212 }
213 
214 static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
215 {
216 	return g2h_fence->seqno == ~0x0;
217 }
218 
219 /**
220  * DOC: GuC CTB Blob
221  *
222  * We allocate single blob to hold both CTB descriptors and buffers:
223  *
224  *      +--------+-----------------------------------------------+------+
225  *      | offset | contents                                      | size |
226  *      +========+===============================================+======+
227  *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
228  *      +--------+-----------------------------------------------+  4K  |
229  *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
230  *      +--------+-----------------------------------------------+------+
231  *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
232  *      |        |                                               |      |
233  *      +--------+-----------------------------------------------+------+
234  *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
235  *      | + n*4K |                                               |      |
236  *      +--------+-----------------------------------------------+------+
237  *
238  * The size of each ``CT Buffer`` must be a multiple of 4K.
239  * We don't expect too many messages in flight at any time, unless we are
240  * using GuC submission. In that case each request requires a minimum of
241  * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
242  * is enough space to avoid backpressure on the driver. We increase the size
243  * of the receive buffer (relative to the send) to ensure a G2H response
244  * CTB message has a landing spot.
245  *
246  * In addition to submissions, the G2H buffer needs to be able to hold
247  * enough space for recoverable page fault notifications. The number of
248  * page faults is interrupt driven and can be as much as the number of
249  * compute resources available. However, most of the actual work for these
250  * is in a separate page fault worker thread. Therefore we only need to
251  * make sure the queue has enough space to handle all of the submissions
252  * and responses and an extra buffer for incoming page faults.
253  */
254 
255 #define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
256 #define CTB_H2G_BUFFER_OFFSET	(CTB_DESC_SIZE * 2)
257 #define CTB_H2G_BUFFER_SIZE	(SZ_4K)
258 #define CTB_H2G_BUFFER_DWORDS	(CTB_H2G_BUFFER_SIZE / sizeof(u32))
259 #define CTB_G2H_BUFFER_SIZE	(SZ_128K)
260 #define CTB_G2H_BUFFER_DWORDS	(CTB_G2H_BUFFER_SIZE / sizeof(u32))
261 #define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)
262 #define G2H_ROOM_BUFFER_DWORDS	(CTB_G2H_BUFFER_DWORDS / 2)
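
For reference, the macros above give the following concrete layout for the blob described in the GuC CTB Blob diagram. This is an illustrative sketch only (not driver code) and assumes the descriptor struct fits within 2K, so CTB_DESC_SIZE resolves to SZ_2K:

/*
 * Illustrative layout arithmetic (not part of the driver):
 *
 *   H2G descriptor: 0x0000                      (CTB_DESC_SIZE == 2K)
 *   G2H descriptor: 0x0800                      (second 2K descriptor slot)
 *   H2G buffer:     0x1000                      (CTB_H2G_BUFFER_OFFSET == 4K)
 *   G2H buffer:     0x1000 + 0x1000 == 0x2000   (right after the 4K H2G buffer)
 *
 *   Total blob size: 4K + 4K + 128K == 136K, which is what guc_ct_size()
 *   returns below.
 */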
263 
264 /**
265  * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
266  * CT command queue
267  * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
268  *
269  * The observation is that a 4KiB buffer full of commands takes a little over a
270  * second to process. Use that to calculate the maximum time to process a full
271  * CT command queue.
272  *
273  * Return: Maximum time to process a full CT queue in jiffies.
274  */
275 long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
276 {
277 	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
278 	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
279 }
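
As a quick sanity check of the formula (illustrative arithmetic only): with the current 4 KiB H2G buffer, (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ evaluates to 1 * HZ, i.e. one second's worth of jiffies; a hypothetical 8 KiB buffer would double that to 2 * HZ.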
280 
281 static size_t guc_ct_size(void)
282 {
283 	return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
284 		CTB_G2H_BUFFER_SIZE;
285 }
286 
287 static void guc_ct_fini(struct drm_device *drm, void *arg)
288 {
289 	struct xe_guc_ct *ct = arg;
290 
291 	ct_dead_fini(ct);
292 	ct_exit_safe_mode(ct);
293 	destroy_workqueue(ct->g2h_wq);
294 	xa_destroy(&ct->fence_lookup);
295 }
296 
297 static void primelockdep(struct xe_guc_ct *ct)
298 {
299 	if (!IS_ENABLED(CONFIG_LOCKDEP))
300 		return;
301 
302 	fs_reclaim_acquire(GFP_KERNEL);
303 	might_lock(&ct->lock);
304 	fs_reclaim_release(GFP_KERNEL);
305 }
306 
307 int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
308 {
309 	struct xe_device *xe = ct_to_xe(ct);
310 	struct xe_gt *gt = ct_to_gt(ct);
311 	int err;
312 
313 	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
314 
315 	err = drmm_mutex_init(&xe->drm, &ct->lock);
316 	if (err)
317 		return err;
318 
319 	primelockdep(ct);
320 
321 	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
322 	if (!ct->g2h_wq)
323 		return -ENOMEM;
324 
325 	spin_lock_init(&ct->fast_lock);
326 	xa_init(&ct->fence_lookup);
327 	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
328 	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
329 
330 	ct_dead_init(ct);
331 	init_waitqueue_head(&ct->wq);
332 	init_waitqueue_head(&ct->g2h_fence_wq);
333 
334 	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
335 	if (err)
336 		return err;
337 
338 	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
339 	ct->state = XE_GUC_CT_STATE_DISABLED;
340 	return 0;
341 }
342 ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
343 
344 static void guc_action_disable_ct(void *arg)
345 {
346 	struct xe_guc_ct *ct = arg;
347 
348 	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
349 }
350 
351 int xe_guc_ct_init(struct xe_guc_ct *ct)
352 {
353 	struct xe_device *xe = ct_to_xe(ct);
354 	struct xe_gt *gt = ct_to_gt(ct);
355 	struct xe_tile *tile = gt_to_tile(gt);
356 	struct xe_bo *bo;
357 
358 	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
359 					  XE_BO_FLAG_SYSTEM |
360 					  XE_BO_FLAG_GGTT |
361 					  XE_BO_FLAG_GGTT_INVALIDATE |
362 					  XE_BO_FLAG_PINNED_NORESTORE);
363 	if (IS_ERR(bo))
364 		return PTR_ERR(bo);
365 
366 	ct->bo = bo;
367 
368 	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
369 }
370 ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
371 
372 /**
373  * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
374  * @ct: the &xe_guc_ct
375  *
376  * Allocate a new BO in VRAM and free the previous BO that was allocated
377  * in system memory (SMEM). Applicable only for DGFX products.
378  *
379  * Return: 0 on success, or a negative errno on failure.
380  */
381 int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
382 {
383 	struct xe_device *xe = ct_to_xe(ct);
384 	struct xe_gt *gt = ct_to_gt(ct);
385 	struct xe_tile *tile = gt_to_tile(gt);
386 	int ret;
387 
388 	xe_assert(xe, !xe_guc_ct_enabled(ct));
389 
390 	if (IS_DGFX(xe)) {
391 		ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
392 		if (ret)
393 			return ret;
394 	}
395 
396 	devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
397 	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
398 }
399 
400 #define desc_read(xe_, guc_ctb__, field_)			\
401 	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
402 			struct guc_ct_buffer_desc, field_)
403 
404 #define desc_write(xe_, guc_ctb__, field_, val_)		\
405 	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
406 			struct guc_ct_buffer_desc, field_, val_)
407 
408 static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
409 				struct iosys_map *map)
410 {
411 	h2g->info.size = CTB_H2G_BUFFER_DWORDS;
412 	h2g->info.resv_space = 0;
413 	h2g->info.tail = 0;
414 	h2g->info.head = 0;
415 	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
416 				     h2g->info.size) -
417 			  h2g->info.resv_space;
418 	h2g->info.broken = false;
419 
420 	h2g->desc = *map;
421 	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
422 
423 	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
424 }
425 
426 static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
427 				struct iosys_map *map)
428 {
429 	g2h->info.size = CTB_G2H_BUFFER_DWORDS;
430 	g2h->info.resv_space = G2H_ROOM_BUFFER_DWORDS;
431 	g2h->info.head = 0;
432 	g2h->info.tail = 0;
433 	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
434 				     g2h->info.size) -
435 			  g2h->info.resv_space;
436 	g2h->info.broken = false;
437 
438 	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
439 	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
440 
441 	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
442 					    CTB_H2G_BUFFER_SIZE);
443 }
444 
445 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
446 {
447 	struct xe_guc *guc = ct_to_guc(ct);
448 	u32 desc_addr, ctb_addr, size;
449 	int err;
450 
451 	desc_addr = xe_bo_ggtt_addr(ct->bo);
452 	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
453 	size = ct->ctbs.h2g.info.size * sizeof(u32);
454 
455 	err = xe_guc_self_cfg64(guc,
456 				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
457 				desc_addr);
458 	if (err)
459 		return err;
460 
461 	err = xe_guc_self_cfg64(guc,
462 				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
463 				ctb_addr);
464 	if (err)
465 		return err;
466 
467 	return xe_guc_self_cfg32(guc,
468 				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
469 				 size);
470 }
471 
472 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
473 {
474 	struct xe_guc *guc = ct_to_guc(ct);
475 	u32 desc_addr, ctb_addr, size;
476 	int err;
477 
478 	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
479 	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
480 		CTB_H2G_BUFFER_SIZE;
481 	size = ct->ctbs.g2h.info.size * sizeof(u32);
482 
483 	err = xe_guc_self_cfg64(guc,
484 				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
485 				desc_addr);
486 	if (err)
487 		return err;
488 
489 	err = xe_guc_self_cfg64(guc,
490 				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
491 				ctb_addr);
492 	if (err)
493 		return err;
494 
495 	return xe_guc_self_cfg32(guc,
496 				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
497 				 size);
498 }
499 
500 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
501 {
502 	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
503 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
504 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
505 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
506 			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
507 		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
508 			   enable ? GUC_CTB_CONTROL_ENABLE :
509 			   GUC_CTB_CONTROL_DISABLE),
510 	};
511 	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
512 
513 	return ret > 0 ? -EPROTO : ret;
514 }
515 
516 static void guc_ct_change_state(struct xe_guc_ct *ct,
517 				enum xe_guc_ct_state state)
518 {
519 	struct xe_gt *gt = ct_to_gt(ct);
520 	struct g2h_fence *g2h_fence;
521 	unsigned long idx;
522 
523 	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
524 	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */
525 
526 	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
527 		     state == XE_GUC_CT_STATE_STOPPED);
528 
529 	if (ct->g2h_outstanding)
530 		xe_pm_runtime_put(ct_to_xe(ct));
531 	ct->g2h_outstanding = 0;
532 
533 	/*
534 	 * WRITE_ONCE pairs with READ_ONCEs in xe_guc_ct_initialized and
535 	 * xe_guc_ct_enabled.
536 	 */
537 	WRITE_ONCE(ct->state, state);
538 
539 	xe_gt_dbg(gt, "GuC CT communication channel %s\n",
540 		  state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
541 		  str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));
542 
543 	spin_unlock_irq(&ct->fast_lock);
544 
545 	/* cancel all in-flight send-recv requests */
546 	xa_for_each(&ct->fence_lookup, idx, g2h_fence)
547 		g2h_fence_cancel(g2h_fence);
548 
549 	/* make sure guc_ct_send_recv() will see g2h_fence changes */
550 	smp_mb();
551 	wake_up_all(&ct->g2h_fence_wq);
552 
553 	/*
554 	 * Lockdep doesn't like this under the fast lock, and the destroy only
555 	 * needs to be serialized with the send path, which the CT lock provides.
556 	 */
557 	xa_destroy(&ct->fence_lookup);
558 
559 	mutex_unlock(&ct->lock);
560 }
561 
562 static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
563 {
564 	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
565 }
566 
567 static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
568 {
569 	if (!ct_needs_safe_mode(ct))
570 		return false;
571 
572 	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
573 	return true;
574 }
575 
576 static void safe_mode_worker_func(struct work_struct *w)
577 {
578 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
579 
580 	receive_g2h(ct);
581 
582 	if (!ct_restart_safe_mode_worker(ct))
583 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
584 }
585 
586 static void ct_enter_safe_mode(struct xe_guc_ct *ct)
587 {
588 	if (ct_restart_safe_mode_worker(ct))
589 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
590 }
591 
592 static void ct_exit_safe_mode(struct xe_guc_ct *ct)
593 {
594 	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
595 		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
596 }
597 
598 static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
599 {
600 	struct xe_device *xe = ct_to_xe(ct);
601 	struct xe_gt *gt = ct_to_gt(ct);
602 	int err;
603 
604 	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
605 
606 	if (needs_register) {
607 		xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
608 		guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
609 		guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
610 
611 		err = guc_ct_ctb_h2g_register(ct);
612 		if (err)
613 			goto err_out;
614 
615 		err = guc_ct_ctb_g2h_register(ct);
616 		if (err)
617 			goto err_out;
618 
619 		err = guc_ct_control_toggle(ct, true);
620 		if (err)
621 			goto err_out;
622 	} else {
623 		ct->ctbs.h2g.info.broken = false;
624 		ct->ctbs.g2h.info.broken = false;
625 		/* Skip everything in H2G buffer */
626 		xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
627 			      CTB_H2G_BUFFER_SIZE);
628 	}
629 
630 	guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
631 
632 	smp_mb();
633 	wake_up_all(&ct->wq);
634 
635 	if (ct_needs_safe_mode(ct))
636 		ct_enter_safe_mode(ct);
637 
638 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
639 	/*
640 	 * The CT has now been reset so the dumper can be re-armed
641 	 * after any existing dead state has been dumped.
642 	 */
643 	spin_lock_irq(&ct->dead.lock);
644 	if (ct->dead.reason) {
645 		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
646 		queue_work(system_unbound_wq, &ct->dead.worker);
647 	}
648 	spin_unlock_irq(&ct->dead.lock);
649 #endif
650 
651 	return 0;
652 
653 err_out:
654 	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
655 	CT_DEAD(ct, NULL, SETUP);
656 
657 	return err;
658 }
659 
660 /**
661  * xe_guc_ct_restart() - Restart GuC CT
662  * @ct: the &xe_guc_ct
663  *
664  * Restart GuC CT to an empty state without issuing a CT register MMIO command.
665  *
666  * Return: 0 on success, or a negative errno on failure.
667  */
668 int xe_guc_ct_restart(struct xe_guc_ct *ct)
669 {
670 	return __xe_guc_ct_start(ct, false);
671 }
672 
673 /**
674  * xe_guc_ct_enable() - Enable GuC CT
675  * @ct: the &xe_guc_ct
676  *
677  * Enable GuC CT to an empty state and issue a CT register MMIO command.
678  *
679  * Return: 0 on success, or a negative errno on failure.
680  */
681 int xe_guc_ct_enable(struct xe_guc_ct *ct)
682 {
683 	return __xe_guc_ct_start(ct, true);
684 }
685 
686 static void stop_g2h_handler(struct xe_guc_ct *ct)
687 {
688 	cancel_work_sync(&ct->g2h_worker);
689 }
690 
691 /**
692  * xe_guc_ct_disable - Set GuC to disabled state
693  * @ct: the &xe_guc_ct
694  *
695  * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
696  * in this transition.
697  */
698 void xe_guc_ct_disable(struct xe_guc_ct *ct)
699 {
700 	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
701 	ct_exit_safe_mode(ct);
702 	stop_g2h_handler(ct);
703 }
704 
705 /**
706  * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G
707  * @ct: the &xe_guc_ct
708  */
709 void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct)
710 {
711 	receive_g2h(ct);
712 	xe_guc_ct_stop(ct);
713 }
714 
715 /**
716  * xe_guc_ct_stop - Set GuC to stopped state
717  * @ct: the &xe_guc_ct
718  *
719  * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
720  */
721 void xe_guc_ct_stop(struct xe_guc_ct *ct)
722 {
723 	if (!xe_guc_ct_initialized(ct))
724 		return;
725 
726 	guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
727 	stop_g2h_handler(ct);
728 }
729 
730 /**
731  * xe_guc_ct_runtime_suspend() - GuC CT runtime suspend
732  * @ct: the &xe_guc_ct
733  *
734  * Set GuC CT to disabled state.
735  */
736 void xe_guc_ct_runtime_suspend(struct xe_guc_ct *ct)
737 {
738 	struct guc_ctb *g2h = &ct->ctbs.g2h;
739 	u32 credits = CIRC_SPACE(0, 0, CTB_G2H_BUFFER_DWORDS) - G2H_ROOM_BUFFER_DWORDS;
740 
741 	/* We should be back to guc_ct_ctb_g2h_init() values */
742 	xe_gt_assert(ct_to_gt(ct), g2h->info.space == credits);
743 
744 	/*
745 	 * Since we're already in the runtime suspend path, we shouldn't have any
746 	 * pending messages. But if there happen to be any, we'd probably want them
747 	 * to surface as errors for further investigation.
748 	 */
749 	xe_guc_ct_disable(ct);
750 }
751 
752 /**
753  * xe_guc_ct_runtime_resume() - GuC CT runtime resume
754  * @ct: the &xe_guc_ct
755  *
756  * Restart GuC CT and set it to enabled state.
757  */
758 void xe_guc_ct_runtime_resume(struct xe_guc_ct *ct)
759 {
760 	xe_guc_ct_restart(ct);
761 }
762 
763 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
764 {
765 	struct guc_ctb *h2g = &ct->ctbs.h2g;
766 
767 	lockdep_assert_held(&ct->lock);
768 
769 	if (cmd_len > h2g->info.space) {
770 		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
771 
772 		if (h2g->info.head > h2g->info.size) {
773 			struct xe_device *xe = ct_to_xe(ct);
774 			u32 desc_status = desc_read(xe, h2g, status);
775 
776 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
777 
778 			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
779 				  h2g->info.head, h2g->info.size);
780 			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
781 			return false;
782 		}
783 
784 		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
785 					     h2g->info.size) -
786 				  h2g->info.resv_space;
787 		if (cmd_len > h2g->info.space)
788 			return false;
789 	}
790 
791 	return true;
792 }
793 
794 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
795 {
796 	if (!g2h_len)
797 		return true;
798 
799 	lockdep_assert_held(&ct->fast_lock);
800 
801 	return ct->ctbs.g2h.info.space > g2h_len;
802 }
803 
804 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
805 {
806 	lockdep_assert_held(&ct->lock);
807 
808 	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
809 		return -EBUSY;
810 
811 	return 0;
812 }
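
A worked example of the credit math used by h2g_has_room() above, with illustrative numbers only:

/*
 * Worked example (illustrative): the H2G ring holds CTB_H2G_BUFFER_DWORDS ==
 * 1024 dwords and has resv_space == 0. Right after init, tail == head == 0
 * and CIRC_SPACE(0, 0, 1024) == 1023 - one slot is always kept free so a
 * full ring can be told apart from an empty one. If the driver has written
 * up to tail == 1000 while the GuC has consumed up to head == 10, the
 * recomputed space is CIRC_SPACE(1000, 10, 1024) == 33 dwords, so a
 * 40-dword command has to wait for the GuC to advance head further.
 */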
813 
814 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
815 {
816 	lockdep_assert_held(&ct->lock);
817 	ct->ctbs.h2g.info.space -= cmd_len;
818 }
819 
820 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
821 {
822 	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
823 	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
824 		     (g2h_len && num_g2h));
825 
826 	if (g2h_len) {
827 		lockdep_assert_held(&ct->fast_lock);
828 
829 		if (!ct->g2h_outstanding)
830 			xe_pm_runtime_get_noresume(ct_to_xe(ct));
831 
832 		ct->ctbs.g2h.info.space -= g2h_len;
833 		ct->g2h_outstanding += num_g2h;
834 	}
835 }
836 
837 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
838 {
839 	bool bad = false;
840 
841 	lockdep_assert_held(&ct->fast_lock);
842 
843 	bad = ct->ctbs.g2h.info.space + g2h_len >
844 		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
845 	bad |= !ct->g2h_outstanding;
846 
847 	if (bad) {
848 		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
849 			  ct->ctbs.g2h.info.space, g2h_len,
850 			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
851 			  ct->ctbs.g2h.info.space + g2h_len,
852 			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
853 			  ct->g2h_outstanding);
854 		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
855 		return;
856 	}
857 
858 	ct->ctbs.g2h.info.space += g2h_len;
859 	if (!--ct->g2h_outstanding)
860 		xe_pm_runtime_put(ct_to_xe(ct));
861 }
862 
863 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
864 {
865 	spin_lock_irq(&ct->fast_lock);
866 	__g2h_release_space(ct, g2h_len);
867 	spin_unlock_irq(&ct->fast_lock);
868 }
869 
870 /*
871  * The CT protocol accepts a 16-bit fence. This field is fully owned by the
872  * driver, the GuC will just copy it to the reply message. Since we need to
873  * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
874  * we use one bit of the seqno as an indicator for that and a rolling counter
875  * for the remaining 15 bits.
876  */
877 #define CT_SEQNO_MASK GENMASK(14, 0)
878 #define CT_SEQNO_UNTRACKED BIT(15)
879 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
880 {
881 	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
882 
883 	if (!is_g2h_fence)
884 		seqno |= CT_SEQNO_UNTRACKED;
885 
886 	return seqno;
887 }
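
An illustration of the fence encoding above, using hypothetical counter values:

/*
 * Illustrative example: with ct->fence_seqno at 0x7ffe, three consecutive
 * allocations produce:
 *
 *   next_ct_seqno(ct, false) -> 0x7ffe | CT_SEQNO_UNTRACKED == 0xfffe
 *   next_ct_seqno(ct, false) -> 0x7fff | CT_SEQNO_UNTRACKED == 0xffff
 *   next_ct_seqno(ct, true)  -> 0x0000 (15-bit counter wrapped, tracked fence)
 *
 * parse_g2h_response() later uses the CT_SEQNO_UNTRACKED bit to tell replies
 * to FAST_REQUEST messages apart from replies to tracked REQUEST messages.
 */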
888 
889 #define MAKE_ACTION(type, __action)				\
890 ({								\
891 	FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |			\
892 	FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |			\
893 		   GUC_HXG_EVENT_MSG_0_DATA0, __action);	\
894 })
895 
896 static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
897 {
898 	/*
899 	 * When resuming a VF, we can't reliably track whether context
900 	 * registration has completed in the GuC state machine. It is harmless
901 	 * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
902 	 * is used. Additionally, if there is an H2G protocol issue on a VF,
903 	 * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
904 	 * fail.
905 	 */
906 	return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
907 		(action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
908 		 action == XE_GUC_ACTION_REGISTER_CONTEXT);
909 }
910 
911 #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
912 
913 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
914 		     u32 ct_fence_value, bool want_response)
915 {
916 	struct xe_device *xe = ct_to_xe(ct);
917 	struct xe_gt *gt = ct_to_gt(ct);
918 	struct guc_ctb *h2g = &ct->ctbs.h2g;
919 	u32 cmd[H2G_CT_HEADERS];
920 	u32 tail = h2g->info.tail;
921 	u32 full_len;
922 	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
923 							 tail * sizeof(u32));
924 	u32 desc_status;
925 
926 	full_len = len + GUC_CTB_HDR_LEN;
927 
928 	lockdep_assert_held(&ct->lock);
929 	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
930 
931 	desc_status = desc_read(xe, h2g, status);
932 	if (desc_status) {
933 		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
934 		goto corrupted;
935 	}
936 
937 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
938 		u32 desc_tail = desc_read(xe, h2g, tail);
939 		u32 desc_head = desc_read(xe, h2g, head);
940 
941 		if (tail != desc_tail) {
942 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
943 			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
944 			goto corrupted;
945 		}
946 
947 		if (tail > h2g->info.size) {
948 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
949 			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
950 				  tail, h2g->info.size);
951 			goto corrupted;
952 		}
953 
954 		if (desc_head >= h2g->info.size) {
955 			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
956 			xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
957 				  desc_head, h2g->info.size);
958 			goto corrupted;
959 		}
960 	}
961 
962 	/* Command will wrap, zero fill (NOPs), return and check credits again */
963 	if (tail + full_len > h2g->info.size) {
964 		xe_map_memset(xe, &map, 0, 0,
965 			      (h2g->info.size - tail) * sizeof(u32));
966 		h2g_reserve_space(ct, (h2g->info.size - tail));
967 		h2g->info.tail = 0;
968 		desc_write(xe, h2g, tail, h2g->info.tail);
969 
970 		return -EAGAIN;
971 	}
972 
973 	/*
974 	 * dw0: CT header (including fence)
975 	 * dw1: HXG header (including action code)
976 	 * dw2+: action data
977 	 */
978 	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
979 		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
980 		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
981 	if (want_response) {
982 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
983 	} else if (vf_action_can_safely_fail(xe, action[0])) {
984 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
985 	} else {
986 		fast_req_track(ct, ct_fence_value,
987 			       FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
988 
989 		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
990 	}
991 
992 	/* H2G header in cmd[1] replaces action[0] so: */
993 	--len;
994 	++action;
995 
996 	/* Write H2G ensuring visible before descriptor update */
997 	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
998 	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
999 	xe_device_wmb(xe);
1000 
1001 	/* Update local copies */
1002 	h2g->info.tail = (tail + full_len) % h2g->info.size;
1003 	h2g_reserve_space(ct, full_len);
1004 
1005 	/* Update descriptor */
1006 	desc_write(xe, h2g, tail, h2g->info.tail);
1007 
1008 	trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
1009 			     desc_read(xe, h2g, head), h2g->info.tail);
1010 
1011 	return 0;
1012 
1013 corrupted:
1014 	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
1015 	return -EPIPE;
1016 }
1017 
1018 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
1019 				u32 len, u32 g2h_len, u32 num_g2h,
1020 				struct g2h_fence *g2h_fence)
1021 {
1022 	struct xe_gt *gt = ct_to_gt(ct);
1023 	u16 seqno;
1024 	int ret;
1025 
1026 	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
1027 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
1028 	xe_gt_assert(gt, !num_g2h || !g2h_fence);
1029 	xe_gt_assert(gt, !g2h_len || num_g2h);
1030 	xe_gt_assert(gt, g2h_len || !num_g2h);
1031 	lockdep_assert_held(&ct->lock);
1032 
1033 	if (unlikely(ct->ctbs.h2g.info.broken)) {
1034 		ret = -EPIPE;
1035 		goto out;
1036 	}
1037 
1038 	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
1039 		ret = -ENODEV;
1040 		goto out;
1041 	}
1042 
1043 	if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) {
1044 		ret = -ECANCELED;
1045 		goto out;
1046 	}
1047 
1048 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
1049 
1050 	if (g2h_fence) {
1051 		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
1052 		num_g2h = 1;
1053 
1054 		if (g2h_fence_needs_alloc(g2h_fence)) {
1055 			g2h_fence->seqno = next_ct_seqno(ct, true);
1056 			ret = xa_err(xa_store(&ct->fence_lookup,
1057 					      g2h_fence->seqno, g2h_fence,
1058 					      GFP_ATOMIC));
1059 			if (ret)
1060 				goto out;
1061 		}
1062 
1063 		seqno = g2h_fence->seqno;
1064 	} else {
1065 		seqno = next_ct_seqno(ct, false);
1066 	}
1067 
1068 	if (g2h_len)
1069 		spin_lock_irq(&ct->fast_lock);
1070 retry:
1071 	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
1072 	if (unlikely(ret))
1073 		goto out_unlock;
1074 
1075 	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
1076 	if (unlikely(ret)) {
1077 		if (ret == -EAGAIN)
1078 			goto retry;
1079 		goto out_unlock;
1080 	}
1081 
1082 	__g2h_reserve_space(ct, g2h_len, num_g2h);
1083 	xe_guc_notify(ct_to_guc(ct));
1084 out_unlock:
1085 	if (g2h_len)
1086 		spin_unlock_irq(&ct->fast_lock);
1087 out:
1088 	return ret;
1089 }
1090 
1091 static void kick_reset(struct xe_guc_ct *ct)
1092 {
1093 	xe_gt_reset_async(ct_to_gt(ct));
1094 }
1095 
1096 static int dequeue_one_g2h(struct xe_guc_ct *ct);
1097 
1098 /*
1099  * Wait before retrying to send an H2G message.
1100  * Return: true if ready for retry, false if the wait timed out
1101  */
1102 static bool guc_ct_send_wait_for_retry(struct xe_guc_ct *ct, u32 len,
1103 				       u32 g2h_len, struct g2h_fence *g2h_fence,
1104 				       unsigned int *sleep_period_ms)
1105 {
1106 	struct xe_device *xe = ct_to_xe(ct);
1107 
1108 	/*
1109 	 * We wait to try to restore credits for about 1 second before bailing.
1110 	 * In the case of H2G credits we have no choice but just to wait for the
1111 	 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
1112 	 * the case of G2H we process any G2H in the channel, hopefully freeing
1113 	 * credits as we consume the G2H messages.
1114 	 */
1115 	if (!h2g_has_room(ct, len + GUC_CTB_HDR_LEN)) {
1116 		struct guc_ctb *h2g = &ct->ctbs.h2g;
1117 
1118 		if (*sleep_period_ms == 1024)
1119 			return false;
1120 
1121 		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
1122 						 h2g->info.size,
1123 						 h2g->info.space,
1124 						 len + GUC_CTB_HDR_LEN);
1125 		msleep(*sleep_period_ms);
1126 		*sleep_period_ms <<= 1;
1127 	} else {
1128 		struct xe_device *xe = ct_to_xe(ct);
1129 		struct guc_ctb *g2h = &ct->ctbs.g2h;
1130 		int ret;
1131 
1132 		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
1133 						 desc_read(xe, g2h, tail),
1134 						 g2h->info.size,
1135 						 g2h->info.space,
1136 						 g2h_fence ?
1137 						 GUC_CTB_HXG_MSG_MAX_LEN :
1138 						 g2h_len);
1139 
1140 #define g2h_avail(ct)	\
1141 	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
1142 		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
1143 					g2h_avail(ct), HZ))
1144 			return false;
1145 #undef g2h_avail
1146 
1147 		ret = dequeue_one_g2h(ct);
1148 		if (ret < 0) {
1149 			if (ret != -ECANCELED)
1150 				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
1151 					  ERR_PTR(ret));
1152 			return false;
1153 		}
1154 	}
1155 	return true;
1156 }
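
For the H2G-credit branch above, the exponential backoff adds up as follows (illustrative arithmetic):

/*
 * Illustrative backoff arithmetic: sleep_period_ms starts at 1 and doubles on
 * every H2G-credit retry (1, 2, 4, ..., 512 ms), so the total time slept
 * before hitting the 1024 ms bail-out threshold is 1 + 2 + ... + 512 ==
 * 1023 ms, i.e. roughly the one second mentioned in the comment above.
 */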
1157 
1158 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1159 			      u32 g2h_len, u32 num_g2h,
1160 			      struct g2h_fence *g2h_fence)
1161 {
1162 	struct xe_gt *gt = ct_to_gt(ct);
1163 	unsigned int sleep_period_ms = 1;
1164 	int ret;
1165 
1166 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
1167 	lockdep_assert_held(&ct->lock);
1168 	xe_device_assert_mem_access(ct_to_xe(ct));
1169 
1170 try_again:
1171 	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
1172 				   g2h_fence);
1173 
1174 	if (unlikely(ret == -EBUSY)) {
1175 		if (!guc_ct_send_wait_for_retry(ct, len, g2h_len, g2h_fence,
1176 						&sleep_period_ms))
1177 			goto broken;
1178 		goto try_again;
1179 	}
1180 
1181 	return ret;
1182 
1183 broken:
1184 	xe_gt_err(gt, "No forward process on H2G, reset required\n");
1185 	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
1186 
1187 	return -EDEADLK;
1188 }
1189 
1190 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1191 		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
1192 {
1193 	int ret;
1194 
1195 	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
1196 
1197 	mutex_lock(&ct->lock);
1198 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
1199 	mutex_unlock(&ct->lock);
1200 
1201 	return ret;
1202 }
1203 
1204 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1205 		   u32 g2h_len, u32 num_g2h)
1206 {
1207 	int ret;
1208 
1209 	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
1210 	if (ret == -EDEADLK)
1211 		kick_reset(ct);
1212 
1213 	return ret;
1214 }
1215 
1216 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1217 			  u32 g2h_len, u32 num_g2h)
1218 {
1219 	int ret;
1220 
1221 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
1222 	if (ret == -EDEADLK)
1223 		kick_reset(ct);
1224 
1225 	return ret;
1226 }
1227 
1228 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
1229 {
1230 	int ret;
1231 
1232 	lockdep_assert_held(&ct->lock);
1233 
1234 	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
1235 	if (ret == -EDEADLK)
1236 		kick_reset(ct);
1237 
1238 	return ret;
1239 }
1240 
1241 /*
1242  * Check if a GT reset is in progress or will occur, and if the GT reset brought
1243  * the CT back up. 5 seconds is an arbitrary upper limit for the GT reset to complete.
1244  */
1245 static bool retry_failure(struct xe_guc_ct *ct, int ret)
1246 {
1247 	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
1248 		return false;
1249 
1250 #define ct_alive(ct)	\
1251 	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
1252 	 !ct->ctbs.g2h.info.broken)
1253 	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
1254 		return false;
1255 #undef ct_alive
1256 
1257 	return true;
1258 }
1259 
1260 #define GUC_SEND_RETRY_LIMIT	50
1261 #define GUC_SEND_RETRY_MSLEEP	5
1262 
1263 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1264 			    u32 *response_buffer, bool no_fail)
1265 {
1266 	struct xe_gt *gt = ct_to_gt(ct);
1267 	struct g2h_fence g2h_fence;
1268 	unsigned int retries = 0;
1269 	int ret = 0;
1270 
1271 	/*
1272 	 * We use a fence to implement blocking sends / receiving response data.
1273 	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
1274 	 * an xarray is used as the storage medium with the seqno being the key.
1275 	 * Fields in the fence hold success, failure, retry status and the
1276 	 * response data. Safe to allocate on the stack as the xarray is the
1277 	 * only reference and it cannot be present after this function exits.
1278 	 */
1279 retry:
1280 	g2h_fence_init(&g2h_fence, response_buffer);
1281 retry_same_fence:
1282 	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
1283 	if (unlikely(ret == -ENOMEM)) {
1284 		/* Retry allocation with GFP_KERNEL */
1285 		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
1286 				      &g2h_fence, GFP_KERNEL));
1287 		if (ret)
1288 			return ret;
1289 
1290 		goto retry_same_fence;
1291 	} else if (unlikely(ret)) {
1292 		if (ret == -EDEADLK)
1293 			kick_reset(ct);
1294 
1295 		if (no_fail && retry_failure(ct, ret))
1296 			goto retry_same_fence;
1297 
1298 		if (!g2h_fence_needs_alloc(&g2h_fence))
1299 			xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1300 
1301 		return ret;
1302 	}
1303 
1304 	/* READ_ONCEs pair with WRITE_ONCEs in parse_g2h_response
1305 	 * and g2h_fence_cancel.
1306 	 */
1307 	ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
1308 	if (!ret) {
1309 		LNL_FLUSH_WORK(&ct->g2h_worker);
1310 		if (READ_ONCE(g2h_fence.done)) {
1311 			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
1312 				   g2h_fence.seqno, action[0]);
1313 			ret = 1;
1314 		}
1315 	}
1316 
1317 	/*
1318 	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
1319 	 * the stack, since we have no clue if it will fire after the timeout before we can erase
1320 	 * from the xa. Also we have some dependent loads and stores below for which we need the
1321 	 * correct ordering, and we lack the needed barriers.
1322 	 */
1323 	mutex_lock(&ct->lock);
1324 	if (!ret) {
1325 		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
1326 			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
1327 		xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1328 		mutex_unlock(&ct->lock);
1329 		return -ETIME;
1330 	}
1331 
1332 	if (g2h_fence.retry) {
1333 		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
1334 			  action[0], g2h_fence.reason);
1335 		mutex_unlock(&ct->lock);
1336 		if (++retries > GUC_SEND_RETRY_LIMIT) {
1337 			xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
1338 				  action[0], GUC_SEND_RETRY_LIMIT);
1339 			return -ELOOP;
1340 		}
1341 		msleep(GUC_SEND_RETRY_MSLEEP * retries);
1342 		goto retry;
1343 	}
1344 	if (g2h_fence.fail) {
1345 		if (g2h_fence.cancel) {
1346 			xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
1347 			ret = -ECANCELED;
1348 			goto unlock;
1349 		}
1350 		xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
1351 			  action[0], g2h_fence.error, g2h_fence.hint);
1352 		ret = -EIO;
1353 	}
1354 
1355 	if (ret > 0)
1356 		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
1357 
1358 unlock:
1359 	mutex_unlock(&ct->lock);
1360 
1361 	return ret;
1362 }
1363 
1364 /**
1365  * xe_guc_ct_send_recv - Send and receive HXG to the GuC
1366  * @ct: the &xe_guc_ct
1367  * @action: the dword array with `HXG Request`_ message (can't be NULL)
1368  * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
1369  * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
1370  *
1371  * Send a `HXG Request`_ message to the GuC over CT communication channel and
1372  * blocks until GuC replies with a `HXG Response`_ message.
1373  *
1374  * For non-blocking communication with GuC use xe_guc_ct_send().
1375  *
1376  * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
1377  *
1378  * Return: response length (in dwords) if &response_buffer was not NULL, or
1379  *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
1380  *         a negative error code on failure.
1381  */
1382 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1383 			u32 *response_buffer)
1384 {
1385 	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
1386 	return guc_ct_send_recv(ct, action, len, response_buffer, false);
1387 }
1388 ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
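
A minimal caller sketch for the API documented above. This is not part of the driver: EXAMPLE_H2G_ACTION and its single parameter are hypothetical placeholders standing in for a real action code from abi/guc_actions_abi.h.

/* Illustrative sketch only - not driver code. */
#define EXAMPLE_H2G_ACTION	0x1234	/* hypothetical action code */

static int example_blocking_request(struct xe_guc_ct *ct, u32 param)
{
	u32 action[] = { EXAMPLE_H2G_ACTION, param };
	int ret;

	/* Blocks until the GuC replies; DATA0 of the HXG response is returned. */
	ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
	if (ret < 0)
		return ret;

	return 0;
}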
1389 
1390 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
1391 				u32 len, u32 *response_buffer)
1392 {
1393 	return guc_ct_send_recv(ct, action, len, response_buffer, true);
1394 }
1395 
1396 static u32 *msg_to_hxg(u32 *msg)
1397 {
1398 	return msg + GUC_CTB_MSG_MIN_LEN;
1399 }
1400 
1401 static u32 msg_len_to_hxg_len(u32 len)
1402 {
1403 	return len - GUC_CTB_MSG_MIN_LEN;
1404 }
1405 
1406 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
1407 {
1408 	u32 *hxg = msg_to_hxg(msg);
1409 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1410 
1411 	lockdep_assert_held(&ct->lock);
1412 
1413 	switch (action) {
1414 	case XE_GUC_ACTION_NOTIFY_MULTI_QUEUE_CONTEXT_CGP_SYNC_DONE:
1415 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1416 	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1417 	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1418 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1419 	case XE_GUC_ACTION_PAGE_RECLAMATION_DONE:
1420 		g2h_release_space(ct, len);
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
1427 {
1428 	struct xe_gt *gt = ct_to_gt(ct);
1429 
1430 	if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
1431 		xe_gt_err(gt, "GuC Crash dump notification\n");
1432 	else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
1433 		xe_gt_err(gt, "GuC Exception notification\n");
1434 	else
1435 		xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
1436 
1437 	CT_DEAD(ct, NULL, CRASH);
1438 
1439 	kick_reset(ct);
1440 
1441 	return 0;
1442 }
1443 
1444 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
1445 {
1446 	struct xe_gt *gt =  ct_to_gt(ct);
1447 	u32 *hxg = msg_to_hxg(msg);
1448 	u32 hxg_len = msg_len_to_hxg_len(len);
1449 	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
1450 	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1451 	struct g2h_fence *g2h_fence;
1452 
1453 	lockdep_assert_held(&ct->lock);
1454 
1455 	/*
1456 	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
1457 	 * Those messages should never fail, so if we do get an error back it
1458 	 * means we're likely doing an illegal operation and the GuC is
1459 	 * rejecting it. We have no way to inform the code that submitted the
1460 	 * H2G that the message was rejected, so we need to escalate the
1461 	 * failure to trigger a reset.
1462 	 */
1463 	if (fence & CT_SEQNO_UNTRACKED) {
1464 		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
1465 			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
1466 				  fence,
1467 				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
1468 				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
1469 		else
1470 			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
1471 				  type, fence);
1472 
1473 		fast_req_report(ct, fence);
1474 
1475 		/* FIXME: W/A race in the GuC, will get in firmware soon */
1476 		if (xe_gt_recovery_pending(gt))
1477 			return 0;
1478 
1479 		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
1480 
1481 		return -EPROTO;
1482 	}
1483 
1484 	g2h_fence = xa_erase(&ct->fence_lookup, fence);
1485 	if (unlikely(!g2h_fence)) {
1486 		/* Don't tear down channel, as send could've timed out */
1487 		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
1488 		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
1489 		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1490 		return 0;
1491 	}
1492 
1493 	xe_gt_assert(gt, fence == g2h_fence->seqno);
1494 
1495 	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
1496 		g2h_fence->fail = true;
1497 		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
1498 		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
1499 	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
1500 		g2h_fence->retry = true;
1501 		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
1502 	} else if (g2h_fence->response_buffer) {
1503 		g2h_fence->response_len = hxg_len;
1504 		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
1505 	} else {
1506 		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
1507 	}
1508 
1509 	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1510 
1511 	/* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
1512 	WRITE_ONCE(g2h_fence->done, true);
1513 	smp_mb();
1514 
1515 	wake_up_all(&ct->g2h_fence_wq);
1516 
1517 	return 0;
1518 }
1519 
1520 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1521 {
1522 	struct xe_gt *gt = ct_to_gt(ct);
1523 	u32 *hxg = msg_to_hxg(msg);
1524 	u32 origin, type;
1525 	int ret;
1526 
1527 	lockdep_assert_held(&ct->lock);
1528 
1529 	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1530 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1531 		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
1532 			  origin);
1533 		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
1534 
1535 		return -EPROTO;
1536 	}
1537 
1538 	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1539 	switch (type) {
1540 	case GUC_HXG_TYPE_EVENT:
1541 		ret = parse_g2h_event(ct, msg, len);
1542 		break;
1543 	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1544 	case GUC_HXG_TYPE_RESPONSE_FAILURE:
1545 	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
1546 		ret = parse_g2h_response(ct, msg, len);
1547 		break;
1548 	default:
1549 		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
1550 			  type);
1551 		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
1552 
1553 		ret = -EOPNOTSUPP;
1554 	}
1555 
1556 	return ret;
1557 }
1558 
1559 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1560 {
1561 	struct xe_guc *guc = ct_to_guc(ct);
1562 	struct xe_gt *gt = ct_to_gt(ct);
1563 	u32 hxg_len = msg_len_to_hxg_len(len);
1564 	u32 *hxg = msg_to_hxg(msg);
1565 	u32 action, adj_len;
1566 	u32 *payload;
1567 	int ret = 0;
1568 
1569 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1570 		return 0;
1571 
1572 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1573 	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
1574 	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
1575 
1576 	switch (action) {
1577 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1578 		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
1579 		break;
1580 	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1581 		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
1582 		break;
1583 	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1584 		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
1585 		break;
1586 	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1587 		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
1588 							      adj_len);
1589 		break;
1590 	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1591 		/* Selftest only at the moment */
1592 		break;
1593 	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1594 		ret = xe_guc_error_capture_handler(guc, payload, adj_len);
1595 		break;
1596 	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1597 		/* FIXME: Handle this */
1598 		break;
1599 	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
1600 		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
1601 								 adj_len);
1602 		break;
1603 	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1604 		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1605 		break;
1606 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1607 	case XE_GUC_ACTION_PAGE_RECLAMATION_DONE:
1608 		/*
1609 		 * Page reclamation is an extension of TLB invalidation. Both
1610 		 * operations share the same seqno and fence. When either
1611 		 * action completes, we need to signal the corresponding
1612 		 * fence. Since the handling logic (lookup fence by seqno,
1613 		 * fence signalling) is identical, we use the same handler
1614 		 * for both G2H events.
1615 		 */
1616 		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1617 		break;
1618 	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
1619 		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
1620 		break;
1621 	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
1622 		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
1623 		break;
1624 	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
1625 		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
1626 		break;
1627 	case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
1628 		ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
1629 		break;
1630 	case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
1631 	case XE_GUC_ACTION_NOTIFY_EXCEPTION:
1632 		ret = guc_crash_process_msg(ct, action);
1633 		break;
1634 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1635 	case XE_GUC_ACTION_TEST_G2G_RECV:
1636 		ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
1637 		break;
1638 #endif
1639 	case XE_GUC_ACTION_NOTIFY_MULTI_QUEUE_CONTEXT_CGP_SYNC_DONE:
1640 		ret = xe_guc_exec_queue_cgp_sync_done_handler(guc, payload, adj_len);
1641 		break;
1642 	case XE_GUC_ACTION_NOTIFY_MULTI_QUEUE_CGP_CONTEXT_ERROR:
1643 		ret = xe_guc_exec_queue_cgp_context_error_handler(guc, payload,
1644 								  adj_len);
1645 		break;
1646 	default:
1647 		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
1648 	}
1649 
1650 	if (ret) {
1651 		xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
1652 			  action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
1653 		CT_DEAD(ct, NULL, PROCESS_FAILED);
1654 	}
1655 
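	/*
	 * Handler failures are captured via CT_DEAD above rather than
	 * propagated; returning 0 lets dequeue_one_g2h() continue draining
	 * the G2H queue.
	 */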
1656 	return 0;
1657 }
1658 
g2h_read(struct xe_guc_ct * ct,u32 * msg,bool fast_path)1659 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1660 {
1661 	struct xe_device *xe = ct_to_xe(ct);
1662 	struct xe_gt *gt = ct_to_gt(ct);
1663 	struct guc_ctb *g2h = &ct->ctbs.g2h;
1664 	u32 tail, head, len, desc_status;
1665 	s32 avail;
1666 	u32 action;
1667 	u32 *hxg;
1668 
1669 	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
1670 	lockdep_assert_held(&ct->fast_lock);
1671 
1672 	if (ct->state == XE_GUC_CT_STATE_DISABLED)
1673 		return -ENODEV;
1674 
1675 	if (ct->state == XE_GUC_CT_STATE_STOPPED)
1676 		return -ECANCELED;
1677 
1678 	if (g2h->info.broken)
1679 		return -EPIPE;
1680 
1681 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
1682 
1683 	desc_status = desc_read(xe, g2h, status);
1684 	if (desc_status) {
1685 		if (desc_status & GUC_CTB_STATUS_DISABLED) {
1686 			/*
1687 			 * Potentially valid if a CLIENT_RESET request resulted in
1688 			 * contexts/engines being reset. But should never happen as
1689 			 * no contexts should be active when CLIENT_RESET is sent.
1690 			 */
1691 			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
1692 			desc_status &= ~GUC_CTB_STATUS_DISABLED;
1693 		}
1694 
1695 		if (desc_status) {
1696 			xe_gt_err(gt, "CT read: non-zero status: %#x\n", desc_status);
1697 			goto corrupted;
1698 		}
1699 	}
1700 
1701 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
1702 		u32 desc_tail = desc_read(xe, g2h, tail);
1703 		/*
1704 		u32 desc_head = desc_read(xe, g2h, head);
1705 
1706 		 * info.head and desc_head are updated back-to-back at the end of
1707 		 * this function and nowhere else. Hence, they cannot be different
1708 		 * unless two g2h_read calls are running concurrently. Which is not
1709 		 * possible because it is guarded by ct->fast_lock. And yet, some
1710 		 * discrete platforms are regularly hitting this error :(.
1711 		 *
1712 		 * desc_head rolling backwards shouldn't cause any noticeable
1713 		 * problems - just a delay in GuC being allowed to proceed past that
1714 		 * point in the queue. So for now, just disable the error until it
1715 		 * can be root caused.
1716 		 *
1717 		if (g2h->info.head != desc_head) {
1718 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
1719 			xe_gt_err(gt, "CT read: head was modified %u != %u\n",
1720 				  desc_head, g2h->info.head);
1721 			goto corrupted;
1722 		}
1723 		 */
1724 
1725 		if (g2h->info.head > g2h->info.size) {
1726 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1727 			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
1728 				  g2h->info.head, g2h->info.size);
1729 			goto corrupted;
1730 		}
1731 
1732 		if (desc_tail >= g2h->info.size) {
1733 			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1734 			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
1735 				  desc_tail, g2h->info.size);
1736 			goto corrupted;
1737 		}
1738 	}
1739 
1740 	/* Calculate DW available to read */
1741 	tail = desc_read(xe, g2h, tail);
1742 	avail = tail - g2h->info.head;
1743 	if (unlikely(avail == 0))
1744 		return 0;
1745 
1746 	if (avail < 0)
1747 		avail += g2h->info.size;
1748 
1749 	/* Read header */
1750 	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
1751 			   sizeof(u32));
1752 	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1753 	if (len > avail) {
1754 		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
1755 			  avail, len);
1756 		goto corrupted;
1757 	}
1758 
1759 	head = (g2h->info.head + 1) % g2h->info.size;
1760 	avail = len - 1;
1761 
1762 	/* Read G2H message */
1763 	if (avail + head > g2h->info.size) {
1764 		u32 avail_til_wrap = g2h->info.size - head;
1765 
1766 		xe_map_memcpy_from(xe, msg + 1,
1767 				   &g2h->cmds, sizeof(u32) * head,
1768 				   avail_til_wrap * sizeof(u32));
1769 		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
1770 				   &g2h->cmds, 0,
1771 				   (avail - avail_til_wrap) * sizeof(u32));
1772 	} else {
1773 		xe_map_memcpy_from(xe, msg + 1,
1774 				   &g2h->cmds, sizeof(u32) * head,
1775 				   avail * sizeof(u32));
1776 	}
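	/*
	 * Worked example of the wrap handling above (numbers for illustration
	 * only): with info.size = 1024, info.head = 1022 and len = 4, the
	 * header is read at dword offset 1022, head becomes 1023 and avail = 3.
	 * Since 1023 + 3 > 1024, one payload dword is copied from offset 1023
	 * and the remaining two from offset 0; info.head then advances to
	 * (1023 + 3) % 1024 = 2 below.
	 */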
1777 
1778 	hxg = msg_to_hxg(msg);
1779 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1780 
1781 	if (fast_path) {
1782 		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1783 			return 0;
1784 
1785 		switch (action) {
1786 		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1787 		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1788 		case XE_GUC_ACTION_PAGE_RECLAMATION_DONE:
1789 			break;	/* Process these in fast-path */
1790 		default:
1791 			return 0;
1792 		}
1793 	}
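	/*
	 * The early returns above leave info.head and the descriptor head
	 * untouched, so any message not consumed on the fast path is re-read
	 * and handled later by the G2H worker. The fast-path action list above
	 * must stay in sync with the handlers in g2h_fast_path().
	 */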
1794 
1795 	/* Update local / descriptor header */
1796 	g2h->info.head = (head + avail) % g2h->info.size;
1797 	desc_write(xe, g2h, head, g2h->info.head);
1798 
1799 	trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
1800 			     action, len, g2h->info.head, tail);
1801 
1802 	return len;
1803 
1804 corrupted:
1805 	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
1806 	return -EPROTO;
1807 }
1808 
g2h_fast_path(struct xe_guc_ct * ct,u32 * msg,u32 len)1809 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1810 {
1811 	struct xe_gt *gt = ct_to_gt(ct);
1812 	struct xe_guc *guc = ct_to_guc(ct);
1813 	u32 hxg_len = msg_len_to_hxg_len(len);
1814 	u32 *hxg = msg_to_hxg(msg);
1815 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1816 	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
1817 	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
1818 	int ret = 0;
1819 
1820 	switch (action) {
1821 	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1822 		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1823 		break;
1824 	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1825 	case XE_GUC_ACTION_PAGE_RECLAMATION_DONE:
1826 		/*
1827 		 * Seqno and fence handling of page reclamation and TLB
1828 		 * invalidation is identical, so we can use the same handler
1829 		 * for both actions.
1830 		 */
1831 		__g2h_release_space(ct, len);
1832 		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1833 		break;
1834 	default:
1835 		xe_gt_warn(gt, "NOT_POSSIBLE");
1836 	}
1837 
1838 	if (ret) {
1839 		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
1840 			  action, ERR_PTR(ret));
1841 		CT_DEAD(ct, NULL, FAST_G2H);
1842 	}
1843 }
1844 
1845 /**
1846  * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
1847  * @ct: GuC CT object
1848  *
1849  * Anything related to page faults is critical for performance, process these
1850  * critical G2H in the IRQ. This is safe as these handlers either just wake up
1851  * waiters or queue another worker.
1852  */
xe_guc_ct_fast_path(struct xe_guc_ct * ct)1853 void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1854 {
1855 	struct xe_device *xe = ct_to_xe(ct);
1856 	bool ongoing;
1857 	int len;
1858 
1859 	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1860 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1861 		return;
1862 
1863 	spin_lock(&ct->fast_lock);
1864 	do {
1865 		len = g2h_read(ct, ct->fast_msg, true);
1866 		if (len > 0)
1867 			g2h_fast_path(ct, ct->fast_msg, len);
1868 	} while (len > 0);
1869 	spin_unlock(&ct->fast_lock);
1870 
1871 	if (ongoing)
1872 		xe_pm_runtime_put(xe);
1873 }
1874 
1875 /* Returns less than zero on error, 0 on done, 1 on more available */
dequeue_one_g2h(struct xe_guc_ct * ct)1876 static int dequeue_one_g2h(struct xe_guc_ct *ct)
1877 {
1878 	int len;
1879 	int ret;
1880 
1881 	lockdep_assert_held(&ct->lock);
1882 
1883 	spin_lock_irq(&ct->fast_lock);
1884 	len = g2h_read(ct, ct->msg, false);
1885 	spin_unlock_irq(&ct->fast_lock);
1886 	if (len <= 0)
1887 		return len;
1888 
1889 	ret = parse_g2h_msg(ct, ct->msg, len);
1890 	if (unlikely(ret < 0))
1891 		return ret;
1892 
1893 	ret = process_g2h_msg(ct, ct->msg, len);
1894 	if (unlikely(ret < 0))
1895 		return ret;
1896 
1897 	return 1;
1898 }
1899 
receive_g2h(struct xe_guc_ct * ct)1900 static void receive_g2h(struct xe_guc_ct *ct)
1901 {
1902 	bool ongoing;
1903 	int ret;
1904 
1905 	/*
1906 	 * Normal users must always hold mem_access.ref around CT calls. However
1907 	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
1908 	 * at this stage we can't rely on mem_access.ref and even the
1909 	 * callback_task will be different than current.  For such cases we just
1910 	 * need to ensure we always process the responses from any blocking
1911 	 * ct_send requests or where we otherwise expect some response when
1912 	 * initiated from those callbacks (which will need to wait for the below
1913 	 * dequeue_one_g2h()).  The dequeue_one_g2h() will gracefully fail if
1914 	 * the device has suspended to the point that the CT communication has
1915 	 * been disabled.
1916 	 *
1917 	 * If we are inside the runtime pm callback, we can be the only task
1918 	 * still issuing CT requests (since that requires having the
1919 	 * mem_access.ref).  It seems like it might in theory be possible to
1920 	 * receive unsolicited events from the GuC just as we are
1921 	 * suspending-resuming, but those will currently anyway be lost when
1922 	 * eventually exiting from suspend, hence no need to wake up the device
1923 	 * here. If we ever need something stronger than xe_pm_runtime_get_if_active()
1924 	 * we need to be careful with blocking the pm callbacks from getting CT
1925 	 * responses, if the worker here is blocked on those callbacks
1926 	 * completing, creating a deadlock.
1927 	 */
1928 	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1929 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1930 		return;
1931 
1932 	do {
1933 		mutex_lock(&ct->lock);
1934 		ret = dequeue_one_g2h(ct);
1935 		mutex_unlock(&ct->lock);
1936 
1937 		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
1938 			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d\n", ret);
1939 			CT_DEAD(ct, NULL, G2H_RECV);
1940 			kick_reset(ct);
1941 		}
1942 	} while (ret == 1);
1943 
1944 	if (ongoing)
1945 		xe_pm_runtime_put(ct_to_xe(ct));
1946 }
1947 
g2h_worker_func(struct work_struct * w)1948 static void g2h_worker_func(struct work_struct *w)
1949 {
1950 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1951 
1952 	receive_g2h(ct);
1953 }
1954 
guc_ct_snapshot_alloc(struct xe_guc_ct * ct,bool atomic,bool want_ctb)1955 static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
1956 							bool want_ctb)
1957 {
1958 	struct xe_guc_ct_snapshot *snapshot;
1959 
1960 	snapshot = kzalloc_obj(*snapshot, atomic ? GFP_ATOMIC : GFP_KERNEL);
1961 	if (!snapshot)
1962 		return NULL;
1963 
1964 	if (ct->bo && want_ctb) {
1965 		snapshot->ctb_size = xe_bo_size(ct->bo);
1966 		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
1967 	}
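	/*
	 * If the ctb allocation failed, snapshot->ctb stays NULL and the raw
	 * buffer contents are simply omitted from the capture.
	 */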
1968 
1969 	return snapshot;
1970 }
1971 
guc_ctb_snapshot_capture(struct xe_device * xe,struct guc_ctb * ctb,struct guc_ctb_snapshot * snapshot)1972 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
1973 				     struct guc_ctb_snapshot *snapshot)
1974 {
1975 	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
1976 			   sizeof(struct guc_ct_buffer_desc));
1977 	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
1978 }
1979 
guc_ctb_snapshot_print(struct guc_ctb_snapshot * snapshot,struct drm_printer * p)1980 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
1981 				   struct drm_printer *p)
1982 {
1983 	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
1984 	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
1985 	drm_printf(p, "\thead: %d\n", snapshot->info.head);
1986 	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
1987 	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
1988 	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
1989 	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
1990 	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
1991 	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
1992 }
1993 
guc_ct_snapshot_capture(struct xe_guc_ct * ct,bool atomic,bool want_ctb)1994 static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
1995 							  bool want_ctb)
1996 {
1997 	struct xe_device *xe = ct_to_xe(ct);
1998 	struct xe_guc_ct_snapshot *snapshot;
1999 
2000 	snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
2001 	if (!snapshot) {
2002 		xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
2003 		return NULL;
2004 	}
2005 
2006 	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
2007 		snapshot->ct_enabled = true;
2008 		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
2009 		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
2010 		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
2011 	}
2012 
2013 	if (ct->bo && snapshot->ctb)
2014 		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
2015 
2016 	return snapshot;
2017 }
2018 
2019 /**
2020  * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
2021  * @ct: GuC CT object.
2022  *
2023  * The snapshot can be printed out at a later stage, e.g. during devcoredump
2024  * analysis. It is safe to call this function from atomic context.
2025  *
2026  * Returns: a GuC CT snapshot object that must be freed by the caller
2027  * using xe_guc_ct_snapshot_free().
2028  */
xe_guc_ct_snapshot_capture(struct xe_guc_ct * ct)2029 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
2030 {
2031 	return guc_ct_snapshot_capture(ct, true, true);
2032 }
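
/*
 * Illustrative usage sketch (not taken from the driver itself; 'ct' and 'p'
 * are assumed to be a valid CT object and drm_printer): a typical consumer
 * captures in atomic context, prints later from a safe context and then frees
 * the snapshot. xe_guc_ct_print() below follows essentially the same pattern
 * with a locally captured snapshot:
 *
 *	struct xe_guc_ct_snapshot *snapshot = xe_guc_ct_snapshot_capture(ct);
 *
 *	xe_guc_ct_snapshot_print(snapshot, p);
 *	xe_guc_ct_snapshot_free(snapshot);
 */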
2033 
2034 /**
2035  * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
2036  * @snapshot: GuC CT snapshot object.
2037  * @p: drm_printer where it will be printed out.
2038  *
2039  * This function prints out a given GuC CT snapshot object.
2040  */
xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot * snapshot,struct drm_printer * p)2041 void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
2042 			      struct drm_printer *p)
2043 {
2044 	if (!snapshot)
2045 		return;
2046 
2047 	if (snapshot->ct_enabled) {
2048 		drm_puts(p, "H2G CTB (all sizes in DW):\n");
2049 		guc_ctb_snapshot_print(&snapshot->h2g, p);
2050 
2051 		drm_puts(p, "G2H CTB (all sizes in DW):\n");
2052 		guc_ctb_snapshot_print(&snapshot->g2h, p);
2053 		drm_printf(p, "\tg2h outstanding: %d\n",
2054 			   snapshot->g2h_outstanding);
2055 
2056 		if (snapshot->ctb) {
2057 			drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
2058 			xe_print_blob_ascii85(p, "[CTB].data", '\n',
2059 					      snapshot->ctb, 0, snapshot->ctb_size);
2060 		}
2061 	} else {
2062 		drm_puts(p, "CT disabled\n");
2063 	}
2064 }
2065 
2066 /**
2067  * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
2068  * @snapshot: GuC CT snapshot object.
2069  *
2070  * This function frees all the memory that was allocated at capture
2071  * time.
2072  */
xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot * snapshot)2073 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
2074 {
2075 	if (!snapshot)
2076 		return;
2077 
2078 	kfree(snapshot->ctb);
2079 	kfree(snapshot);
2080 }
2081 
2082 /**
2083  * xe_guc_ct_print - GuC CT Print.
2084  * @ct: GuC CT.
2085  * @p: drm_printer where it will be printed out.
2086  * @want_ctb: Should the full CTB content be dumped (vs just the headers)
2087  *
2088  * This function will quickly capture a snapshot of the CT state
2089  * and immediately print it out.
2090  */
xe_guc_ct_print(struct xe_guc_ct * ct,struct drm_printer * p,bool want_ctb)2091 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
2092 {
2093 	struct xe_guc_ct_snapshot *snapshot;
2094 
2095 	snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
2096 	xe_guc_ct_snapshot_print(snapshot, p);
2097 	xe_guc_ct_snapshot_free(snapshot);
2098 }
2099 
2100 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
2101 
2102 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
2103 /*
2104  * This is a helper function which lets the driver identify whether a fault
2105  * injection test is currently active, so that unnecessary debug output can be
2106  * suppressed. Normally the function returns zero, but the fault injection
2107  * framework can alter this to return an error. Since faults are injected
2108  * through this function, it is important that the compiler does not optimize
2109  * it into an inline function; the 'noinline' attribute prevents this, as the
2110  * compiler would otherwise inline a static function defined in the header
2111  * file.
2112  */
xe_is_injection_active(void)2113 noinline int xe_is_injection_active(void) { return 0; }
2114 ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
2115 #else
xe_is_injection_active(void)2116 int xe_is_injection_active(void) { return 0; }
2117 #endif
2118 
ct_dead_capture(struct xe_guc_ct * ct,struct guc_ctb * ctb,u32 reason_code)2119 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
2120 {
2121 	struct xe_guc_log_snapshot *snapshot_log;
2122 	struct xe_guc_ct_snapshot *snapshot_ct;
2123 	struct xe_guc *guc = ct_to_guc(ct);
2124 	unsigned long flags;
2125 	bool have_capture;
2126 
2127 	if (ctb)
2128 		ctb->info.broken = true;
2129 	/*
2130 	 * Huge dump is getting generated when injecting error for guc CT/MMIO
2131 	 * functions. So, let us suppress the dump when fault is injected.
2132 	 */
2133 	if (xe_is_injection_active())
2134 		return;
2135 
2136 	/* Ignore further errors after the first dump until a reset */
2137 	if (ct->dead.reported)
2138 		return;
2139 
2140 	spin_lock_irqsave(&ct->dead.lock, flags);
2141 
2142 	/* And only capture one dump at a time */
2143 	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
2144 	ct->dead.reason |= (1 << reason_code) |
2145 			   (1 << CT_DEAD_STATE_CAPTURE);
2146 
2147 	spin_unlock_irqrestore(&ct->dead.lock, flags);
2148 
2149 	if (have_capture)
2150 		return;
2151 
2152 	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
2153 	snapshot_ct = xe_guc_ct_snapshot_capture(ct);
2154 
2155 	spin_lock_irqsave(&ct->dead.lock, flags);
2156 
2157 	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
2158 		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
2159 		xe_guc_log_snapshot_free(snapshot_log);
2160 		xe_guc_ct_snapshot_free(snapshot_ct);
2161 	} else {
2162 		ct->dead.snapshot_log = snapshot_log;
2163 		ct->dead.snapshot_ct = snapshot_ct;
2164 	}
2165 
2166 	spin_unlock_irqrestore(&ct->dead.lock, flags);
2167 
2168 	queue_work(system_unbound_wq, &ct->dead.worker);
2169 }
2170 
ct_dead_print(struct xe_dead_ct * dead)2171 static void ct_dead_print(struct xe_dead_ct *dead)
2172 {
2173 	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
2174 	struct xe_device *xe = ct_to_xe(ct);
2175 	struct xe_gt *gt = ct_to_gt(ct);
2176 	static int g_count;
2177 	struct drm_printer ip = xe_gt_info_printer(gt);
2178 	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
2179 
2180 	if (!dead->reason) {
2181 		xe_gt_err(gt, "CTB is dead for no reason!?\n");
2182 		return;
2183 	}
2184 
2185 	/* Can't generate a genuine core dump at this point, so just do the good bits */
2186 	drm_puts(&lp, "**** Xe Device Coredump ****\n");
2187 	drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
2188 	xe_device_snapshot_print(xe, &lp);
2189 
2190 	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
2191 	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
2192 
2193 	drm_puts(&lp, "**** GuC Log ****\n");
2194 	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
2195 
2196 	drm_puts(&lp, "**** GuC CT ****\n");
2197 	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
2198 
2199 	drm_puts(&lp, "Done.\n");
2200 }
2201 
ct_dead_worker_func(struct work_struct * w)2202 static void ct_dead_worker_func(struct work_struct *w)
2203 {
2204 	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
2205 
2206 	if (!ct->dead.reported) {
2207 		ct->dead.reported = true;
2208 		ct_dead_print(&ct->dead);
2209 	}
2210 
2211 	spin_lock_irq(&ct->dead.lock);
2212 
2213 	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
2214 	ct->dead.snapshot_log = NULL;
2215 	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
2216 	ct->dead.snapshot_ct = NULL;
2217 
2218 	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
2219 		/* A reset has occurred so re-arm the error reporting */
2220 		ct->dead.reason = 0;
2221 		ct->dead.reported = false;
2222 	}
2223 
2224 	spin_unlock_irq(&ct->dead.lock);
2225 }
2226 #endif
2227