// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ct.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>

#include <kunit/static_stub.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
#include "xe_guc_pagefault.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_guc_tlb_inval.h"
#include "xe_map.h"
#include "xe_page_reclaim.h"
#include "xe_pm.h"
#include "xe_sleep.h"
#include "xe_sriov_vf.h"
#include "xe_trace_guc.h"

static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);
static void ct_exit_safe_mode(struct xe_guc_ct *ct);
static void guc_ct_change_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state);

static struct xe_guc *ct_to_guc(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_guc, ct);
}

static struct xe_gt *ct_to_gt(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_gt, uc.guc.ct);
}

static struct xe_device *ct_to_xe(struct xe_guc_ct *ct)
{
	return gt_to_xe(ct_to_gt(ct));
}

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
enum {
	/* Internal states, not error conditions */
	CT_DEAD_STATE_REARM,			/* 0x0001 */
	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */

	/* Error conditions */
	CT_DEAD_SETUP,				/* 0x0004 */
	CT_DEAD_H2G_WRITE,			/* 0x0008 */
	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
	CT_DEAD_G2H_READ,			/* 0x0020 */
	CT_DEAD_G2H_RECV,			/* 0x0040 */
	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
	CT_DEAD_DEADLOCK,			/* 0x0100 */
	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
	CT_DEAD_FAST_G2H,			/* 0x0400 */
	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
	CT_DEAD_CRASH,				/* 0x8000 */
};

static void ct_dead_worker_func(struct work_struct *w);
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);

static void ct_dead_fini(struct xe_guc_ct *ct)
{
	cancel_work_sync(&ct->dead.worker);
}

static void ct_dead_init(struct xe_guc_ct *ct)
{
	spin_lock_init(&ct->dead.lock);
	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
	stack_depot_init();
#endif
}

static void fast_req_stack_save(struct xe_guc_ct *ct, unsigned int slot)
{
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
	unsigned long entries[SZ_32];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	/* May be called under spinlock, so avoid sleeping */
	ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
#endif
}

static void fast_req_dump(struct xe_guc_ct *ct, u16 fence, unsigned int slot)
{
	struct xe_gt *gt = ct_to_gt(ct);
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
	char *buf __cleanup(kfree) = kmalloc(SZ_4K, GFP_NOWAIT);

	if (buf && stack_depot_snprint(ct->fast_req[slot].stack, buf, SZ_4K, 0))
		xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s\n",
			  fence, ct->fast_req[slot].action, buf);
	else
		xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
			  fence, ct->fast_req[slot].action);
#else
	xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
		  fence, ct->fast_req[slot].action);
#endif
}

static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
{
	u16 fence_min = U16_MAX, fence_max = 0;
	struct xe_gt *gt = ct_to_gt(ct);
	unsigned int n;

	lockdep_assert_held(&ct->lock);

	for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
		if (ct->fast_req[n].fence < fence_min)
			fence_min = ct->fast_req[n].fence;
		if (ct->fast_req[n].fence > fence_max)
			fence_max = ct->fast_req[n].fence;

		if (ct->fast_req[n].fence != fence)
			continue;

		return fast_req_dump(ct, fence, n);
	}

	xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
		   fence, fence_min, fence_max, ct->fence_seqno);
}

static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
{
	unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);

	fast_req_stack_save(ct, slot);
	ct->fast_req[slot].fence = fence;
	ct->fast_req[slot].action = action;
}

#define CT_DEAD(ct, ctb, reason_code)	ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)

#else

static void ct_dead_fini(struct xe_guc_ct *ct) { }
static void ct_dead_init(struct xe_guc_ct *ct) { }

static void fast_req_report(struct xe_guc_ct *ct, u16 fence) { }
static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action) { }

#define CT_DEAD(ct, ctb, reason)			\
	do {						\
		struct guc_ctb *_ctb = (ctb);		\
		if (_ctb)				\
			_ctb->info.broken = true;	\
	} while (0)

#endif

/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
	u32 *response_buffer;
	u32 seqno;
	u32 response_data;
	u16 response_len;
	u16 error;
	u16 hint;
	u16 reason;
	bool cancel;
	bool retry;
	bool fail;
	bool done;
};

static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
	memset(g2h_fence, 0, sizeof(*g2h_fence));
	g2h_fence->response_buffer = response_buffer;
	g2h_fence->seqno = ~0x0;
}

static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
{
	g2h_fence->cancel = true;
	g2h_fence->fail = true;

	/* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
	WRITE_ONCE(g2h_fence->done, true);
}

static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
	return g2h_fence->seqno == ~0x0;
}

/**
 * DOC: GuC CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-----------------------------------------------+------+
 *      | offset | contents                                      | size |
 *      +========+===============================================+======+
 *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
 *      +--------+-----------------------------------------------+  4K  |
 *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
 *      |        |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
 *      | + n*4K |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *
 * The size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully
 * this is enough space to avoid backpressure on the driver. We increase
 * the size of the receive buffer (relative to the send) to ensure a G2H
 * response CTB has a landing spot.
 *
 * In addition to submissions, the G2H buffer needs to be able to hold
 * enough space for recoverable page fault notifications. The number of
 * page faults is interrupt driven and can be as much as the number of
 * compute resources available. However, most of the actual work for these
 * is in a separate page fault worker thread. Therefore we only need to
 * make sure the queue has enough space to handle all of the submissions
 * and responses and an extra buffer for incoming page faults.
 */

#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_OFFSET	(CTB_DESC_SIZE * 2)
#define CTB_G2H_BUFFER_OFFSET	(CTB_DESC_SIZE * 2)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_H2G_BUFFER_DWORDS	(CTB_H2G_BUFFER_SIZE / sizeof(u32))
#define CTB_G2H_BUFFER_SIZE	(SZ_128K)
#define CTB_G2H_BUFFER_DWORDS	(CTB_G2H_BUFFER_SIZE / sizeof(u32))
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)
#define G2H_ROOM_BUFFER_DWORDS	(CTB_G2H_BUFFER_DWORDS / 2)
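
/*
 * Worked example, not driver code: with the sizes above, and assuming
 * sizeof(struct guc_ct_buffer_desc) <= 2K (which the ALIGN implies), the
 * layout in the "GuC CTB Blob" table works out to n = 1 and m = 32:
 *
 *	CTB_DESC_SIZE		= 2K
 *	CTB_H2G_BUFFER_OFFSET	= 2 * 2K = 0x1000
 *	guc_h2g_size()		= 0x1000 + 4K   = 8K
 *	guc_g2h_size()		= 0x1000 + 128K = 132K
 *	G2H_ROOM_BUFFER_SIZE	= 128K / 2 = 64K reserved for G2H replies
 */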

/**
 * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
 * CT command queue
 * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
 *
 * Observation is that a 4KiB buffer full of commands takes a little over a
 * second to process. Use that to calculate maximum time to process a full CT
 * command queue.
 *
 * Return: Maximum time to process a full CT queue in jiffies.
 */
long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
{
	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4K));
	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
}

static size_t guc_h2g_size(void)
{
	return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE;
}

static size_t guc_g2h_size(void)
{
	return CTB_G2H_BUFFER_OFFSET + CTB_G2H_BUFFER_SIZE;
}

static void guc_ct_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_ct *ct = arg;

	ct_dead_fini(ct);
	ct_exit_safe_mode(ct);
	destroy_workqueue(ct->g2h_wq);
	xa_destroy(&ct->fence_lookup);
}

static void primelockdep(struct xe_guc_ct *ct)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ct->lock);
	fs_reclaim_release(GFP_KERNEL);
}

int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	int err;

	xe_gt_assert(gt, !(guc_h2g_size() % PAGE_SIZE));
	xe_gt_assert(gt, !(guc_g2h_size() % PAGE_SIZE));

	err = drmm_mutex_init(&xe->drm, &ct->lock);
	if (err)
		return err;

	primelockdep(ct);

	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
	if (!ct->g2h_wq)
		return -ENOMEM;

	spin_lock_init(&ct->fast_lock);
	xa_init(&ct->fence_lookup);
	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);

	ct_dead_init(ct);
	init_waitqueue_head(&ct->wq);
	init_waitqueue_head(&ct->g2h_fence_wq);

	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
	if (err)
		return err;

	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
	ct->state = XE_GUC_CT_STATE_DISABLED;
	return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */

static void guc_action_disable_ct(void *arg)
{
	struct xe_guc_ct *ct = arg;

	xe_guc_ct_stop(ct);
	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
}

int xe_guc_ct_init(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_h2g_size(),
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ct->ctbs.h2g.bo = bo;

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_g2h_size(),
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ct->ctbs.g2h.bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */

/**
 * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
 * @ct: the &xe_guc_ct
 *
 * Allocate a new BO in VRAM and free the previous BO that was allocated
 * in system memory (SMEM). Applicable only for DGFX products.
 *
 * Return: 0 on success, or a negative errno on failure.
 */
int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_tile *tile = gt_to_tile(gt);
	int ret;

	xe_assert(xe, !xe_guc_ct_enabled(ct));

	if (IS_DGFX(xe)) {
		ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->ctbs.h2g.bo);
		if (ret)
			return ret;
	}

	devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
}

#define desc_read(xe_, guc_ctb__, field_)			\
	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_)

#define desc_write(xe_, guc_ctb__, field_, val_)		\
	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_, val_)

static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
{
	h2g->info.size = CTB_H2G_BUFFER_DWORDS;
	h2g->info.resv_space = 0;
	h2g->info.tail = 0;
	h2g->info.head = 0;
	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
				     h2g->info.size) -
			  h2g->info.resv_space;
	h2g->info.broken = false;

	h2g->desc = *map;
	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
}

static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
{
	g2h->info.size = CTB_G2H_BUFFER_DWORDS;
	g2h->info.resv_space = G2H_ROOM_BUFFER_DWORDS;
	g2h->info.head = 0;
	g2h->info.tail = 0;
	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
				     g2h->info.size) -
			  g2h->info.resv_space;
	g2h->info.broken = false;

	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_G2H_BUFFER_OFFSET);
}
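
/*
 * A minimal worked example (not driver code) of the CIRC_SPACE() math used
 * above: the producer index (tail, for H2G) goes first, so the free space is
 * (head - tail - 1) modulo size. With hypothetical indices:
 *
 *	size = CTB_H2G_BUFFER_DWORDS;		// 1024 dwords
 *	tail = 100; head = 50;
 *	CIRC_SPACE(tail, head, size);		// (50 - 101) & 1023 = 973
 *
 * On a freshly initialized buffer (head == tail == 0) this yields size - 1
 * free dwords, from which any reserved space (resv_space) is subtracted.
 */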

static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->ctbs.h2g.bo);
	ctb_addr = xe_bo_ggtt_addr(ct->ctbs.h2g.bo) + CTB_H2G_BUFFER_OFFSET;
	size = ct->ctbs.h2g.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->ctbs.g2h.bo) + CTB_DESC_SIZE;
	ctb_addr = xe_bo_ggtt_addr(ct->ctbs.g2h.bo) + CTB_G2H_BUFFER_OFFSET;
	size = ct->ctbs.g2h.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
{
	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
			   enable ? GUC_CTB_CONTROL_ENABLE :
			   GUC_CTB_CONTROL_DISABLE),
	};
	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static void guc_ct_change_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct g2h_fence *g2h_fence;
	unsigned long idx;

	mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
	spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */

	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
		     state == XE_GUC_CT_STATE_STOPPED);

	if (ct->g2h_outstanding)
		xe_pm_runtime_put(ct_to_xe(ct));
	ct->g2h_outstanding = 0;

	/*
	 * WRITE_ONCE pairs with READ_ONCEs in xe_guc_ct_initialized and
	 * xe_guc_ct_enabled.
	 */
	WRITE_ONCE(ct->state, state);

	xe_gt_dbg(gt, "GuC CT communication channel %s\n",
		  state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
		  str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));

	spin_unlock_irq(&ct->fast_lock);

	/* cancel all in-flight send-recv requests */
	xa_for_each(&ct->fence_lookup, idx, g2h_fence)
		g2h_fence_cancel(g2h_fence);

	/* make sure guc_ct_send_recv() will see g2h_fence changes */
	smp_mb();
	wake_up_all(&ct->g2h_fence_wq);

	/*
	 * Lockdep doesn't like this under the fast lock, and the destroy only
	 * needs to be serialized with the send path, which the ct lock
	 * provides.
	 */
	xa_destroy(&ct->fence_lookup);

	mutex_unlock(&ct->lock);
}

static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
{
	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
}

static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
{
	if (!ct_needs_safe_mode(ct))
		return false;

	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
	return true;
}

static void safe_mode_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);

	receive_g2h(ct);

	if (!ct_restart_safe_mode_worker(ct))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
}

static void ct_enter_safe_mode(struct xe_guc_ct *ct)
{
	if (ct_restart_safe_mode_worker(ct))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
}

static void ct_exit_safe_mode(struct xe_guc_ct *ct)
{
	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
}

static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	int err;

	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));

	if (needs_register) {
		xe_map_memset(xe, &ct->ctbs.h2g.bo->vmap, 0, 0,
			      xe_bo_size(ct->ctbs.h2g.bo));
		xe_map_memset(xe, &ct->ctbs.g2h.bo->vmap, 0, 0,
			      xe_bo_size(ct->ctbs.g2h.bo));
		guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->ctbs.h2g.bo->vmap);
		guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->ctbs.g2h.bo->vmap);

		err = guc_ct_ctb_h2g_register(ct);
		if (err)
			goto err_out;

		err = guc_ct_ctb_g2h_register(ct);
		if (err)
			goto err_out;

		err = guc_ct_control_toggle(ct, true);
		if (err)
			goto err_out;
	} else {
		ct->ctbs.h2g.info.broken = false;
		ct->ctbs.g2h.info.broken = false;
		/* Skip everything in H2G buffer */
		xe_map_memset(xe, &ct->ctbs.h2g.bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
			      CTB_H2G_BUFFER_SIZE);
	}

	guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);

	smp_mb();
	wake_up_all(&ct->wq);

	if (ct_needs_safe_mode(ct))
		ct_enter_safe_mode(ct);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
	/*
	 * The CT has now been reset so the dumper can be re-armed
	 * after any existing dead state has been dumped.
	 */
	spin_lock_irq(&ct->dead.lock);
	if (ct->dead.reason) {
		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
		queue_work(system_dfl_wq, &ct->dead.worker);
	}
	spin_unlock_irq(&ct->dead.lock);
#endif

	return 0;

err_out:
	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
	CT_DEAD(ct, NULL, SETUP);

	return err;
}

/**
 * xe_guc_ct_restart() - Restart GuC CT
 * @ct: the &xe_guc_ct
 *
 * Restart GuC CT to an empty state without issuing a CT register MMIO command.
 *
 * Return: 0 on success, or a negative errno on failure.
 */
int xe_guc_ct_restart(struct xe_guc_ct *ct)
{
	return __xe_guc_ct_start(ct, false);
}

/**
 * xe_guc_ct_enable() - Enable GuC CT
 * @ct: the &xe_guc_ct
 *
 * Enable GuC CT to an empty state and issue a CT register MMIO command.
 *
 * Return: 0 on success, or a negative errno on failure.
 */
int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
	return __xe_guc_ct_start(ct, true);
}

static void stop_g2h_handler(struct xe_guc_ct *ct)
{
	cancel_work_sync(&ct->g2h_worker);
}

/**
 * xe_guc_ct_disable - Set GuC to disabled state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to the disabled state and stop the g2h handler. No outstanding
 * g2h is expected in this transition.
 */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
	ct_exit_safe_mode(ct);
	stop_g2h_handler(ct);
}

/**
 * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G
 * @ct: the &xe_guc_ct
 */
void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct)
{
	receive_g2h(ct);
	xe_guc_ct_stop(ct);
}

/**
 * xe_guc_ct_stop - Set GuC to stopped state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to the stopped state, stop the g2h handler, and clear any
 * outstanding g2h.
 */
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
	if (!xe_guc_ct_initialized(ct))
		return;

	guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
	stop_g2h_handler(ct);
}

/**
 * xe_guc_ct_runtime_suspend() - GuC CT runtime suspend
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to disabled state.
 */
void xe_guc_ct_runtime_suspend(struct xe_guc_ct *ct)
{
	struct guc_ctb *g2h = &ct->ctbs.g2h;
	u32 credits = CIRC_SPACE(0, 0, CTB_G2H_BUFFER_DWORDS) - G2H_ROOM_BUFFER_DWORDS;

	/* We should be back to guc_ct_ctb_g2h_init() values */
	xe_gt_assert(ct_to_gt(ct), g2h->info.space == credits);

	/*
	 * Since we're already in the runtime suspend path, we shouldn't have
	 * pending messages. But if there happen to be any, we'd probably want
	 * them to be thrown as errors for further investigation.
	 */
	xe_guc_ct_disable(ct);
}

/**
 * xe_guc_ct_runtime_resume() - GuC CT runtime resume
 * @ct: the &xe_guc_ct
 *
 * Restart GuC CT and set it to enabled state.
 */
void xe_guc_ct_runtime_resume(struct xe_guc_ct *ct)
{
	xe_guc_ct_restart(ct);
}

static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{
	struct guc_ctb *h2g = &ct->ctbs.h2g;

	lockdep_assert_held(&ct->lock);

	if (cmd_len > h2g->info.space) {
		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);

		if (h2g->info.head > h2g->info.size) {
			struct xe_device *xe = ct_to_xe(ct);
			u32 desc_status = desc_read(xe, h2g, status);

			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);

			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
				  h2g->info.head, h2g->info.size);
			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
			return false;
		}

		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
					     h2g->info.size) -
				  h2g->info.resv_space;
		if (cmd_len > h2g->info.space)
			return false;
	}

	return true;
}

static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
{
	if (!g2h_len)
		return true;

	lockdep_assert_held(&ct->fast_lock);

	return ct->ctbs.g2h.info.space > g2h_len;
}

static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
{
	lockdep_assert_held(&ct->lock);

	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
		return -EBUSY;

	return 0;
}

static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
{
	lockdep_assert_held(&ct->lock);
	ct->ctbs.h2g.info.space -= cmd_len;
}

static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
		     (g2h_len && num_g2h));

	if (g2h_len) {
		lockdep_assert_held(&ct->fast_lock);

		if (!ct->g2h_outstanding)
			xe_pm_runtime_get_noresume(ct_to_xe(ct));

		ct->ctbs.g2h.info.space -= g2h_len;
		ct->g2h_outstanding += num_g2h;
	}
}

static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	bool bad = false;

	lockdep_assert_held(&ct->fast_lock);

	bad = ct->ctbs.g2h.info.space + g2h_len >
	      ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
	bad |= !ct->g2h_outstanding;

	if (bad) {
		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
			  ct->ctbs.g2h.info.space, g2h_len,
			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
			  ct->ctbs.g2h.info.space + g2h_len,
			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
			  ct->g2h_outstanding);
		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
		return;
	}

	ct->ctbs.g2h.info.space += g2h_len;
	if (!--ct->g2h_outstanding)
		xe_pm_runtime_put(ct_to_xe(ct));
}

static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	spin_lock_irq(&ct->fast_lock);
	__g2h_release_space(ct, g2h_len);
	spin_unlock_irq(&ct->fast_lock);
}

/*
 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
 * driver; the GuC will just copy it to the reply message. Since we need to
 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
 * we use one bit of the seqno as an indicator for that and a rolling counter
 * for the remaining 15 bits.
 */
#define CT_SEQNO_MASK		GENMASK(14, 0)
#define CT_SEQNO_UNTRACKED	BIT(15)
static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
{
	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= CT_SEQNO_UNTRACKED;

	return seqno;
}
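
/*
 * Illustrative sketch, not driver code: how a received fence decomposes
 * under the masks above. A tracked (blocking) seqno has bit 15 clear;
 * untracked (FAST_REQUEST / EVENT) sends have it set.
 *
 *	u16 fence = 0x8005;				// hypothetical value
 *	bool untracked = fence & CT_SEQNO_UNTRACKED;	// true
 *	u16 counter = fence & CT_SEQNO_MASK;		// 0x0005
 */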

#define MAKE_ACTION(type, __action)					\
	({								\
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |			\
		FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |			\
			   GUC_HXG_EVENT_MSG_0_DATA0, __action);	\
	})

static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
{
	/*
	 * When resuming a VF, we can't reliably track whether context
	 * registration has completed in the GuC state machine. It is harmless
	 * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
	 * is used. Additionally, if there is an H2G protocol issue on a VF,
	 * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
	 * fail.
	 */
	return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
		(action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
		 action == XE_GUC_ACTION_REGISTER_CONTEXT);
}

#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */

static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *h2g = &ct->ctbs.h2g;
	u32 cmd[H2G_CT_HEADERS];
	u32 tail = h2g->info.tail;
	u32 full_len;
	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
						     tail * sizeof(u32));

	full_len = len + GUC_CTB_HDR_LEN;

	lockdep_assert_held(&ct->lock);
	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		u32 desc_tail = desc_read(xe, h2g, tail);
		u32 desc_head = desc_read(xe, h2g, head);
		u32 desc_status;

		desc_status = desc_read(xe, h2g, status);
		if (desc_status) {
			xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
			goto corrupted;
		}

		if (tail != desc_tail) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
			goto corrupted;
		}

		if (tail > h2g->info.size) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
				  tail, h2g->info.size);
			goto corrupted;
		}

		if (desc_head >= h2g->info.size) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
				  desc_head, h2g->info.size);
			goto corrupted;
		}
	}

	/* Command will wrap, zero fill (NOPs), return and check credits again */
	if (tail + full_len > h2g->info.size) {
		xe_map_memset(xe, &map, 0, 0,
			      (h2g->info.size - tail) * sizeof(u32));
		h2g_reserve_space(ct, (h2g->info.size - tail));
		h2g->info.tail = 0;
		desc_write(xe, h2g, tail, h2g->info.tail);

		return -EAGAIN;
	}

	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 * dw2+: action data
	 */
	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
	if (want_response) {
		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
	} else if (vf_action_can_safely_fail(xe, action[0])) {
		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
	} else {
		fast_req_track(ct, ct_fence_value,
			       FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));

		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
	}

	/* H2G header in cmd[1] replaces action[0] so: */
	--len;
	++action;

	/* Write H2G ensuring visible before descriptor update */
	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
	xe_device_wmb(xe);

	/* Update local copies */
	h2g->info.tail = (tail + full_len) % h2g->info.size;
	h2g_reserve_space(ct, full_len);

	/* Update descriptor */
	desc_write(xe, h2g, tail, h2g->info.tail);

	/*
	 * desc_read() performs a VRAM read which serializes the CPU and drains
	 * posted writes on dGPU platforms. Tracepoints evaluate arguments even
	 * when disabled, so guard the event to avoid adding µs-scale latency to
	 * the fast H2G submission path when tracing is not active.
	 */
	if (trace_xe_guc_ctb_h2g_enabled())
		trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
				     desc_read(xe, h2g, head), h2g->info.tail);

	return 0;

corrupted:
	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
	return -EPIPE;
}
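
/*
 * Worked example (not driver code) of the wire format h2g_write() emits,
 * for a hypothetical 3-dword action { ACTION, data0, data1 } sent as a
 * FAST_REQUEST with fence 0x1234 (full_len = 3 + GUC_CTB_HDR_LEN = 4):
 *
 *	dw0: CTB header - FORMAT = GUC_CTB_FORMAT_HXG,
 *	     NUM_DWORDS = 3, FENCE = 0x1234
 *	dw1: HXG header - TYPE = GUC_HXG_TYPE_FAST_REQUEST,
 *	     ACTION/DATA0 taken from action[0]
 *	dw2: data0
 *	dw3: data1
 *
 * The HXG header in dw1 replaces action[0], which is why the code above
 * decrements len and advances action before copying the payload.
 */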

static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u16 seqno;
	int ret;

	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	xe_gt_assert(gt, !num_g2h || !g2h_fence);
	xe_gt_assert(gt, !g2h_len || num_g2h);
	xe_gt_assert(gt, g2h_len || !num_g2h);
	lockdep_assert_held(&ct->lock);

	if (unlikely(ct->ctbs.h2g.info.broken)) {
		ret = -EPIPE;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
		ret = -ENODEV;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) {
		ret = -ECANCELED;
		goto out;
	}

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	if (g2h_fence) {
		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
		num_g2h = 1;

		if (g2h_fence_needs_alloc(g2h_fence)) {
			g2h_fence->seqno = next_ct_seqno(ct, true);
			ret = xa_err(xa_store(&ct->fence_lookup,
					      g2h_fence->seqno, g2h_fence,
					      GFP_ATOMIC));
			if (ret)
				goto out;
		}

		seqno = g2h_fence->seqno;
	} else {
		seqno = next_ct_seqno(ct, false);
	}

	if (g2h_len)
		spin_lock_irq(&ct->fast_lock);
retry:
	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
	if (unlikely(ret))
		goto out_unlock;

	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
	if (unlikely(ret)) {
		if (ret == -EAGAIN)
			goto retry;
		goto out_unlock;
	}

	__g2h_reserve_space(ct, g2h_len, num_g2h);
	xe_guc_notify(ct_to_guc(ct));
out_unlock:
	if (g2h_len)
		spin_unlock_irq(&ct->fast_lock);
out:
	return ret;
}

static void kick_reset(struct xe_guc_ct *ct)
{
	xe_gt_reset_async(ct_to_gt(ct));
}

static int dequeue_one_g2h(struct xe_guc_ct *ct);

/*
 * Wait before retrying an H2G message send.
 * Return: true if ready for retry, false if the wait timed out.
 */
static bool guc_ct_send_wait_for_retry(struct xe_guc_ct *ct, u32 len,
				       u32 g2h_len, struct g2h_fence *g2h_fence,
				       unsigned int *sleep_period_ms,
				       unsigned int *sleep_total_ms)
{
	struct xe_device *xe = ct_to_xe(ct);

	/*
	 * We wait to try to restore credits for about 1 second before bailing.
	 * In the case of H2G credits we have no choice but just to wait for the
	 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
	 * the case of G2H we process any G2H in the channel, hopefully freeing
	 * credits as we consume the G2H messages.
	 */
	if (!h2g_has_room(ct, len + GUC_CTB_HDR_LEN)) {
		struct guc_ctb *h2g = &ct->ctbs.h2g;

		if (*sleep_total_ms > 1000)
			return false;

		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
						 h2g->info.size,
						 h2g->info.space,
						 len + GUC_CTB_HDR_LEN);
		*sleep_total_ms += xe_sleep_exponential_ms(sleep_period_ms, 64);
	} else {
		struct guc_ctb *g2h = &ct->ctbs.g2h;
		int ret;

		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
						 desc_read(xe, g2h, tail),
						 g2h->info.size,
						 g2h->info.space,
						 g2h_fence ?
						 GUC_CTB_HXG_MSG_MAX_LEN :
						 g2h_len);

#define g2h_avail(ct) \
	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
					g2h_avail(ct), HZ))
			return false;
#undef g2h_avail

		ret = dequeue_one_g2h(ct);
		if (ret < 0) {
			if (ret != -ECANCELED)
				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)\n",
					  ERR_PTR(ret));
			return false;
		}
	}
	return true;
}

static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
{
	struct xe_gt *gt = ct_to_gt(ct);
	unsigned int sleep_period_ms = 1;
	unsigned int sleep_total_ms = 0;
	int ret;

	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	lockdep_assert_held(&ct->lock);
	xe_device_assert_mem_access(ct_to_xe(ct));

try_again:
	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
				   g2h_fence);

	if (unlikely(ret == -EBUSY)) {
		if (!guc_ct_send_wait_for_retry(ct, len, g2h_len, g2h_fence,
						&sleep_period_ms, &sleep_total_ms))
			goto broken;
		goto try_again;
	}

	return ret;

broken:
	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);

	return -EDEADLK;
}

static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
{
	int ret;

	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);

	mutex_lock(&ct->lock);
	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
	mutex_unlock(&ct->lock);

	return ret;
}

int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}
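
/*
 * Usage sketch, with hypothetical names (XE_GUC_ACTION_FOO, ctx_id and
 * G2H_LEN_DW_FOO_DONE are illustrative only): a non-blocking send that
 * expects a single G2H completion message, so one credit of G2H space is
 * reserved up front and released again when the reply is parsed.
 *
 *	u32 action[] = { XE_GUC_ACTION_FOO, ctx_id };
 *	int err = xe_guc_ct_send(ct, action, ARRAY_SIZE(action),
 *				 G2H_LEN_DW_FOO_DONE, 1);
 */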

int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	int ret;

	lockdep_assert_held(&ct->lock);

	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

/*
 * Check if a GT reset is in progress or will occur and if GT reset brought the
 * CT back up. Randomly picking 5 seconds as an upper limit to do a GT reset.
 */
static bool retry_failure(struct xe_guc_ct *ct, int ret)
{
	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
		return false;

#define ct_alive(ct)	\
	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
	 !ct->ctbs.g2h.info.broken)
	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
		return false;
#undef ct_alive

	return true;
}

#define GUC_SEND_RETRY_LIMIT	50
#define GUC_SEND_RETRY_MSLEEP	5

static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct g2h_fence g2h_fence;
	unsigned int retries = 0;
	int ret = 0;

	/*
	 * We use a fence to implement blocking sends / receiving response data.
	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
	 * an xarray is used as storage media with the seqno being the key.
	 * Fields in the fence hold success, failure, retry status and the
	 * response data. Safe to allocate on the stack as the xarray is the
	 * only reference and it cannot be present after this function exits.
	 */
retry:
	g2h_fence_init(&g2h_fence, response_buffer);
retry_same_fence:
	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
	if (unlikely(ret == -ENOMEM)) {
		/* Retry allocation with GFP_KERNEL */
		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
				      &g2h_fence, GFP_KERNEL));
		if (ret)
			return ret;

		goto retry_same_fence;
	} else if (unlikely(ret)) {
		if (ret == -EDEADLK)
			kick_reset(ct);

		if (no_fail && retry_failure(ct, ret))
			goto retry_same_fence;

		if (!g2h_fence_needs_alloc(&g2h_fence))
			xa_erase(&ct->fence_lookup, g2h_fence.seqno);

		return ret;
	}

	/*
	 * READ_ONCEs pair with WRITE_ONCEs in parse_g2h_response
	 * and g2h_fence_cancel.
	 */
	ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
	if (!ret) {
		LNL_FLUSH_WORK(&ct->g2h_worker);
		if (READ_ONCE(g2h_fence.done)) {
			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
				   g2h_fence.seqno, action[0]);
			ret = 1;
		}
	}

	/*
	 * Ensure we serialize with the completion side to prevent UAF with the
	 * fence going out of scope on the stack, since we have no clue if it
	 * will fire after the timeout before we can erase it from the xa. Also
	 * we have some dependent loads and stores below for which we need the
	 * correct ordering, and we lack the needed barriers.
	 */
	mutex_lock(&ct->lock);
	if (!ret) {
		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s\n",
			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
		xa_erase(&ct->fence_lookup, g2h_fence.seqno);
		mutex_unlock(&ct->lock);
		return -ETIME;
	}

	if (g2h_fence.retry) {
		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
			  action[0], g2h_fence.reason);
		mutex_unlock(&ct->lock);
		if (++retries > GUC_SEND_RETRY_LIMIT) {
			xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
				  action[0], GUC_SEND_RETRY_LIMIT);
			return -ELOOP;
		}
		msleep(GUC_SEND_RETRY_MSLEEP * retries);
		goto retry;
	}
	if (g2h_fence.fail) {
		if (g2h_fence.cancel) {
			xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
			ret = -ECANCELED;
			goto unlock;
		}
		xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
			  action[0], g2h_fence.error, g2h_fence.hint);
		ret = -EIO;
	}

	if (ret > 0)
		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;

unlock:
	mutex_unlock(&ct->lock);

	return ret;
}

/**
 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
 * @ct: the &xe_guc_ct
 * @action: the dword array with `HXG Request`_ message (can't be NULL)
 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
 *
 * Send a `HXG Request`_ message to the GuC over the CT communication channel
 * and block until the GuC replies with a `HXG Response`_ message.
 *
 * For non-blocking communication with GuC use xe_guc_ct_send().
 *
 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
 *
 * Return: response length (in dwords) if &response_buffer was not NULL, or
 *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
 *         a negative error code on failure.
 */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer)
{
	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
	return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
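
/*
 * Usage sketch, with hypothetical action/payload names (XE_GUC_ACTION_QUERY_FOO
 * and key are illustrative only): a blocking send that waits for the matching
 * `HXG Response`_. With a NULL response_buffer the return value is DATA0 from
 * the response on success.
 *
 *	u32 action[] = { XE_GUC_ACTION_QUERY_FOO, key };
 *	int val = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *	if (val < 0)
 *		return val;	// e.g. -ETIME, -ECANCELED, -EIO
 */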

int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer)
{
	return guc_ct_send_recv(ct, action, len, response_buffer, true);
}

static u32 *msg_to_hxg(u32 *msg)
{
	return msg + GUC_CTB_MSG_MIN_LEN;
}

static u32 msg_len_to_hxg_len(u32 len)
{
	return len - GUC_CTB_MSG_MIN_LEN;
}

static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	lockdep_assert_held(&ct->lock);

	switch (action) {
	case XE_GUC_ACTION_NOTIFY_MULTI_QUEUE_CONTEXT_CGP_SYNC_DONE:
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
	case XE_GUC_ACTION_PAGE_RECLAMATION_DONE:
		g2h_release_space(ct, len);
	}

	return 0;
}

static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
{
	struct xe_gt *gt = ct_to_gt(ct);

	if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
		xe_gt_err(gt, "GuC Crash dump notification\n");
	else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
		xe_gt_err(gt, "GuC Exception notification\n");
	else
		xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);

	CT_DEAD(ct, NULL, CRASH);

	kick_reset(ct);

	return 0;
}

static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	struct g2h_fence *g2h_fence;

	lockdep_assert_held(&ct->lock);

	/*
	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
	 * Those messages should never fail, so if we do get an error back it
	 * means we're likely doing an illegal operation and the GuC is
	 * rejecting it. We have no way to inform the code that submitted the
	 * H2G that the message was rejected, so we need to escalate the
	 * failure to trigger a reset.
	 */
	if (fence & CT_SEQNO_UNTRACKED) {
		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
				  fence,
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
		else
			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
				  type, fence);

		fast_req_report(ct, fence);

		/* FIXME: W/A for a race in the GuC, will be fixed in firmware soon */
		if (xe_gt_recovery_pending(gt))
			return 0;

		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);

		return -EPROTO;
	}

	g2h_fence = xa_erase(&ct->fence_lookup, fence);
	if (unlikely(!g2h_fence)) {
		/* Don't tear down channel, as send could've timed out */
		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
		return 0;
	}

	xe_gt_assert(gt, fence == g2h_fence->seqno);

	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		g2h_fence->fail = true;
		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		g2h_fence->retry = true;
		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
	} else if (g2h_fence->response_buffer) {
		g2h_fence->response_len = hxg_len;
		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
	} else {
		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
	}

	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);

	/* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
	WRITE_ONCE(g2h_fence->done, true);
	smp_mb();

	wake_up_all(&ct->g2h_fence_wq);

	return 0;
}

static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 origin, type;
	int ret;

	lockdep_assert_held(&ct->lock);

	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
			  origin);
		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);

		return -EPROTO;
	}

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	switch (type) {
	case GUC_HXG_TYPE_EVENT:
		ret = parse_g2h_event(ct, msg, len);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		ret = parse_g2h_response(ct, msg, len);
		break;
	default:
		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
			  type);
		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);

		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_guc *guc = ct_to_guc(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action, adj_len;
	u32 *payload;
	int ret = 0;

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
		return 0;

	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
							      adj_len);
		break;
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
		/* Selftest only at the moment */
		break;
	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
		ret = xe_guc_error_capture_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
		/* FIXME: Handle this */
		break;
	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
								 adj_len);
		break;
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_PAGE_RECLAMATION_DONE:
		ret = xe_guc_page_reclaim_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
		break;
	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
		break;
	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
		break;
	case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
		ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
		break;
	case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
	case XE_GUC_ACTION_NOTIFY_EXCEPTION:
		ret = guc_crash_process_msg(ct, action);
		break;
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	case XE_GUC_ACTION_TEST_G2G_RECV:
		ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
		break;
#endif
	case XE_GUC_ACTION_NOTIFY_MULTI_QUEUE_CONTEXT_CGP_SYNC_DONE:
		ret = xe_guc_exec_queue_cgp_sync_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_NOTIFY_MULTI_QUEUE_CGP_CONTEXT_ERROR:
		ret = xe_guc_exec_queue_cgp_context_error_handler(guc, payload,
								  adj_len);
		break;
	default:
		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
	}

	if (ret) {
		xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
			  action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
		CT_DEAD(ct, NULL, PROCESS_FAILED);
	}

	return 0;
}
1679
static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *g2h = &ct->ctbs.g2h;
	u32 tail, head, len, desc_status;
	s32 avail;
	u32 action;
	u32 *hxg;

	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
	lockdep_assert_held(&ct->fast_lock);

	if (ct->state == XE_GUC_CT_STATE_DISABLED)
		return -ENODEV;

	if (ct->state == XE_GUC_CT_STATE_STOPPED)
		return -ECANCELED;

	if (g2h->info.broken)
		return -EPIPE;

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	desc_status = desc_read(xe, g2h, status);
	if (desc_status) {
		if (desc_status & GUC_CTB_STATUS_DISABLED) {
			/*
			 * Potentially valid if a CLIENT_RESET request resulted in
			 * contexts/engines being reset. But should never happen as
			 * no contexts should be active when CLIENT_RESET is sent.
			 */
			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
			desc_status &= ~GUC_CTB_STATUS_DISABLED;
		}

		if (desc_status) {
			xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
			goto corrupted;
		}
	}
	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		u32 desc_tail = desc_read(xe, g2h, tail);

		/*
		 * info.head and desc_head are updated back-to-back at the end of
		 * this function and nowhere else. Hence, they cannot be different
		 * unless two g2h_read calls are running concurrently. Which is not
		 * possible because it is guarded by ct->fast_lock. And yet, some
		 * discrete platforms are regularly hitting this error :(.
		 *
		 * desc_head rolling backwards shouldn't cause any noticeable
		 * problems - just a delay in GuC being allowed to proceed past that
		 * point in the queue. So for now, just disable the check until it
		 * can be root caused:
		 *
		 * u32 desc_head = desc_read(xe, g2h, head);
		 *
		 * if (g2h->info.head != desc_head) {
		 *	desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
		 *	xe_gt_err(gt, "CT read: head was modified %u != %u\n",
		 *		  desc_head, g2h->info.head);
		 *	goto corrupted;
		 * }
		 */

		if (g2h->info.head > g2h->info.size) {
			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
				  g2h->info.head, g2h->info.size);
			goto corrupted;
		}

		if (desc_tail >= g2h->info.size) {
			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
				  desc_tail, g2h->info.size);
			goto corrupted;
		}
	}

	/* Calculate DW available to read */
	tail = desc_read(xe, g2h, tail);
	avail = tail - g2h->info.head;
	if (unlikely(avail == 0))
		return 0;

	if (avail < 0)
		avail += g2h->info.size;

	/* Read header */
	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
			   sizeof(u32));
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
	if (len > avail) {
		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
			  avail, len);
		goto corrupted;
	}

	head = (g2h->info.head + 1) % g2h->info.size;
	avail = len - 1;

	/* Read G2H message, in two chunks if the payload wraps the ring */
	if (avail + head > g2h->info.size) {
		u32 avail_til_wrap = g2h->info.size - head;

		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail_til_wrap * sizeof(u32));
		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
				   &g2h->cmds, 0,
				   (avail - avail_til_wrap) * sizeof(u32));
	} else {
		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail * sizeof(u32));
	}

	hxg = msg_to_hxg(msg);
	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	if (fast_path) {
		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
			return 0;

		switch (action) {
		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		case XE_GUC_ACTION_PAGE_RECLAMATION_DONE:
			break;	/* Process these in fast-path */
		default:
			return 0;
		}
	}

	/* Update local / descriptor header */
	g2h->info.head = (head + avail) % g2h->info.size;
	desc_write(xe, g2h, head, g2h->info.head);

	trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
			     action, len, g2h->info.head, tail);

	return len;

corrupted:
	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
	return -EPROTO;
}

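/*
 * Dispatch one latency-critical G2H event read by the fast path. G2H
 * credits for the TLB-invalidation and page-reclaim events are released
 * here, as these messages never reach the regular G2H worker.
 */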
static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_guc *guc = ct_to_guc(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
	int ret = 0;

	switch (action) {
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		__g2h_release_space(ct, len);
		ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_PAGE_RECLAMATION_DONE:
		__g2h_release_space(ct, len);
		ret = xe_guc_page_reclaim_done_handler(guc, payload, adj_len);
		break;
	default:
		xe_gt_warn(gt, "NOT_POSSIBLE\n");
	}

	if (ret) {
		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));
		CT_DEAD(ct, NULL, FAST_G2H);
	}
}

/**
 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
 * @ct: GuC CT object
 *
 * Anything related to page faults is critical for performance, so process
 * these G2H directly in the IRQ handler. This is safe as the handlers
 * involved either just wake up waiters or queue another worker.
 */
void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	bool ongoing;
	int len;

	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	spin_lock(&ct->fast_lock);
	do {
		len = g2h_read(ct, ct->fast_msg, true);
		if (len > 0)
			g2h_fast_path(ct, ct->fast_msg, len);
	} while (len > 0);
	spin_unlock(&ct->fast_lock);

	if (ongoing)
		xe_pm_runtime_put(xe);
}

/* Returns less than zero on error, 0 on done, 1 on more available */
static int dequeue_one_g2h(struct xe_guc_ct *ct)
{
	int len;
	int ret;

	lockdep_assert_held(&ct->lock);

	spin_lock_irq(&ct->fast_lock);
	len = g2h_read(ct, ct->msg, false);
	spin_unlock_irq(&ct->fast_lock);
	if (len <= 0)
		return len;

	ret = parse_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	ret = process_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	return 1;
}

static void receive_g2h(struct xe_guc_ct *ct)
{
	bool ongoing;
	int ret;

	/*
	 * Normal users must always hold mem_access.ref around CT calls. However
	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
	 * at this stage we can't rely on mem_access.ref and even the
	 * callback_task will be different from current. For such cases we just
	 * need to ensure we always process the responses from any blocking
	 * ct_send requests or where we otherwise expect some response when
	 * initiated from those callbacks (which will need to wait for the below
	 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
	 * the device has suspended to the point that the CT communication has
	 * been disabled.
	 *
	 * If we are inside the runtime pm callback, we can be the only task
	 * still issuing CT requests (since that requires having the
	 * mem_access.ref). It seems like it might in theory be possible to
	 * receive unsolicited events from the GuC just as we are
	 * suspending-resuming, but those will currently anyway be lost when
	 * eventually exiting from suspend, hence no need to wake up the device
	 * here. If we ever need something stronger than
	 * xe_pm_runtime_get_if_active() then we need to be careful with
	 * blocking the pm callbacks from getting CT responses, as the worker
	 * here could be blocked on those callbacks completing, creating a
	 * deadlock.
	 */
	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	do {
		mutex_lock(&ct->lock);
		ret = dequeue_one_g2h(ct);
		mutex_unlock(&ct->lock);

		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d\n", ret);
			CT_DEAD(ct, NULL, G2H_RECV);
			kick_reset(ct);
		}
	} while (ret == 1);

	if (ongoing)
		xe_pm_runtime_put(ct_to_xe(ct));
}

static void g2h_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);

	receive_g2h(ct);
}

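/*
 * Allocate a snapshot object, optionally with a buffer large enough to
 * hold a raw copy of both CTBs. Failure to allocate the raw buffer is
 * not fatal: snapshot->ctb stays NULL and the raw dump is simply skipped.
 */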
static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
							 bool want_ctb)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = kzalloc_obj(*snapshot, atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!snapshot)
		return NULL;

	if (ct->ctbs.h2g.bo && ct->ctbs.g2h.bo && want_ctb) {
		snapshot->ctb_size = xe_bo_size(ct->ctbs.h2g.bo) +
				     xe_bo_size(ct->ctbs.g2h.bo);
		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
	}

	return snapshot;
}

static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
				     struct guc_ctb_snapshot *snapshot)
{
	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
			   sizeof(struct guc_ct_buffer_desc));
	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
}

static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
				   struct drm_printer *p)
{
	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
	drm_printf(p, "\thead: %d\n", snapshot->info.head);
	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
}

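/*
 * Capture the CTB descriptors and bookkeeping, plus a raw copy of both
 * ring buffers when one was allocated. With @atomic set, only GFP_ATOMIC
 * allocations are used, so this is safe to call from atomic context.
 */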
static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
							   bool want_ctb)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
	if (!snapshot) {
		xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
		return NULL;
	}

	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
		snapshot->ct_enabled = true;
		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
	}

	if (ct->ctbs.h2g.bo && ct->ctbs.g2h.bo && snapshot->ctb) {
		xe_map_memcpy_from(xe, snapshot->ctb, &ct->ctbs.h2g.bo->vmap, 0,
				   xe_bo_size(ct->ctbs.h2g.bo));
		xe_map_memcpy_from(xe, snapshot->ctb + xe_bo_size(ct->ctbs.h2g.bo),
				   &ct->ctbs.g2h.bo->vmap, 0,
				   xe_bo_size(ct->ctbs.g2h.bo));
	}

	return snapshot;
}

/**
 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
 * @ct: GuC CT object.
 *
 * The snapshot can be printed out at a later stage, e.g. during
 * dev_coredump analysis. This is safe to call from atomic context.
 *
 * Returns: a GuC CT snapshot object that must be freed by the caller
 * using `xe_guc_ct_snapshot_free`.
 */
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
{
	return guc_ct_snapshot_capture(ct, true, true);
}

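/*
 * Typical capture/print/free flow (illustrative sketch only; @p is
 * assumed to be a valid struct drm_printer set up by the caller):
 *
 *	struct xe_guc_ct_snapshot *snapshot = xe_guc_ct_snapshot_capture(ct);
 *
 *	xe_guc_ct_snapshot_print(snapshot, p);
 *	xe_guc_ct_snapshot_free(snapshot);
 *
 * Both the print and free helpers accept a NULL snapshot, so a failed
 * capture needs no special-casing. xe_guc_ct_print() below follows this
 * same pattern.
 */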
/**
 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
 * @snapshot: GuC CT snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC CT snapshot object.
 */
void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
			      struct drm_printer *p)
{
	if (!snapshot)
		return;

	if (snapshot->ct_enabled) {
		drm_puts(p, "H2G CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->h2g, p);

		drm_puts(p, "G2H CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->g2h, p);
		drm_printf(p, "\tg2h outstanding: %d\n",
			   snapshot->g2h_outstanding);

		if (snapshot->ctb) {
			drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
			xe_print_blob_ascii85(p, "[CTB].data", '\n',
					      snapshot->ctb, 0, snapshot->ctb_size);
		}
	} else {
		drm_puts(p, "CT disabled\n");
	}
}

/**
 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: GuC CT snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
{
	if (!snapshot)
		return;

	kfree(snapshot->ctb);
	kfree(snapshot);
}

/**
 * xe_guc_ct_print - GuC CT Print.
 * @ct: GuC CT.
 * @p: drm_printer where it will be printed out.
 * @want_ctb: Should the full CTB content be dumped (vs just the headers)
 *
 * This function will quickly capture a snapshot of the CT state
 * and immediately print it out.
 */
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
	xe_guc_ct_snapshot_print(snapshot, p);
	xe_guc_ct_snapshot_free(snapshot);
}

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)

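/*
 * "Dead CT" handling: when the CT channel is declared dead via CT_DEAD(),
 * a GuC log and CT snapshot are captured once and a worker prints them as
 * a pseudo core dump. Further reporting stays suppressed until a reset
 * re-arms it via CT_DEAD_STATE_REARM.
 */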
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
/*
 * This helper lets the driver detect whether a fault injection test is
 * currently active, so it can suppress unnecessary debug output. Normally
 * the function returns zero, but the fault injection framework can
 * override that with an error code. Since faults are injected through
 * this function, the compiler must not optimize it into an inline
 * function (which it would do for a static function defined in a header),
 * hence the 'noinline' attribute.
 */
noinline int xe_is_injection_active(void) { return 0; }
ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
#else
int xe_is_injection_active(void) { return 0; }
#endif

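/*
 * Record the reason for the CT being dead, capture a GuC log and CT
 * snapshot (at most one pair until the next reset), and queue the worker
 * that prints them. The snapshots are taken outside ct->dead.lock since
 * capturing may allocate.
 */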
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
{
	struct xe_guc_log_snapshot *snapshot_log;
	struct xe_guc_ct_snapshot *snapshot_ct;
	struct xe_guc *guc = ct_to_guc(ct);
	unsigned long flags;
	bool have_capture;

	if (ctb)
		ctb->info.broken = true;

	/*
	 * A huge dump is generated when errors are injected into the GuC
	 * CT/MMIO functions, so suppress the dump while fault injection is
	 * active.
	 */
	if (xe_is_injection_active())
		return;

	/* Ignore further errors after the first dump until a reset */
	if (ct->dead.reported)
		return;

	spin_lock_irqsave(&ct->dead.lock, flags);

	/* And only capture one dump at a time */
	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
	ct->dead.reason |= (1 << reason_code) |
			   (1 << CT_DEAD_STATE_CAPTURE);

	spin_unlock_irqrestore(&ct->dead.lock, flags);

	if (have_capture)
		return;

	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
	snapshot_ct = xe_guc_ct_snapshot_capture(ct);

	spin_lock_irqsave(&ct->dead.lock, flags);

	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
		xe_guc_log_snapshot_free(snapshot_log);
		xe_guc_ct_snapshot_free(snapshot_ct);
	} else {
		ct->dead.snapshot_log = snapshot_log;
		ct->dead.snapshot_ct = snapshot_ct;
	}

	spin_unlock_irqrestore(&ct->dead.lock, flags);

	queue_work(system_dfl_wq, &ct->dead.worker);
}

static void ct_dead_print(struct xe_dead_ct *dead)
{
	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	static int g_count;
	struct drm_printer ip = xe_gt_info_printer(gt);
	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);

	if (!dead->reason) {
		xe_gt_err(gt, "CTB is dead for no reason!?\n");
		return;
	}

	/* Can't generate a genuine core dump at this point, so just do the good bits */
	drm_puts(&lp, "**** Xe Device Coredump ****\n");
	drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
	xe_device_snapshot_print(xe, &lp);

	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);

	drm_puts(&lp, "**** GuC Log ****\n");
	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);

	drm_puts(&lp, "**** GuC CT ****\n");
	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);

	drm_puts(&lp, "Done.\n");
}

static void ct_dead_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);

	if (!ct->dead.reported) {
		ct->dead.reported = true;
		ct_dead_print(&ct->dead);
	}

	spin_lock_irq(&ct->dead.lock);

	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
	ct->dead.snapshot_log = NULL;
	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
	ct->dead.snapshot_ct = NULL;

	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
		/* A reset has occurred so re-arm the error reporting */
		ct->dead.reason = 0;
		ct->dead.reported = false;
	}

	spin_unlock_irq(&ct->dead.lock);
}
#endif
