1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2022 Intel Corporation
4 */
5
6 #include "xe_guc_ct.h"
7
8 #include <linux/bitfield.h>
9 #include <linux/circ_buf.h>
10 #include <linux/delay.h>
11 #include <linux/fault-inject.h>
12
13 #include <kunit/static_stub.h>
14
15 #include <drm/drm_managed.h>
16
17 #include "abi/guc_actions_abi.h"
18 #include "abi/guc_actions_sriov_abi.h"
19 #include "abi/guc_klvs_abi.h"
20 #include "xe_bo.h"
21 #include "xe_devcoredump.h"
22 #include "xe_device.h"
23 #include "xe_gt.h"
24 #include "xe_gt_printk.h"
25 #include "xe_gt_sriov_pf_control.h"
26 #include "xe_gt_sriov_pf_monitor.h"
27 #include "xe_guc.h"
28 #include "xe_guc_log.h"
29 #include "xe_guc_pagefault.h"
30 #include "xe_guc_relay.h"
31 #include "xe_guc_submit.h"
32 #include "xe_guc_tlb_inval.h"
33 #include "xe_map.h"
34 #include "xe_pm.h"
35 #include "xe_sriov_vf.h"
36 #include "xe_trace_guc.h"
37
38 static void receive_g2h(struct xe_guc_ct *ct);
39 static void g2h_worker_func(struct work_struct *w);
40 static void safe_mode_worker_func(struct work_struct *w);
41 static void ct_exit_safe_mode(struct xe_guc_ct *ct);
42 static void guc_ct_change_state(struct xe_guc_ct *ct,
43 enum xe_guc_ct_state state);
44
45 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
46 enum {
47 /* Internal states, not error conditions */
48 CT_DEAD_STATE_REARM, /* 0x0001 */
49 CT_DEAD_STATE_CAPTURE, /* 0x0002 */
50
51 /* Error conditions */
52 CT_DEAD_SETUP, /* 0x0004 */
53 CT_DEAD_H2G_WRITE, /* 0x0008 */
54 CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */
55 CT_DEAD_G2H_READ, /* 0x0020 */
56 CT_DEAD_G2H_RECV, /* 0x0040 */
57 CT_DEAD_G2H_RELEASE, /* 0x0080 */
58 CT_DEAD_DEADLOCK, /* 0x0100 */
59 CT_DEAD_PROCESS_FAILED, /* 0x0200 */
60 CT_DEAD_FAST_G2H, /* 0x0400 */
61 CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */
62 CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */
63 CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */
64 CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */
65 CT_DEAD_CRASH, /* 0x8000 */
66 };
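
/*
 * The reason codes above are used as bit positions in ct->dead.reason (see the
 * CT_DEAD_STATE_REARM handling in __xe_guc_ct_start()), which is why each enum
 * entry is annotated with its corresponding mask value, e.g. CT_DEAD_SETUP
 * maps to 0x0004.
 */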
67
68 static void ct_dead_worker_func(struct work_struct *w);
69 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
70
71 #define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
72 #else
73 #define CT_DEAD(ct, ctb, reason) \
74 do { \
75 struct guc_ctb *_ctb = (ctb); \
76 if (_ctb) \
77 _ctb->info.broken = true; \
78 } while (0)
79 #endif
80
81 /* Used when a CT send wants to block and / or receive data */
82 struct g2h_fence {
83 u32 *response_buffer;
84 u32 seqno;
85 u32 response_data;
86 u16 response_len;
87 u16 error;
88 u16 hint;
89 u16 reason;
90 bool cancel;
91 bool retry;
92 bool fail;
93 bool done;
94 };
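
/*
 * Lifecycle sketch (as implemented below): g2h_fence_init() clears the fence
 * and marks the seqno as unallocated, __guc_ct_send_locked() assigns a seqno
 * and stores the fence in ct->fence_lookup, and parse_g2h_response() (or
 * g2h_fence_cancel() on a CT state change) fills in the result and sets
 * ->done before waking ct->g2h_fence_wq.
 */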
95
96 static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
97 {
98 memset(g2h_fence, 0, sizeof(*g2h_fence));
99 g2h_fence->response_buffer = response_buffer;
100 g2h_fence->seqno = ~0x0;
101 }
102
103 static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
104 {
105 g2h_fence->cancel = true;
106 g2h_fence->fail = true;
107
108 /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
109 WRITE_ONCE(g2h_fence->done, true);
110 }
111
112 static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
113 {
114 return g2h_fence->seqno == ~0x0;
115 }
116
117 static struct xe_guc *
118 ct_to_guc(struct xe_guc_ct *ct)
119 {
120 return container_of(ct, struct xe_guc, ct);
121 }
122
123 static struct xe_gt *
124 ct_to_gt(struct xe_guc_ct *ct)
125 {
126 return container_of(ct, struct xe_gt, uc.guc.ct);
127 }
128
129 static struct xe_device *
130 ct_to_xe(struct xe_guc_ct *ct)
131 {
132 return gt_to_xe(ct_to_gt(ct));
133 }
134
135 /**
136 * DOC: GuC CTB Blob
137 *
138 * We allocate single blob to hold both CTB descriptors and buffers:
139 *
140 * +--------+-----------------------------------------------+------+
141 * | offset | contents | size |
142 * +========+===============================================+======+
143 * | 0x0000 | H2G CTB Descriptor (send) | |
144 * +--------+-----------------------------------------------+ 4K |
145 * | 0x0800 | G2H CTB Descriptor (g2h) | |
146 * +--------+-----------------------------------------------+------+
147 * | 0x1000 | H2G CT Buffer (send) | n*4K |
148 * | | | |
149 * +--------+-----------------------------------------------+------+
150 * | 0x1000 | G2H CT Buffer (g2h) | m*4K |
151 * | + n*4K | | |
152 * +--------+-----------------------------------------------+------+
153 *
154 * The size of each ``CT Buffer`` must be a multiple of 4K.
155 * We don't expect too many messages in flight at any time, unless we are
156 * using GuC submission. In that case each request requires a minimum of
157 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
158 * is enough space to avoid backpressure on the driver. We increase the size
159 * of the receive buffer (relative to the send buffer) to ensure a G2H
160 * response CTB has a landing spot.
161 *
162 * In addition to submissions, the G2H buffer needs to be able to hold
163 * enough space for recoverable page fault notifications. The number of
164 * page faults is interrupt driven and can be as much as the number of
165 * compute resources available. However, most of the actual work for these
166 * is in a separate page fault worker thread. Therefore we only need to
167 * make sure the queue has enough space to handle all of the submissions
168 * and responses and an extra buffer for incoming page faults.
169 */
170
171 #define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
172 #define CTB_H2G_BUFFER_OFFSET (CTB_DESC_SIZE * 2)
173 #define CTB_H2G_BUFFER_SIZE (SZ_4K)
174 #define CTB_G2H_BUFFER_SIZE (SZ_128K)
175 #define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
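
/*
 * With the values above (assuming the descriptor struct fits in 2K), the blob
 * described in the DOC table works out to: descriptors at 0x0000 and 0x0800,
 * a 4K H2G buffer at 0x1000, a 128K G2H buffer at 0x2000, 136K in total,
 * with half of the G2H buffer reserved as response "room".
 */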
176
177 /**
178 * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
179 * CT command queue
180 * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
181 *
182 * Observation is that a 4KiB buffer full of commands takes a little over a
183 * second to process. Use that to calculate maximum time to process a full CT
184 * command queue.
185 *
186 * Return: Maximum time to process a full CT queue in jiffies.
187 */
188 long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
189 {
190 BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
191 return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
192 }
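
/*
 * With the current 4K H2G buffer the expression above evaluates to exactly
 * 1 * HZ, i.e. the "little over a second" observation rounded to one second
 * per 4KiB of queued commands.
 */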
193
194 static size_t guc_ct_size(void)
195 {
196 return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
197 CTB_G2H_BUFFER_SIZE;
198 }
199
200 static void guc_ct_fini(struct drm_device *drm, void *arg)
201 {
202 struct xe_guc_ct *ct = arg;
203
204 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
205 cancel_work_sync(&ct->dead.worker);
206 #endif
207 ct_exit_safe_mode(ct);
208 destroy_workqueue(ct->g2h_wq);
209 xa_destroy(&ct->fence_lookup);
210 }
211
212 static void primelockdep(struct xe_guc_ct *ct)
213 {
214 if (!IS_ENABLED(CONFIG_LOCKDEP))
215 return;
216
217 fs_reclaim_acquire(GFP_KERNEL);
218 might_lock(&ct->lock);
219 fs_reclaim_release(GFP_KERNEL);
220 }
221
222 int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
223 {
224 struct xe_device *xe = ct_to_xe(ct);
225 struct xe_gt *gt = ct_to_gt(ct);
226 int err;
227
228 xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
229
230 err = drmm_mutex_init(&xe->drm, &ct->lock);
231 if (err)
232 return err;
233
234 primelockdep(ct);
235
236 ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
237 if (!ct->g2h_wq)
238 return -ENOMEM;
239
240 spin_lock_init(&ct->fast_lock);
241 xa_init(&ct->fence_lookup);
242 INIT_WORK(&ct->g2h_worker, g2h_worker_func);
243 INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
244 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
245 spin_lock_init(&ct->dead.lock);
246 INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
247 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
248 stack_depot_init();
249 #endif
250 #endif
251 init_waitqueue_head(&ct->wq);
252 init_waitqueue_head(&ct->g2h_fence_wq);
253
254 err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
255 if (err)
256 return err;
257
258 xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
259 ct->state = XE_GUC_CT_STATE_DISABLED;
260 return 0;
261 }
262 ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
263
264 static void guc_action_disable_ct(void *arg)
265 {
266 struct xe_guc_ct *ct = arg;
267
268 guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
269 }
270
271 int xe_guc_ct_init(struct xe_guc_ct *ct)
272 {
273 struct xe_device *xe = ct_to_xe(ct);
274 struct xe_gt *gt = ct_to_gt(ct);
275 struct xe_tile *tile = gt_to_tile(gt);
276 struct xe_bo *bo;
277
278 bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
279 XE_BO_FLAG_SYSTEM |
280 XE_BO_FLAG_GGTT |
281 XE_BO_FLAG_GGTT_INVALIDATE |
282 XE_BO_FLAG_PINNED_NORESTORE);
283 if (IS_ERR(bo))
284 return PTR_ERR(bo);
285
286 ct->bo = bo;
287
288 return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
289 }
290 ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
291
292 /**
293 * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
294 * @ct: the &xe_guc_ct
295 *
296 * Allocate a new BO in VRAM and free the previous BO that was allocated
297 * in system memory (SMEM). Applicable only for DGFX products.
298 *
299 * Return: 0 on success, or a negative errno on failure.
300 */
301 int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
302 {
303 struct xe_device *xe = ct_to_xe(ct);
304 struct xe_gt *gt = ct_to_gt(ct);
305 struct xe_tile *tile = gt_to_tile(gt);
306 int ret;
307
308 xe_assert(xe, !xe_guc_ct_enabled(ct));
309
310 if (IS_DGFX(xe)) {
311 ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
312 if (ret)
313 return ret;
314 }
315
316 devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
317 return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
318 }
319
320 #define desc_read(xe_, guc_ctb__, field_) \
321 xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
322 struct guc_ct_buffer_desc, field_)
323
324 #define desc_write(xe_, guc_ctb__, field_, val_) \
325 xe_map_wr_field(xe_, &guc_ctb__->desc, 0, \
326 struct guc_ct_buffer_desc, field_, val_)
327
328 static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
329 struct iosys_map *map)
330 {
331 h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
332 h2g->info.resv_space = 0;
333 h2g->info.tail = 0;
334 h2g->info.head = 0;
335 h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
336 h2g->info.size) -
337 h2g->info.resv_space;
338 h2g->info.broken = false;
339
340 h2g->desc = *map;
341 xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
342
343 h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
344 }
345
346 static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
347 struct iosys_map *map)
348 {
349 g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
350 g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
351 g2h->info.head = 0;
352 g2h->info.tail = 0;
353 g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
354 g2h->info.size) -
355 g2h->info.resv_space;
356 g2h->info.broken = false;
357
358 g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
359 xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
360
361 g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
362 CTB_H2G_BUFFER_SIZE);
363 }
364
365 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
366 {
367 struct xe_guc *guc = ct_to_guc(ct);
368 u32 desc_addr, ctb_addr, size;
369 int err;
370
371 desc_addr = xe_bo_ggtt_addr(ct->bo);
372 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
373 size = ct->ctbs.h2g.info.size * sizeof(u32);
374
375 err = xe_guc_self_cfg64(guc,
376 GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
377 desc_addr);
378 if (err)
379 return err;
380
381 err = xe_guc_self_cfg64(guc,
382 GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
383 ctb_addr);
384 if (err)
385 return err;
386
387 return xe_guc_self_cfg32(guc,
388 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
389 size);
390 }
391
392 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
393 {
394 struct xe_guc *guc = ct_to_guc(ct);
395 u32 desc_addr, ctb_addr, size;
396 int err;
397
398 desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
399 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
400 CTB_H2G_BUFFER_SIZE;
401 size = ct->ctbs.g2h.info.size * sizeof(u32);
402
403 err = xe_guc_self_cfg64(guc,
404 GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
405 desc_addr);
406 if (err)
407 return err;
408
409 err = xe_guc_self_cfg64(guc,
410 GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
411 ctb_addr);
412 if (err)
413 return err;
414
415 return xe_guc_self_cfg32(guc,
416 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
417 size);
418 }
419
420 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
421 {
422 u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
423 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
424 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
425 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
426 GUC_ACTION_HOST2GUC_CONTROL_CTB),
427 FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
428 enable ? GUC_CTB_CONTROL_ENABLE :
429 GUC_CTB_CONTROL_DISABLE),
430 };
431 int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
432
433 return ret > 0 ? -EPROTO : ret;
434 }
435
436 static void guc_ct_change_state(struct xe_guc_ct *ct,
437 enum xe_guc_ct_state state)
438 {
439 struct xe_gt *gt = ct_to_gt(ct);
440 struct g2h_fence *g2h_fence;
441 unsigned long idx;
442
443 mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
444 spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
445
446 xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
447 state == XE_GUC_CT_STATE_STOPPED);
448
449 if (ct->g2h_outstanding)
450 xe_pm_runtime_put(ct_to_xe(ct));
451 ct->g2h_outstanding = 0;
452 ct->state = state;
453
454 xe_gt_dbg(gt, "GuC CT communication channel %s\n",
455 state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
456 str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));
457
458 spin_unlock_irq(&ct->fast_lock);
459
460 /* cancel all in-flight send-recv requests */
461 xa_for_each(&ct->fence_lookup, idx, g2h_fence)
462 g2h_fence_cancel(g2h_fence);
463
464 /* make sure guc_ct_send_recv() will see g2h_fence changes */
465 smp_mb();
466 wake_up_all(&ct->g2h_fence_wq);
467
468 /*
469 * Lockdep doesn't like this under the fast lock, and the destroy only
470 * needs to be serialized with the send path, which the CT lock provides.
471 */
472 xa_destroy(&ct->fence_lookup);
473
474 mutex_unlock(&ct->lock);
475 }
476
477 static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
478 {
479 return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
480 }
481
482 static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
483 {
484 if (!ct_needs_safe_mode(ct))
485 return false;
486
487 queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
488 return true;
489 }
490
491 static void safe_mode_worker_func(struct work_struct *w)
492 {
493 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
494
495 receive_g2h(ct);
496
497 if (!ct_restart_safe_mode_worker(ct))
498 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
499 }
500
501 static void ct_enter_safe_mode(struct xe_guc_ct *ct)
502 {
503 if (ct_restart_safe_mode_worker(ct))
504 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
505 }
506
507 static void ct_exit_safe_mode(struct xe_guc_ct *ct)
508 {
509 if (cancel_delayed_work_sync(&ct->safe_mode_worker))
510 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
511 }
512
513 static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
514 {
515 struct xe_device *xe = ct_to_xe(ct);
516 struct xe_gt *gt = ct_to_gt(ct);
517 int err;
518
519 xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
520
521 if (needs_register) {
522 xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
523 guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
524 guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
525
526 err = guc_ct_ctb_h2g_register(ct);
527 if (err)
528 goto err_out;
529
530 err = guc_ct_ctb_g2h_register(ct);
531 if (err)
532 goto err_out;
533
534 err = guc_ct_control_toggle(ct, true);
535 if (err)
536 goto err_out;
537 } else {
538 ct->ctbs.h2g.info.broken = false;
539 ct->ctbs.g2h.info.broken = false;
540 /* Skip everything in H2G buffer */
541 xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
542 CTB_H2G_BUFFER_SIZE);
543 }
544
545 guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
546
547 smp_mb();
548 wake_up_all(&ct->wq);
549
550 if (ct_needs_safe_mode(ct))
551 ct_enter_safe_mode(ct);
552
553 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
554 /*
555 * The CT has now been reset so the dumper can be re-armed
556 * after any existing dead state has been dumped.
557 */
558 spin_lock_irq(&ct->dead.lock);
559 if (ct->dead.reason) {
560 ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
561 queue_work(system_unbound_wq, &ct->dead.worker);
562 }
563 spin_unlock_irq(&ct->dead.lock);
564 #endif
565
566 return 0;
567
568 err_out:
569 xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
570 CT_DEAD(ct, NULL, SETUP);
571
572 return err;
573 }
574
575 /**
576 * xe_guc_ct_restart() - Restart GuC CT
577 * @ct: the &xe_guc_ct
578 *
579 * Restart GuC CT to an empty state without issuing a CT register MMIO command.
580 *
581 * Return: 0 on success, or a negative errno on failure.
582 */
583 int xe_guc_ct_restart(struct xe_guc_ct *ct)
584 {
585 return __xe_guc_ct_start(ct, false);
586 }
587
588 /**
589 * xe_guc_ct_enable() - Enable GuC CT
590 * @ct: the &xe_guc_ct
591 *
592 * Enable GuC CT to an empty state and issue a CT register MMIO command.
593 *
594 * Return: 0 on success, or a negative errno on failure.
595 */
596 int xe_guc_ct_enable(struct xe_guc_ct *ct)
597 {
598 return __xe_guc_ct_start(ct, true);
599 }
600
601 static void stop_g2h_handler(struct xe_guc_ct *ct)
602 {
603 cancel_work_sync(&ct->g2h_worker);
604 }
605
606 /**
607 * xe_guc_ct_disable - Set GuC to disabled state
608 * @ct: the &xe_guc_ct
609 *
610 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
611 * in this transition.
612 */
613 void xe_guc_ct_disable(struct xe_guc_ct *ct)
614 {
615 guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
616 ct_exit_safe_mode(ct);
617 stop_g2h_handler(ct);
618 }
619
620 /**
621 * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G
622 * @ct: the &xe_guc_ct
623 */
624 void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct)
625 {
626 receive_g2h(ct);
627 xe_guc_ct_stop(ct);
628 }
629
630 /**
631 * xe_guc_ct_stop - Set GuC to stopped state
632 * @ct: the &xe_guc_ct
633 *
634 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
635 */
636 void xe_guc_ct_stop(struct xe_guc_ct *ct)
637 {
638 if (!xe_guc_ct_initialized(ct))
639 return;
640
641 guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
642 stop_g2h_handler(ct);
643 }
644
645 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
646 {
647 struct guc_ctb *h2g = &ct->ctbs.h2g;
648
649 lockdep_assert_held(&ct->lock);
650
651 if (cmd_len > h2g->info.space) {
652 h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
653
654 if (h2g->info.head > h2g->info.size) {
655 struct xe_device *xe = ct_to_xe(ct);
656 u32 desc_status = desc_read(xe, h2g, status);
657
658 desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
659
660 xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
661 h2g->info.head, h2g->info.size);
662 CT_DEAD(ct, h2g, H2G_HAS_ROOM);
663 return false;
664 }
665
666 h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
667 h2g->info.size) -
668 h2g->info.resv_space;
669 if (cmd_len > h2g->info.space)
670 return false;
671 }
672
673 return true;
674 }
675
676 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
677 {
678 if (!g2h_len)
679 return true;
680
681 lockdep_assert_held(&ct->fast_lock);
682
683 return ct->ctbs.g2h.info.space > g2h_len;
684 }
685
686 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
687 {
688 lockdep_assert_held(&ct->lock);
689
690 if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
691 return -EBUSY;
692
693 return 0;
694 }
695
696 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
697 {
698 lockdep_assert_held(&ct->lock);
699 ct->ctbs.h2g.info.space -= cmd_len;
700 }
701
702 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
703 {
704 xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
705 xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
706 (g2h_len && num_g2h));
707
708 if (g2h_len) {
709 lockdep_assert_held(&ct->fast_lock);
710
711 if (!ct->g2h_outstanding)
712 xe_pm_runtime_get_noresume(ct_to_xe(ct));
713
714 ct->ctbs.g2h.info.space -= g2h_len;
715 ct->g2h_outstanding += num_g2h;
716 }
717 }
718
719 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
720 {
721 bool bad = false;
722
723 lockdep_assert_held(&ct->fast_lock);
724
725 bad = ct->ctbs.g2h.info.space + g2h_len >
726 ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
727 bad |= !ct->g2h_outstanding;
728
729 if (bad) {
730 xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
731 ct->ctbs.g2h.info.space, g2h_len,
732 ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
733 ct->ctbs.g2h.info.space + g2h_len,
734 ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
735 ct->g2h_outstanding);
736 CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
737 return;
738 }
739
740 ct->ctbs.g2h.info.space += g2h_len;
741 if (!--ct->g2h_outstanding)
742 xe_pm_runtime_put(ct_to_xe(ct));
743 }
744
745 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
746 {
747 spin_lock_irq(&ct->fast_lock);
748 __g2h_release_space(ct, g2h_len);
749 spin_unlock_irq(&ct->fast_lock);
750 }
751
752 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
753 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
754 {
755 unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
756 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
757 unsigned long entries[SZ_32];
758 unsigned int n;
759
760 n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
761
762 /* May be called under spinlock, so avoid sleeping */
763 ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
764 #endif
765 ct->fast_req[slot].fence = fence;
766 ct->fast_req[slot].action = action;
767 }
768 #else
769 static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
770 {
771 }
772 #endif
773
774 /*
775 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
776 * driver, the GuC will just copy it to the reply message. Since we need to
777 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
778 * we use one bit of the seqno as an indicator for that and a rolling counter
779 * for the remaining 15 bits.
780 */
781 #define CT_SEQNO_MASK GENMASK(14, 0)
782 #define CT_SEQNO_UNTRACKED BIT(15)
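
/*
 * For example: a blocking send that allocates a g2h_fence gets a tracked
 * seqno in the range 0x0000..0x7fff from the rolling counter, while a
 * non-blocking send gets the same counter value with bit 15 set
 * (0x8000..0xffff) so replies can be told apart in parse_g2h_response().
 */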
783 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
784 {
785 u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
786
787 if (!is_g2h_fence)
788 seqno |= CT_SEQNO_UNTRACKED;
789
790 return seqno;
791 }
792
793 #define MAKE_ACTION(type, __action) \
794 ({ \
795 FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) | \
796 FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | \
797 GUC_HXG_EVENT_MSG_0_DATA0, __action); \
798 })
799
800 static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
801 {
802 /*
803 * When resuming a VF, we can't reliably track whether context
804 * registration has completed in the GuC state machine. It is harmless
805 * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
806 * is used. Additionally, if there is an H2G protocol issue on a VF,
807 * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
808 * fail.
809 */
810 return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
811 (action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
812 action == XE_GUC_ACTION_REGISTER_CONTEXT);
813 }
814
815 #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
816
817 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
818 u32 ct_fence_value, bool want_response)
819 {
820 struct xe_device *xe = ct_to_xe(ct);
821 struct xe_gt *gt = ct_to_gt(ct);
822 struct guc_ctb *h2g = &ct->ctbs.h2g;
823 u32 cmd[H2G_CT_HEADERS];
824 u32 tail = h2g->info.tail;
825 u32 full_len;
826 struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
827 tail * sizeof(u32));
828 u32 desc_status;
829
830 full_len = len + GUC_CTB_HDR_LEN;
831
832 lockdep_assert_held(&ct->lock);
833 xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
834
835 desc_status = desc_read(xe, h2g, status);
836 if (desc_status) {
837 xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
838 goto corrupted;
839 }
840
841 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
842 u32 desc_tail = desc_read(xe, h2g, tail);
843 u32 desc_head = desc_read(xe, h2g, head);
844
845 if (tail != desc_tail) {
846 desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
847 xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
848 goto corrupted;
849 }
850
851 if (tail > h2g->info.size) {
852 desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
853 xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
854 tail, h2g->info.size);
855 goto corrupted;
856 }
857
858 if (desc_head >= h2g->info.size) {
859 desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
860 xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
861 desc_head, h2g->info.size);
862 goto corrupted;
863 }
864 }
865
866 /* Command will wrap, zero fill (NOPs), return and check credits again */
867 if (tail + full_len > h2g->info.size) {
868 xe_map_memset(xe, &map, 0, 0,
869 (h2g->info.size - tail) * sizeof(u32));
870 h2g_reserve_space(ct, (h2g->info.size - tail));
871 h2g->info.tail = 0;
872 desc_write(xe, h2g, tail, h2g->info.tail);
873
874 return -EAGAIN;
875 }
876
877 /*
878 * dw0: CT header (including fence)
879 * dw1: HXG header (including action code)
880 * dw2+: action data
881 */
882 cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
883 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
884 FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
885 if (want_response) {
886 cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
887 } else if (vf_action_can_safely_fail(xe, action[0])) {
888 cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
889 } else {
890 fast_req_track(ct, ct_fence_value,
891 FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
892
893 cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
894 }
895
896 /* H2G header in cmd[1] replaces action[0] so: */
897 --len;
898 ++action;
899
900 /* Write H2G ensuring visible before descriptor update */
901 xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
902 xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
903 xe_device_wmb(xe);
904
905 /* Update local copies */
906 h2g->info.tail = (tail + full_len) % h2g->info.size;
907 h2g_reserve_space(ct, full_len);
908
909 /* Update descriptor */
910 desc_write(xe, h2g, tail, h2g->info.tail);
911
912 trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
913 desc_read(xe, h2g, head), h2g->info.tail);
914
915 return 0;
916
917 corrupted:
918 CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
919 return -EPIPE;
920 }
921
922 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
923 u32 len, u32 g2h_len, u32 num_g2h,
924 struct g2h_fence *g2h_fence)
925 {
926 struct xe_gt *gt = ct_to_gt(ct);
927 u16 seqno;
928 int ret;
929
930 xe_gt_assert(gt, xe_guc_ct_initialized(ct));
931 xe_gt_assert(gt, !g2h_len || !g2h_fence);
932 xe_gt_assert(gt, !num_g2h || !g2h_fence);
933 xe_gt_assert(gt, !g2h_len || num_g2h);
934 xe_gt_assert(gt, g2h_len || !num_g2h);
935 lockdep_assert_held(&ct->lock);
936
937 if (unlikely(ct->ctbs.h2g.info.broken)) {
938 ret = -EPIPE;
939 goto out;
940 }
941
942 if (ct->state == XE_GUC_CT_STATE_DISABLED) {
943 ret = -ENODEV;
944 goto out;
945 }
946
947 if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) {
948 ret = -ECANCELED;
949 goto out;
950 }
951
952 xe_gt_assert(gt, xe_guc_ct_enabled(ct));
953
954 if (g2h_fence) {
955 g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
956 num_g2h = 1;
957
958 if (g2h_fence_needs_alloc(g2h_fence)) {
959 g2h_fence->seqno = next_ct_seqno(ct, true);
960 ret = xa_err(xa_store(&ct->fence_lookup,
961 g2h_fence->seqno, g2h_fence,
962 GFP_ATOMIC));
963 if (ret)
964 goto out;
965 }
966
967 seqno = g2h_fence->seqno;
968 } else {
969 seqno = next_ct_seqno(ct, false);
970 }
971
972 if (g2h_len)
973 spin_lock_irq(&ct->fast_lock);
974 retry:
975 ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
976 if (unlikely(ret))
977 goto out_unlock;
978
979 ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
980 if (unlikely(ret)) {
981 if (ret == -EAGAIN)
982 goto retry;
983 goto out_unlock;
984 }
985
986 __g2h_reserve_space(ct, g2h_len, num_g2h);
987 xe_guc_notify(ct_to_guc(ct));
988 out_unlock:
989 if (g2h_len)
990 spin_unlock_irq(&ct->fast_lock);
991 out:
992 return ret;
993 }
994
995 static void kick_reset(struct xe_guc_ct *ct)
996 {
997 xe_gt_reset_async(ct_to_gt(ct));
998 }
999
1000 static int dequeue_one_g2h(struct xe_guc_ct *ct);
1001
1002 /*
1003 * Wait before retrying the send of an H2G message.
1004 * Return: true if ready for retry, false if the wait timed out
1005 */
1006 static bool guc_ct_send_wait_for_retry(struct xe_guc_ct *ct, u32 len,
1007 u32 g2h_len, struct g2h_fence *g2h_fence,
1008 unsigned int *sleep_period_ms)
1009 {
1010 struct xe_device *xe = ct_to_xe(ct);
1011
1012 /*
1013 * We wait to try to restore credits for about 1 second before bailing.
1014 * In the case of H2G credits we have no choice but just to wait for the
1015 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
1016 * the case of G2H we process any G2H in the channel, hopefully freeing
1017 * credits as we consume the G2H messages.
1018 */
1019 if (!h2g_has_room(ct, len + GUC_CTB_HDR_LEN)) {
1020 struct guc_ctb *h2g = &ct->ctbs.h2g;
1021
1022 if (*sleep_period_ms == 1024)
1023 return false;
1024
1025 trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
1026 h2g->info.size,
1027 h2g->info.space,
1028 len + GUC_CTB_HDR_LEN);
1029 msleep(*sleep_period_ms);
1030 *sleep_period_ms <<= 1;
1031 } else {
1032 struct xe_device *xe = ct_to_xe(ct);
1033 struct guc_ctb *g2h = &ct->ctbs.g2h;
1034 int ret;
1035
1036 trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
1037 desc_read(xe, g2h, tail),
1038 g2h->info.size,
1039 g2h->info.space,
1040 g2h_fence ?
1041 GUC_CTB_HXG_MSG_MAX_LEN :
1042 g2h_len);
1043
1044 #define g2h_avail(ct) \
1045 (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
1046 if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
1047 g2h_avail(ct), HZ))
1048 return false;
1049 #undef g2h_avail
1050
1051 ret = dequeue_one_g2h(ct);
1052 if (ret < 0) {
1053 if (ret != -ECANCELED)
1054 xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
1055 ERR_PTR(ret));
1056 return false;
1057 }
1058 }
1059 return true;
1060 }
1061
1062 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1063 u32 g2h_len, u32 num_g2h,
1064 struct g2h_fence *g2h_fence)
1065 {
1066 struct xe_gt *gt = ct_to_gt(ct);
1067 unsigned int sleep_period_ms = 1;
1068 int ret;
1069
1070 xe_gt_assert(gt, !g2h_len || !g2h_fence);
1071 lockdep_assert_held(&ct->lock);
1072 xe_device_assert_mem_access(ct_to_xe(ct));
1073
1074 try_again:
1075 ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
1076 g2h_fence);
1077
1078 if (unlikely(ret == -EBUSY)) {
1079 if (!guc_ct_send_wait_for_retry(ct, len, g2h_len, g2h_fence,
1080 &sleep_period_ms))
1081 goto broken;
1082 goto try_again;
1083 }
1084
1085 return ret;
1086
1087 broken:
1088 xe_gt_err(gt, "No forward process on H2G, reset required\n");
1089 CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
1090
1091 return -EDEADLK;
1092 }
1093
1094 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1095 u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
1096 {
1097 int ret;
1098
1099 xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
1100
1101 mutex_lock(&ct->lock);
1102 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
1103 mutex_unlock(&ct->lock);
1104
1105 return ret;
1106 }
1107
1108 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
1109 u32 g2h_len, u32 num_g2h)
1110 {
1111 int ret;
1112
1113 ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
1114 if (ret == -EDEADLK)
1115 kick_reset(ct);
1116
1117 return ret;
1118 }
1119
1120 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
1121 u32 g2h_len, u32 num_g2h)
1122 {
1123 int ret;
1124
1125 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
1126 if (ret == -EDEADLK)
1127 kick_reset(ct);
1128
1129 return ret;
1130 }
1131
1132 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
1133 {
1134 int ret;
1135
1136 lockdep_assert_held(&ct->lock);
1137
1138 ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
1139 if (ret == -EDEADLK)
1140 kick_reset(ct);
1141
1142 return ret;
1143 }
1144
1145 /*
1146 * Check if a GT reset is in progress or will occur, and if the GT reset brought
1147 * the CT back up. Randomly picking 5 seconds as an upper limit for a GT reset.
1148 */
1149 static bool retry_failure(struct xe_guc_ct *ct, int ret)
1150 {
1151 if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
1152 return false;
1153
1154 #define ct_alive(ct) \
1155 (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
1156 !ct->ctbs.g2h.info.broken)
1157 if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
1158 return false;
1159 #undef ct_alive
1160
1161 return true;
1162 }
1163
1164 #define GUC_SEND_RETRY_LIMIT 50
1165 #define GUC_SEND_RETRY_MSLEEP 5
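
/*
 * Worst case, assuming every attempt is asked to retry, the back-off above
 * sleeps 5ms * (1 + 2 + ... + 50) ~= 6.4 seconds in total before giving up
 * with -ELOOP, on top of the per-attempt wait inside guc_ct_send().
 */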
1166
1167 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1168 u32 *response_buffer, bool no_fail)
1169 {
1170 struct xe_gt *gt = ct_to_gt(ct);
1171 struct g2h_fence g2h_fence;
1172 unsigned int retries = 0;
1173 int ret = 0;
1174
1175 /*
1176 * We use a fence to implement blocking sends / receiving response data.
1177 * The seqno of the fence is sent in the H2G, returned in the G2H, and
1178 * an xarray is used as the storage medium with the seqno being the key.
1179 * Fields in the fence hold success, failure, retry status and the
1180 * response data. Safe to allocate on the stack as the xarray is the
1181 * only reference and it cannot be present after this function exits.
1182 */
1183 retry:
1184 g2h_fence_init(&g2h_fence, response_buffer);
1185 retry_same_fence:
1186 ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
1187 if (unlikely(ret == -ENOMEM)) {
1188 /* Retry allocation with GFP_KERNEL */
1189 ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
1190 &g2h_fence, GFP_KERNEL));
1191 if (ret)
1192 return ret;
1193
1194 goto retry_same_fence;
1195 } else if (unlikely(ret)) {
1196 if (ret == -EDEADLK)
1197 kick_reset(ct);
1198
1199 if (no_fail && retry_failure(ct, ret))
1200 goto retry_same_fence;
1201
1202 if (!g2h_fence_needs_alloc(&g2h_fence))
1203 xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1204
1205 return ret;
1206 }
1207
1208 /* READ_ONCEs pairs with WRITE_ONCEs in parse_g2h_response
1209 * and g2h_fence_cancel.
1210 */
1211 ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
1212 if (!ret) {
1213 LNL_FLUSH_WORK(&ct->g2h_worker);
1214 if (READ_ONCE(g2h_fence.done)) {
1215 xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
1216 g2h_fence.seqno, action[0]);
1217 ret = 1;
1218 }
1219 }
1220
1221 /*
1222 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
1223 * the stack, since we have no clue if it will fire after the timeout before we can erase
1224 * from the xa. Also we have some dependent loads and stores below for which we need the
1225 * correct ordering, and we lack the needed barriers.
1226 */
1227 mutex_lock(&ct->lock);
1228 if (!ret) {
1229 xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
1230 g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
1231 xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1232 mutex_unlock(&ct->lock);
1233 return -ETIME;
1234 }
1235
1236 if (g2h_fence.retry) {
1237 xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
1238 action[0], g2h_fence.reason);
1239 mutex_unlock(&ct->lock);
1240 if (++retries > GUC_SEND_RETRY_LIMIT) {
1241 xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
1242 action[0], GUC_SEND_RETRY_LIMIT);
1243 return -ELOOP;
1244 }
1245 msleep(GUC_SEND_RETRY_MSLEEP * retries);
1246 goto retry;
1247 }
1248 if (g2h_fence.fail) {
1249 if (g2h_fence.cancel) {
1250 xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
1251 ret = -ECANCELED;
1252 goto unlock;
1253 }
1254 xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
1255 action[0], g2h_fence.error, g2h_fence.hint);
1256 ret = -EIO;
1257 }
1258
1259 if (ret > 0)
1260 ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
1261
1262 unlock:
1263 mutex_unlock(&ct->lock);
1264
1265 return ret;
1266 }
1267
1268 /**
1269 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
1270 * @ct: the &xe_guc_ct
1271 * @action: the dword array with `HXG Request`_ message (can't be NULL)
1272 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
1273 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
1274 *
1275 * Send a `HXG Request`_ message to the GuC over CT communication channel and
1276 * blocks until GuC replies with a `HXG Response`_ message.
1277 *
1278 * For non-blocking communication with GuC use xe_guc_ct_send().
1279 *
1280 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
1281 *
1282 * Return: response length (in dwords) if &response_buffer was not NULL, or
1283 * DATA0 from `HXG Response`_ if &response_buffer was NULL, or
1284 * a negative error code on failure.
1285 */
1286 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1287 u32 *response_buffer)
1288 {
1289 KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
1290 return guc_ct_send_recv(ct, action, len, response_buffer, false);
1291 }
1292 ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
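
/*
 * Hypothetical usage sketch (the action payload below is illustrative only,
 * and the response buffer is sized per the kernel-doc note above):
 *
 *	u32 action[] = { MY_H2G_ACTION, data0 };
 *	u32 response[GUC_CTB_MAX_DWORDS];
 *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), response);
 *
 * A negative ret is an error; otherwise ret is the response length in dwords
 * (or DATA0 of the response if no response buffer had been passed).
 */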
1293
1294 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
1295 u32 len, u32 *response_buffer)
1296 {
1297 return guc_ct_send_recv(ct, action, len, response_buffer, true);
1298 }
1299
1300 static u32 *msg_to_hxg(u32 *msg)
1301 {
1302 return msg + GUC_CTB_MSG_MIN_LEN;
1303 }
1304
1305 static u32 msg_len_to_hxg_len(u32 len)
1306 {
1307 return len - GUC_CTB_MSG_MIN_LEN;
1308 }
1309
1310 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
1311 {
1312 u32 *hxg = msg_to_hxg(msg);
1313 u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1314
1315 lockdep_assert_held(&ct->lock);
1316
1317 switch (action) {
1318 case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1319 case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1320 case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1321 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1322 g2h_release_space(ct, len);
1323 }
1324
1325 return 0;
1326 }
1327
1328 static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
1329 {
1330 struct xe_gt *gt = ct_to_gt(ct);
1331
1332 if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
1333 xe_gt_err(gt, "GuC Crash dump notification\n");
1334 else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
1335 xe_gt_err(gt, "GuC Exception notification\n");
1336 else
1337 xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
1338
1339 CT_DEAD(ct, NULL, CRASH);
1340
1341 kick_reset(ct);
1342
1343 return 0;
1344 }
1345
1346 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
1347 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1348 {
1349 u16 fence_min = U16_MAX, fence_max = 0;
1350 struct xe_gt *gt = ct_to_gt(ct);
1351 bool found = false;
1352 unsigned int n;
1353 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1354 char *buf;
1355 #endif
1356
1357 lockdep_assert_held(&ct->lock);
1358
1359 for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
1360 if (ct->fast_req[n].fence < fence_min)
1361 fence_min = ct->fast_req[n].fence;
1362 if (ct->fast_req[n].fence > fence_max)
1363 fence_max = ct->fast_req[n].fence;
1364
1365 if (ct->fast_req[n].fence != fence)
1366 continue;
1367 found = true;
1368
1369 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1370 buf = kmalloc(SZ_4K, GFP_NOWAIT);
1371 if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
1372 xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
1373 fence, ct->fast_req[n].action, buf);
1374 else
1375 xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
1376 fence, ct->fast_req[n].action);
1377 kfree(buf);
1378 #else
1379 xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
1380 fence, ct->fast_req[n].action);
1381 #endif
1382 break;
1383 }
1384
1385 if (!found)
1386 xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
1387 fence, fence_min, fence_max, ct->fence_seqno);
1388 }
1389 #else
1390 static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1391 {
1392 }
1393 #endif
1394
1395 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
1396 {
1397 struct xe_gt *gt = ct_to_gt(ct);
1398 u32 *hxg = msg_to_hxg(msg);
1399 u32 hxg_len = msg_len_to_hxg_len(len);
1400 u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
1401 u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1402 struct g2h_fence *g2h_fence;
1403
1404 lockdep_assert_held(&ct->lock);
1405
1406 /*
1407 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
1408 * Those messages should never fail, so if we do get an error back it
1409 * means we're likely doing an illegal operation and the GuC is
1410 * rejecting it. We have no way to inform the code that submitted the
1411 * H2G that the message was rejected, so we need to escalate the
1412 * failure to trigger a reset.
1413 */
1414 if (fence & CT_SEQNO_UNTRACKED) {
1415 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
1416 xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
1417 fence,
1418 FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
1419 FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
1420 else
1421 xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
1422 type, fence);
1423
1424 fast_req_report(ct, fence);
1425
1426 /* FIXME: W/A for a race in the GuC; a fix will land in firmware soon */
1427 if (xe_gt_recovery_pending(gt))
1428 return 0;
1429
1430 CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
1431
1432 return -EPROTO;
1433 }
1434
1435 g2h_fence = xa_erase(&ct->fence_lookup, fence);
1436 if (unlikely(!g2h_fence)) {
1437 /* Don't tear down channel, as send could've timed out */
1438 /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
1439 xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
1440 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1441 return 0;
1442 }
1443
1444 xe_gt_assert(gt, fence == g2h_fence->seqno);
1445
1446 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
1447 g2h_fence->fail = true;
1448 g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
1449 g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
1450 } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
1451 g2h_fence->retry = true;
1452 g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
1453 } else if (g2h_fence->response_buffer) {
1454 g2h_fence->response_len = hxg_len;
1455 memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
1456 } else {
1457 g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
1458 }
1459
1460 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1461
1462 /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
1463 WRITE_ONCE(g2h_fence->done, true);
1464 smp_mb();
1465
1466 wake_up_all(&ct->g2h_fence_wq);
1467
1468 return 0;
1469 }
1470
1471 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1472 {
1473 struct xe_gt *gt = ct_to_gt(ct);
1474 u32 *hxg = msg_to_hxg(msg);
1475 u32 origin, type;
1476 int ret;
1477
1478 lockdep_assert_held(&ct->lock);
1479
1480 origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1481 if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1482 xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
1483 origin);
1484 CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
1485
1486 return -EPROTO;
1487 }
1488
1489 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1490 switch (type) {
1491 case GUC_HXG_TYPE_EVENT:
1492 ret = parse_g2h_event(ct, msg, len);
1493 break;
1494 case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1495 case GUC_HXG_TYPE_RESPONSE_FAILURE:
1496 case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
1497 ret = parse_g2h_response(ct, msg, len);
1498 break;
1499 default:
1500 xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
1501 type);
1502 CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
1503
1504 ret = -EOPNOTSUPP;
1505 }
1506
1507 return ret;
1508 }
1509
1510 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1511 {
1512 struct xe_guc *guc = ct_to_guc(ct);
1513 struct xe_gt *gt = ct_to_gt(ct);
1514 u32 hxg_len = msg_len_to_hxg_len(len);
1515 u32 *hxg = msg_to_hxg(msg);
1516 u32 action, adj_len;
1517 u32 *payload;
1518 int ret = 0;
1519
1520 if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1521 return 0;
1522
1523 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1524 payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
1525 adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
1526
1527 switch (action) {
1528 case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1529 ret = xe_guc_sched_done_handler(guc, payload, adj_len);
1530 break;
1531 case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1532 ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
1533 break;
1534 case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1535 ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
1536 break;
1537 case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1538 ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
1539 adj_len);
1540 break;
1541 case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1542 /* Selftest only at the moment */
1543 break;
1544 case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1545 ret = xe_guc_error_capture_handler(guc, payload, adj_len);
1546 break;
1547 case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1548 /* FIXME: Handle this */
1549 break;
1550 case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
1551 ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
1552 adj_len);
1553 break;
1554 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1555 ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1556 break;
1557 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1558 ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1559 break;
1560 case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
1561 ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
1562 break;
1563 case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
1564 ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
1565 break;
1566 case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
1567 ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
1568 break;
1569 case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
1570 ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
1571 break;
1572 case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
1573 case XE_GUC_ACTION_NOTIFY_EXCEPTION:
1574 ret = guc_crash_process_msg(ct, action);
1575 break;
1576 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1577 case XE_GUC_ACTION_TEST_G2G_RECV:
1578 ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
1579 break;
1580 #endif
1581 default:
1582 xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
1583 }
1584
1585 if (ret) {
1586 xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
1587 action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
1588 CT_DEAD(ct, NULL, PROCESS_FAILED);
1589 }
1590
1591 return 0;
1592 }
1593
1594 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1595 {
1596 struct xe_device *xe = ct_to_xe(ct);
1597 struct xe_gt *gt = ct_to_gt(ct);
1598 struct guc_ctb *g2h = &ct->ctbs.g2h;
1599 u32 tail, head, len, desc_status;
1600 s32 avail;
1601 u32 action;
1602 u32 *hxg;
1603
1604 xe_gt_assert(gt, xe_guc_ct_initialized(ct));
1605 lockdep_assert_held(&ct->fast_lock);
1606
1607 if (ct->state == XE_GUC_CT_STATE_DISABLED)
1608 return -ENODEV;
1609
1610 if (ct->state == XE_GUC_CT_STATE_STOPPED)
1611 return -ECANCELED;
1612
1613 if (g2h->info.broken)
1614 return -EPIPE;
1615
1616 xe_gt_assert(gt, xe_guc_ct_enabled(ct));
1617
1618 desc_status = desc_read(xe, g2h, status);
1619 if (desc_status) {
1620 if (desc_status & GUC_CTB_STATUS_DISABLED) {
1621 /*
1622 * Potentially valid if a CLIENT_RESET request resulted in
1623 * contexts/engines being reset. But should never happen as
1624 * no contexts should be active when CLIENT_RESET is sent.
1625 */
1626 xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
1627 desc_status &= ~GUC_CTB_STATUS_DISABLED;
1628 }
1629
1630 if (desc_status) {
1631 xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
1632 goto corrupted;
1633 }
1634 }
1635
1636 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
1637 u32 desc_tail = desc_read(xe, g2h, tail);
1638 /*
1639 u32 desc_head = desc_read(xe, g2h, head);
1640
1641 * info.head and desc_head are updated back-to-back at the end of
1642 * this function and nowhere else. Hence, they cannot be different
1643 * unless two g2h_read calls are running concurrently. Which is not
1644 * possible because it is guarded by ct->fast_lock. And yet, some
1645 * discrete platforms are regularly hitting this error :(.
1646 *
1647 * desc_head rolling backwards shouldn't cause any noticeable
1648 * problems - just a delay in GuC being allowed to proceed past that
1649 * point in the queue. So for now, just disable the error until it
1650 * can be root caused.
1651 *
1652 if (g2h->info.head != desc_head) {
1653 desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
1654 xe_gt_err(gt, "CT read: head was modified %u != %u\n",
1655 desc_head, g2h->info.head);
1656 goto corrupted;
1657 }
1658 */
1659
1660 if (g2h->info.head > g2h->info.size) {
1661 desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1662 xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
1663 g2h->info.head, g2h->info.size);
1664 goto corrupted;
1665 }
1666
1667 if (desc_tail >= g2h->info.size) {
1668 desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1669 xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
1670 desc_tail, g2h->info.size);
1671 goto corrupted;
1672 }
1673 }
1674
1675 /* Calculate DW available to read */
1676 tail = desc_read(xe, g2h, tail);
1677 avail = tail - g2h->info.head;
1678 if (unlikely(avail == 0))
1679 return 0;
1680
1681 if (avail < 0)
1682 avail += g2h->info.size;
1683
1684 /* Read header */
1685 xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
1686 sizeof(u32));
1687 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1688 if (len > avail) {
1689 xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
1690 avail, len);
1691 goto corrupted;
1692 }
1693
1694 head = (g2h->info.head + 1) % g2h->info.size;
1695 avail = len - 1;
1696
1697 /* Read G2H message */
1698 if (avail + head > g2h->info.size) {
1699 u32 avail_til_wrap = g2h->info.size - head;
1700
1701 xe_map_memcpy_from(xe, msg + 1,
1702 &g2h->cmds, sizeof(u32) * head,
1703 avail_til_wrap * sizeof(u32));
1704 xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
1705 &g2h->cmds, 0,
1706 (avail - avail_til_wrap) * sizeof(u32));
1707 } else {
1708 xe_map_memcpy_from(xe, msg + 1,
1709 &g2h->cmds, sizeof(u32) * head,
1710 avail * sizeof(u32));
1711 }
1712
1713 hxg = msg_to_hxg(msg);
1714 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1715
1716 if (fast_path) {
1717 if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1718 return 0;
1719
1720 switch (action) {
1721 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1722 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1723 break; /* Process these in fast-path */
1724 default:
1725 return 0;
1726 }
1727 }
1728
1729 /* Update local / descriptor header */
1730 g2h->info.head = (head + avail) % g2h->info.size;
1731 desc_write(xe, g2h, head, g2h->info.head);
1732
1733 trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
1734 action, len, g2h->info.head, tail);
1735
1736 return len;
1737
1738 corrupted:
1739 CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
1740 return -EPROTO;
1741 }
1742
1743 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1744 {
1745 struct xe_gt *gt = ct_to_gt(ct);
1746 struct xe_guc *guc = ct_to_guc(ct);
1747 u32 hxg_len = msg_len_to_hxg_len(len);
1748 u32 *hxg = msg_to_hxg(msg);
1749 u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1750 u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
1751 u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
1752 int ret = 0;
1753
1754 switch (action) {
1755 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1756 ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1757 break;
1758 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1759 __g2h_release_space(ct, len);
1760 ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
1761 break;
1762 default:
1763 xe_gt_warn(gt, "NOT_POSSIBLE");
1764 }
1765
1766 if (ret) {
1767 xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
1768 action, ERR_PTR(ret));
1769 CT_DEAD(ct, NULL, FAST_G2H);
1770 }
1771 }
1772
1773 /**
1774 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
1775 * @ct: GuC CT object
1776 *
1777 * Anything related to page faults is critical for performance, so process
1778 * these critical G2H messages directly in the IRQ handler. This is safe as the
1779 * handlers either just wake up waiters or queue another worker.
1780 */
1781 void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1782 {
1783 struct xe_device *xe = ct_to_xe(ct);
1784 bool ongoing;
1785 int len;
1786
1787 ongoing = xe_pm_runtime_get_if_active(xe);
1788 if (!ongoing && xe_pm_read_callback_task(xe) == NULL)
1789 return;
1790
1791 spin_lock(&ct->fast_lock);
1792 do {
1793 len = g2h_read(ct, ct->fast_msg, true);
1794 if (len > 0)
1795 g2h_fast_path(ct, ct->fast_msg, len);
1796 } while (len > 0);
1797 spin_unlock(&ct->fast_lock);
1798
1799 if (ongoing)
1800 xe_pm_runtime_put(xe);
1801 }
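/*
 * Minimal sketch of an IRQ-context caller, for illustration only (the function
 * name below is hypothetical and the real interrupt handler may use a
 * dedicated workqueue rather than system_unbound_wq): the fast path runs
 * directly in the interrupt, and any remaining G2H processing is deferred to
 * the g2h_worker.
 *
 *	static void example_g2h_irq(struct xe_guc_ct *ct)
 *	{
 *		xe_guc_ct_fast_path(ct);
 *		queue_work(system_unbound_wq, &ct->g2h_worker);
 *	}
 */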
1802
1803 /* Returns less than zero on error, 0 on done, 1 on more available */
1804 static int dequeue_one_g2h(struct xe_guc_ct *ct)
1805 {
1806 int len;
1807 int ret;
1808
1809 lockdep_assert_held(&ct->lock);
1810
1811 spin_lock_irq(&ct->fast_lock);
1812 len = g2h_read(ct, ct->msg, false);
1813 spin_unlock_irq(&ct->fast_lock);
1814 if (len <= 0)
1815 return len;
1816
1817 ret = parse_g2h_msg(ct, ct->msg, len);
1818 if (unlikely(ret < 0))
1819 return ret;
1820
1821 ret = process_g2h_msg(ct, ct->msg, len);
1822 if (unlikely(ret < 0))
1823 return ret;
1824
1825 return 1;
1826 }
1827
1828 static void receive_g2h(struct xe_guc_ct *ct)
1829 {
1830 bool ongoing;
1831 int ret;
1832
1833 /*
1834 * Normal users must always hold mem_access.ref around CT calls. However
1835 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
1836 * at this stage we can't rely on mem_access.ref and even the
1837 * callback_task will be different from current. For such cases we just
1838 * need to ensure we always process the responses from any blocking
1839 * ct_send requests or where we otherwise expect some response when
1840 * initiated from those callbacks (which will need to wait for the below
1841 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
1842 * the device has suspended to the point that the CT communication has
1843 * been disabled.
1844 *
1845 * If we are inside the runtime pm callback, we can be the only task
1846 * still issuing CT requests (since that requires having the
1847 * mem_access.ref). It seems like it might in theory be possible to
1848 * receive unsolicited events from the GuC just as we are
1849 * suspending-resuming, but those will currently be lost anyway when
1850 * eventually exiting from suspend, hence there is no need to wake up the
1851 * device here. If we ever need something stronger than this get-if-active
1852 * check, then we need to be careful about blocking the pm callbacks from
1853 * getting CT responses: if the worker here is blocked on those callbacks
1854 * completing, that creates a deadlock.
1855 */
1856 ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1857 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1858 return;
1859
1860 do {
1861 mutex_lock(&ct->lock);
1862 ret = dequeue_one_g2h(ct);
1863 mutex_unlock(&ct->lock);
1864
1865 if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
1866 xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d\n", ret);
1867 CT_DEAD(ct, NULL, G2H_RECV);
1868 kick_reset(ct);
1869 }
1870 } while (ret == 1);
1871
1872 if (ongoing)
1873 xe_pm_runtime_put(ct_to_xe(ct));
1874 }
1875
1876 static void g2h_worker_func(struct work_struct *w)
1877 {
1878 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1879
1880 receive_g2h(ct);
1881 }
1882
1883 static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
1884 bool want_ctb)
1885 {
1886 struct xe_guc_ct_snapshot *snapshot;
1887
1888 snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
1889 if (!snapshot)
1890 return NULL;
1891
1892 if (ct->bo && want_ctb) {
1893 snapshot->ctb_size = xe_bo_size(ct->bo);
1894 snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
1895 }
1896
1897 return snapshot;
1898 }
1899
1900 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
1901 struct guc_ctb_snapshot *snapshot)
1902 {
1903 xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
1904 sizeof(struct guc_ct_buffer_desc));
1905 memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
1906 }
1907
1908 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
1909 struct drm_printer *p)
1910 {
1911 drm_printf(p, "\tsize: %d\n", snapshot->info.size);
1912 drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
1913 drm_printf(p, "\thead: %d\n", snapshot->info.head);
1914 drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
1915 drm_printf(p, "\tspace: %d\n", snapshot->info.space);
1916 drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
1917 drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
1918 drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
1919 drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
1920 }
1921
1922 static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
1923 bool want_ctb)
1924 {
1925 struct xe_device *xe = ct_to_xe(ct);
1926 struct xe_guc_ct_snapshot *snapshot;
1927
1928 snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
1929 if (!snapshot) {
1930 xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
1931 return NULL;
1932 }
1933
1934 if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
1935 snapshot->ct_enabled = true;
1936 snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
1937 guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
1938 guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
1939 }
1940
1941 if (ct->bo && snapshot->ctb)
1942 xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
1943
1944 return snapshot;
1945 }
1946
1947 /**
1948 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
1949 * @ct: GuC CT object.
1950 *
1951 * The snapshot can be printed out at a later stage, e.g. during devcoredump
1952 * analysis. It is safe to call this function from atomic context.
1953 *
1954 * Returns: a GuC CT snapshot object that must be freed by the caller
1955 * using `xe_guc_ct_snapshot_free`.
1956 */
1957 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
1958 {
1959 return guc_ct_snapshot_capture(ct, true, true);
1960 }
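/*
 * Illustrative usage only (the surrounding error-handling context is assumed):
 * capture atomically at the point of failure, then print and free later from
 * process context.
 *
 *	struct xe_guc_ct_snapshot *snap = xe_guc_ct_snapshot_capture(ct);
 *
 *	...later, e.g. when the devcoredump is read...
 *	xe_guc_ct_snapshot_print(snap, p);
 *	xe_guc_ct_snapshot_free(snap);
 */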
1961
1962 /**
1963 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
1964 * @snapshot: GuC CT snapshot object.
1965 * @p: drm_printer where it will be printed out.
1966 *
1967 * This function prints out a given GuC CT snapshot object.
1968 */
1969 void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
1970 struct drm_printer *p)
1971 {
1972 if (!snapshot)
1973 return;
1974
1975 if (snapshot->ct_enabled) {
1976 drm_puts(p, "H2G CTB (all sizes in DW):\n");
1977 guc_ctb_snapshot_print(&snapshot->h2g, p);
1978
1979 drm_puts(p, "G2H CTB (all sizes in DW):\n");
1980 guc_ctb_snapshot_print(&snapshot->g2h, p);
1981 drm_printf(p, "\tg2h outstanding: %d\n",
1982 snapshot->g2h_outstanding);
1983
1984 if (snapshot->ctb) {
1985 drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
1986 xe_print_blob_ascii85(p, "[CTB].data", '\n',
1987 snapshot->ctb, 0, snapshot->ctb_size);
1988 }
1989 } else {
1990 drm_puts(p, "CT disabled\n");
1991 }
1992 }
1993
1994 /**
1995 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
1996 * @snapshot: GuC CT snapshot object.
1997 *
1998 * This function frees all the memory that was allocated at capture
1999 * time.
2000 */
2001 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
2002 {
2003 if (!snapshot)
2004 return;
2005
2006 kfree(snapshot->ctb);
2007 kfree(snapshot);
2008 }
2009
2010 /**
2011 * xe_guc_ct_print - GuC CT Print.
2012 * @ct: GuC CT.
2013 * @p: drm_printer where it will be printed out.
2014 * @want_ctb: Should the full CTB content be dumped (vs just the headers)
2015 *
2016 * This function will quickly capture a snapshot of the CT state
2017 * and immediately print it out.
2018 */
2019 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
2020 {
2021 struct xe_guc_ct_snapshot *snapshot;
2022
2023 snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
2024 xe_guc_ct_snapshot_print(snapshot, p);
2025 xe_guc_ct_snapshot_free(snapshot);
2026 }
2027
2028 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
2029
2030 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
2031 /*
2032 * This is a helper function which lets the driver identify whether a fault
2033 * injection test is currently active, allowing it to reduce unnecessary debug
2034 * output. Typically the function returns zero, but the fault injection
2035 * framework can alter it to return an error. Since faults are injected
2036 * through this function, it is important that the compiler does not optimize
2037 * it into an inline function. The 'noinline' attribute is applied to prevent
2038 * that, since the compiler would otherwise inline such a trivially small
2039 * function.
2040 */
2041 noinline int xe_is_injection_active(void) { return 0; }
2042 ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
2043 #else
2044 int xe_is_injection_active(void) { return 0; }
2045 #endif
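/*
 * ALLOW_ERROR_INJECTION() above registers this function with the kernel's
 * function error injection framework, so (assuming the fault injection /
 * fail_function support is enabled in the kernel config) userspace can force
 * it to return an errno via debugfs during fault injection testing.
 */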
2046
2047 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
2048 {
2049 struct xe_guc_log_snapshot *snapshot_log;
2050 struct xe_guc_ct_snapshot *snapshot_ct;
2051 struct xe_guc *guc = ct_to_guc(ct);
2052 unsigned long flags;
2053 bool have_capture;
2054
2055 if (ctb)
2056 ctb->info.broken = true;
2057 /*
2058 * A huge dump gets generated when injecting errors into the GuC CT/MMIO
2059 * functions, so suppress the dump while fault injection is active.
2060 */
2061 if (xe_is_injection_active())
2062 return;
2063
2064 /* Ignore further errors after the first dump until a reset */
2065 if (ct->dead.reported)
2066 return;
2067
2068 spin_lock_irqsave(&ct->dead.lock, flags);
2069
2070 /* And only capture one dump at a time */
2071 have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
2072 ct->dead.reason |= (1 << reason_code) |
2073 (1 << CT_DEAD_STATE_CAPTURE);
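/*
 * dead.reason is a bitmask indexed by the CT_DEAD_* values: the internal
 * CT_DEAD_STATE_CAPTURE bit marks that a capture is already in flight, and
 * CT_DEAD_STATE_REARM indicates a reset has occurred so error reporting
 * should be re-armed by the worker.
 */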
2074
2075 spin_unlock_irqrestore(&ct->dead.lock, flags);
2076
2077 if (have_capture)
2078 return;
2079
2080 snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
2081 snapshot_ct = xe_guc_ct_snapshot_capture(ct);
2082
2083 spin_lock_irqsave(&ct->dead.lock, flags);
2084
2085 if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
2086 xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
2087 xe_guc_log_snapshot_free(snapshot_log);
2088 xe_guc_ct_snapshot_free(snapshot_ct);
2089 } else {
2090 ct->dead.snapshot_log = snapshot_log;
2091 ct->dead.snapshot_ct = snapshot_ct;
2092 }
2093
2094 spin_unlock_irqrestore(&ct->dead.lock, flags);
2095
2096 queue_work(system_unbound_wq, &ct->dead.worker);
2097 }
2098
2099 static void ct_dead_print(struct xe_dead_ct *dead)
2100 {
2101 struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
2102 struct xe_device *xe = ct_to_xe(ct);
2103 struct xe_gt *gt = ct_to_gt(ct);
2104 static int g_count;
2105 struct drm_printer ip = xe_gt_info_printer(gt);
2106 struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
2107
2108 if (!dead->reason) {
2109 xe_gt_err(gt, "CTB is dead for no reason!?\n");
2110 return;
2111 }
2112
2113 /* Can't generate a genuine core dump at this point, so just do the good bits */
2114 drm_puts(&lp, "**** Xe Device Coredump ****\n");
2115 drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
2116 xe_device_snapshot_print(xe, &lp);
2117
2118 drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
2119 drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
2120
2121 drm_puts(&lp, "**** GuC Log ****\n");
2122 xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
2123
2124 drm_puts(&lp, "**** GuC CT ****\n");
2125 xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
2126
2127 drm_puts(&lp, "Done.\n");
2128 }
2129
2130 static void ct_dead_worker_func(struct work_struct *w)
2131 {
2132 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
2133
2134 if (!ct->dead.reported) {
2135 ct->dead.reported = true;
2136 ct_dead_print(&ct->dead);
2137 }
2138
2139 spin_lock_irq(&ct->dead.lock);
2140
2141 xe_guc_log_snapshot_free(ct->dead.snapshot_log);
2142 ct->dead.snapshot_log = NULL;
2143 xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
2144 ct->dead.snapshot_ct = NULL;
2145
2146 if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
2147 /* A reset has occurred so re-arm the error reporting */
2148 ct->dead.reason = 0;
2149 ct->dead.reported = false;
2150 }
2151
2152 spin_unlock_irq(&ct->dead.lock);
2153 }
2154 #endif
2155