// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ct.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>

#include <kunit/static_stub.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
enum {
	/* Internal states, not error conditions */
	CT_DEAD_STATE_REARM,			/* 0x0001 */
	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */

	/* Error conditions */
	CT_DEAD_SETUP,				/* 0x0004 */
	CT_DEAD_H2G_WRITE,			/* 0x0008 */
	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
	CT_DEAD_G2H_READ,			/* 0x0020 */
	CT_DEAD_G2H_RECV,			/* 0x0040 */
	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
	CT_DEAD_DEADLOCK,			/* 0x0100 */
	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
	CT_DEAD_FAST_G2H,			/* 0x0400 */
	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
};

static void ct_dead_worker_func(struct work_struct *w);
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);

#define CT_DEAD(ct, ctb, reason_code)		ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
#else
#define CT_DEAD(ct, ctb, reason)			\
	do {						\
		struct guc_ctb *_ctb = (ctb);		\
		if (_ctb)				\
			_ctb->info.broken = true;	\
	} while (0)
#endif

/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
	u32 *response_buffer;
	u32 seqno;
	u32 response_data;
	u16 response_len;
	u16 error;
	u16 hint;
	u16 reason;
	bool retry;
	bool fail;
	bool done;
};

static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
	g2h_fence->response_buffer = response_buffer;
	g2h_fence->response_data = 0;
	g2h_fence->response_len = 0;
	g2h_fence->fail = false;
	g2h_fence->retry = false;
	g2h_fence->done = false;
	g2h_fence->seqno = ~0x0;
}

static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
	return g2h_fence->seqno == ~0x0;
}

static struct xe_guc *
ct_to_guc(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_guc, ct);
}

static struct xe_gt *
ct_to_gt(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_gt, uc.guc.ct);
}

static struct xe_device *
ct_to_xe(struct xe_guc_ct *ct)
{
	return gt_to_xe(ct_to_gt(ct));
}

/**
 * DOC: GuC CTB Blob
 *
 * We allocate single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-----------------------------------------------+------+
 *      | offset |  contents                                     | size |
 *      +========+===============================================+======+
 *      | 0x0000 |  H2G CTB Descriptor (send)                    |      |
 *      +--------+-----------------------------------------------+  4K  |
 *      | 0x0800 |  G2H CTB Descriptor (g2h)                     |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 |  H2G CT Buffer (send)                         | n*4K |
 *      |        |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 |  G2H CT Buffer (g2h)                          | m*4K |
 *      | + n*4K |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *
 * Size of each ``CT Buffer`` must be multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using the GuC submission. In that case each request requires a minimum
 * 2 dwords which gives us a maximum 256 queued requests. Hopefully this is
 * enough space to avoid backpressure on the driver. We increase the size
 * of the receive buffer (relative to the send) to ensure a G2H response
 * CTB has a landing spot.
 *
 * In addition to submissions, the G2H buffer needs to be able to hold
 * enough space for recoverable page fault notifications. The number of
 * page faults is interrupt driven and can be as much as the number of
 * compute resources available. However, most of the actual work for these
 * is in a separate page fault worker thread. Therefore we only need to
 * make sure the queue has enough space to handle all of the submissions
 * and responses and an extra buffer for incoming page faults.
 */

#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(SZ_128K)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)
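
/*
 * As a concrete illustration of the layout above (a sketch, assuming the
 * sizes defined here): CTB_DESC_SIZE resolves to 2K, so the two descriptors
 * occupy 0x0000-0x0FFF, the 4K H2G buffer starts at 0x1000 and the 128K G2H
 * buffer at 0x2000, for a total blob of 2 * 2K + 4K + 128K = 136K.
 * guc_ct_size() below returns exactly this sum and xe_guc_ct_init() asserts
 * that it is page aligned.
 */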

/**
 * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
 * CT command queue
 * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
 *
 * Observation is that a 4KiB buffer full of commands takes a little over a
 * second to process. Use that to calculate maximum time to process a full CT
 * command queue.
 *
 * Return: Maximum time to process a full CT queue in jiffies.
 */
long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
{
	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
}
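
/*
 * Worked example (illustrative only): with CTB_H2G_BUFFER_SIZE currently at
 * 4K, the expression above evaluates to (SZ_4K / SZ_4K) * HZ = HZ, i.e.
 * roughly one second per 4KiB of queued commands, matching the observation
 * in the kernel-doc.
 */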

static size_t guc_ct_size(void)
{
	return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
		CTB_G2H_BUFFER_SIZE;
}

static void guc_ct_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_ct *ct = arg;

	destroy_workqueue(ct->g2h_wq);
	xa_destroy(&ct->fence_lookup);
}

static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);

static void primelockdep(struct xe_guc_ct *ct)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ct->lock);
	fs_reclaim_release(GFP_KERNEL);
}

int xe_guc_ct_init(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));

	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
	if (!ct->g2h_wq)
		return -ENOMEM;

	spin_lock_init(&ct->fast_lock);
	xa_init(&ct->fence_lookup);
	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
	spin_lock_init(&ct->dead.lock);
	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
#endif
	init_waitqueue_head(&ct->wq);
	init_waitqueue_head(&ct->g2h_fence_wq);

	err = drmm_mutex_init(&xe->drm, &ct->lock);
	if (err)
		return err;

	primelockdep(ct);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ct->bo = bo;

	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
	if (err)
		return err;

	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
	ct->state = XE_GUC_CT_STATE_DISABLED;
	return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */

#define desc_read(xe_, guc_ctb__, field_)			\
	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_)

#define desc_write(xe_, guc_ctb__, field_, val_)		\
	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_, val_)

static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
{
	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
	h2g->info.resv_space = 0;
	h2g->info.tail = 0;
	h2g->info.head = 0;
	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
				     h2g->info.size) -
			  h2g->info.resv_space;
	h2g->info.broken = false;

	h2g->desc = *map;
	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
}

static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
{
	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
	g2h->info.head = 0;
	g2h->info.tail = 0;
	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
				     g2h->info.size) -
			  g2h->info.resv_space;
	g2h->info.broken = false;

	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
					  CTB_H2G_BUFFER_SIZE);
}
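
/*
 * Illustrative space accounting (a sketch based on the sizes above, not a
 * definitive reference): with head == tail == 0, CIRC_SPACE() reports
 * size - 1 usable dwords. The H2G buffer therefore starts with
 * 4K / 4 - 1 = 1023 DW of space, while the G2H buffer starts with
 * 128K / 4 - 1 - 64K / 4 = 32767 - 16384 = 16383 DW, since half of the G2H
 * buffer (G2H_ROOM_BUFFER_SIZE) is excluded from the credit pool via
 * info.resv_space, leaving room for G2H traffic such as page fault
 * notifications that is not covered by the credit scheme (see the DOC
 * comment above).
 */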

static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo);
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
	size = ct->ctbs.h2g.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
		   CTB_H2G_BUFFER_SIZE;
	size = ct->ctbs.g2h.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
{
	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
			   enable ? GUC_CTB_CONTROL_ENABLE :
			   GUC_CTB_CONTROL_DISABLE),
	};
	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state)
{
	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */

	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
		     state == XE_GUC_CT_STATE_STOPPED);

	if (ct->g2h_outstanding)
		xe_pm_runtime_put(ct_to_xe(ct));
	ct->g2h_outstanding = 0;
	ct->state = state;

	spin_unlock_irq(&ct->fast_lock);

	/*
	 * Lockdep doesn't like this under the fast lock and the destroy only
	 * needs to be serialized with the send path which ct lock provides.
	 */
	xa_destroy(&ct->fence_lookup);

	mutex_unlock(&ct->lock);
}

static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
{
	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
}

static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
{
	if (!ct_needs_safe_mode(ct))
		return false;

	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
	return true;
}

static void safe_mode_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);

	receive_g2h(ct);

	if (!ct_restart_safe_mode_worker(ct))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
}

static void ct_enter_safe_mode(struct xe_guc_ct *ct)
{
	if (ct_restart_safe_mode_worker(ct))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
}

static void ct_exit_safe_mode(struct xe_guc_ct *ct)
{
	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
}

int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	int err;

	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));

	xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);

	err = guc_ct_ctb_h2g_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_ctb_g2h_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_control_toggle(ct, true);
	if (err)
		goto err_out;

	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);

	smp_mb();
	wake_up_all(&ct->wq);
	xe_gt_dbg(gt, "GuC CT communication channel enabled\n");

	if (ct_needs_safe_mode(ct))
		ct_enter_safe_mode(ct);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
	/*
	 * The CT has now been reset so the dumper can be re-armed
	 * after any existing dead state has been dumped.
	 */
	spin_lock_irq(&ct->dead.lock);
	if (ct->dead.reason)
		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
	spin_unlock_irq(&ct->dead.lock);
#endif

	return 0;

err_out:
	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
	CT_DEAD(ct, NULL, SETUP);

	return err;
}

static void stop_g2h_handler(struct xe_guc_ct *ct)
{
	cancel_work_sync(&ct->g2h_worker);
}

/**
 * xe_guc_ct_disable - Set GuC to disabled state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
 * in this transition.
 */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
	ct_exit_safe_mode(ct);
	stop_g2h_handler(ct);
}

/**
 * xe_guc_ct_stop - Set GuC to stopped state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
 */
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
	stop_g2h_handler(ct);
}

static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{
	struct guc_ctb *h2g = &ct->ctbs.h2g;

	lockdep_assert_held(&ct->lock);

	if (cmd_len > h2g->info.space) {
		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);

		if (h2g->info.head > h2g->info.size) {
			struct xe_device *xe = ct_to_xe(ct);
			u32 desc_status = desc_read(xe, h2g, status);

			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);

			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n",
				  h2g->info.head, h2g->info.size);
			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
			return false;
		}

		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
					     h2g->info.size) -
				  h2g->info.resv_space;
		if (cmd_len > h2g->info.space)
			return false;
	}

	return true;
}

static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
{
	if (!g2h_len)
		return true;

	lockdep_assert_held(&ct->fast_lock);

	return ct->ctbs.g2h.info.space > g2h_len;
}

static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
{
	lockdep_assert_held(&ct->lock);

	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
		return -EBUSY;

	return 0;
}

static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
{
	lockdep_assert_held(&ct->lock);
	ct->ctbs.h2g.info.space -= cmd_len;
}

static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
		     (g2h_len && num_g2h));

	if (g2h_len) {
		lockdep_assert_held(&ct->fast_lock);

		if (!ct->g2h_outstanding)
			xe_pm_runtime_get_noresume(ct_to_xe(ct));

		ct->ctbs.g2h.info.space -= g2h_len;
		ct->g2h_outstanding += num_g2h;
	}
}

static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	bool bad = false;

	lockdep_assert_held(&ct->fast_lock);

	bad = ct->ctbs.g2h.info.space + g2h_len >
	      ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
	bad |= !ct->g2h_outstanding;

	if (bad) {
		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
			  ct->ctbs.g2h.info.space, g2h_len,
			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
			  ct->ctbs.g2h.info.space + g2h_len,
			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
			  ct->g2h_outstanding);
		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
		return;
	}

	ct->ctbs.g2h.info.space += g2h_len;
	if (!--ct->g2h_outstanding)
		xe_pm_runtime_put(ct_to_xe(ct));
}

static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	spin_lock_irq(&ct->fast_lock);
	__g2h_release_space(ct, g2h_len);
	spin_unlock_irq(&ct->fast_lock);
}
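
/*
 * Credit flow for G2H space, as a rough sketch of how the helpers above are
 * used (not a definitive description): when an H2G that will produce a G2H
 * reply is submitted, __guc_ct_send_locked() calls __g2h_reserve_space()
 * with the expected reply length and the number of expected replies,
 * shrinking ctbs.g2h.info.space and bumping g2h_outstanding (taking a
 * runtime PM reference for the first outstanding G2H). Once the reply has
 * been consumed, the G2H handlers return the credits through
 * g2h_release_space() / __g2h_release_space(), which also drops the PM
 * reference when the last outstanding G2H is retired.
 */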

#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */

static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *h2g = &ct->ctbs.h2g;
	u32 cmd[H2G_CT_HEADERS];
	u32 tail = h2g->info.tail;
	u32 full_len;
	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
						     tail * sizeof(u32));
	u32 desc_status;

	full_len = len + GUC_CTB_HDR_LEN;

	lockdep_assert_held(&ct->lock);
	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);

	desc_status = desc_read(xe, h2g, status);
	if (desc_status) {
		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
		goto corrupted;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		u32 desc_tail = desc_read(xe, h2g, tail);
		u32 desc_head = desc_read(xe, h2g, head);

		if (tail != desc_tail) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
			goto corrupted;
		}

		if (tail > h2g->info.size) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
				  tail, h2g->info.size);
			goto corrupted;
		}

		if (desc_head >= h2g->info.size) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n",
				  desc_head, h2g->info.size);
			goto corrupted;
		}
	}

	/* Command will wrap, zero fill (NOPs), return and check credits again */
	if (tail + full_len > h2g->info.size) {
		xe_map_memset(xe, &map, 0, 0,
			      (h2g->info.size - tail) * sizeof(u32));
		h2g_reserve_space(ct, (h2g->info.size - tail));
		h2g->info.tail = 0;
		desc_write(xe, h2g, tail, h2g->info.tail);

		return -EAGAIN;
	}

	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 * dw2+: action data
	 */
	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		 FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
	if (want_response) {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	} else {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	}

	/* H2G header in cmd[1] replaces action[0] so: */
	--len;
	++action;

	/* Write H2G ensuring visible before descriptor update */
	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
	xe_device_wmb(xe);

	/* Update local copies */
	h2g->info.tail = (tail + full_len) % h2g->info.size;
	h2g_reserve_space(ct, full_len);

	/* Update descriptor */
	desc_write(xe, h2g, tail, h2g->info.tail);

	trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
			     desc_read(xe, h2g, head), h2g->info.tail);

	return 0;

corrupted:
	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
	return -EPIPE;
}

/*
 * The CT protocol accepts a 16 bits fence. This field is fully owned by the
 * driver, the GuC will just copy it to the reply message. Since we need to
 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
 * we use one bit of the seqno as an indicator for that and a rolling counter
 * for the remaining 15 bits.
 */
#define CT_SEQNO_MASK GENMASK(14, 0)
#define CT_SEQNO_UNTRACKED BIT(15)
static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
{
	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= CT_SEQNO_UNTRACKED;

	return seqno;
}
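
/*
 * For example (illustrative values): a blocking send that allocated a
 * g2h_fence gets a tracked seqno such as 0x0005, which is also the key used
 * for the fence in ct->fence_lookup. A non-blocking send with the same
 * rolling counter value would instead carry 0x8005, i.e. CT_SEQNO_UNTRACKED
 * set, telling parse_g2h_response() that no fence lookup is expected for it.
 */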

static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
{
	struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
	u16 seqno;
	int ret;

	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	xe_gt_assert(gt, !num_g2h || !g2h_fence);
	xe_gt_assert(gt, !g2h_len || num_g2h);
	xe_gt_assert(gt, g2h_len || !num_g2h);
	lockdep_assert_held(&ct->lock);

	if (unlikely(ct->ctbs.h2g.info.broken)) {
		ret = -EPIPE;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
		ret = -ENODEV;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_STOPPED) {
		ret = -ECANCELED;
		goto out;
	}

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	if (g2h_fence) {
		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
		num_g2h = 1;

		if (g2h_fence_needs_alloc(g2h_fence)) {
			g2h_fence->seqno = next_ct_seqno(ct, true);
			ret = xa_err(xa_store(&ct->fence_lookup,
					      g2h_fence->seqno, g2h_fence,
					      GFP_ATOMIC));
			if (ret)
				goto out;
		}

		seqno = g2h_fence->seqno;
	} else {
		seqno = next_ct_seqno(ct, false);
	}

	if (g2h_len)
		spin_lock_irq(&ct->fast_lock);
retry:
	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
	if (unlikely(ret))
		goto out_unlock;

	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
	if (unlikely(ret)) {
		if (ret == -EAGAIN)
			goto retry;
		goto out_unlock;
	}

	__g2h_reserve_space(ct, g2h_len, num_g2h);
	xe_guc_notify(ct_to_guc(ct));
out_unlock:
	if (g2h_len)
		spin_unlock_irq(&ct->fast_lock);
out:
	return ret;
}

static void kick_reset(struct xe_guc_ct *ct)
{
	xe_gt_reset_async(ct_to_gt(ct));
}

static int dequeue_one_g2h(struct xe_guc_ct *ct);

static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	unsigned int sleep_period_ms = 1;
	int ret;

	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	lockdep_assert_held(&ct->lock);
	xe_device_assert_mem_access(ct_to_xe(ct));

try_again:
	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
				   g2h_fence);

	/*
	 * We wait to try to restore credits for about 1 second before bailing.
	 * In the case of H2G credits we have no choice but just to wait for the
	 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
	 * the case of G2H we process any G2H in the channel, hopefully freeing
	 * credits as we consume the G2H messages.
	 */
	if (unlikely(ret == -EBUSY &&
		     !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
		struct guc_ctb *h2g = &ct->ctbs.h2g;

		if (sleep_period_ms == 1024)
			goto broken;

		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
						 h2g->info.size,
						 h2g->info.space,
						 len + GUC_CTB_HDR_LEN);
		msleep(sleep_period_ms);
		sleep_period_ms <<= 1;

		goto try_again;
	} else if (unlikely(ret == -EBUSY)) {
		struct xe_device *xe = ct_to_xe(ct);
		struct guc_ctb *g2h = &ct->ctbs.g2h;

		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
						 desc_read(xe, g2h, tail),
						 g2h->info.size,
						 g2h->info.space,
						 g2h_fence ?
						 GUC_CTB_HXG_MSG_MAX_LEN :
						 g2h_len);

#define g2h_avail(ct)	\
	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
					g2h_avail(ct), HZ))
			goto broken;
#undef g2h_avail

		ret = dequeue_one_g2h(ct);
		if (ret < 0) {
			if (ret != -ECANCELED)
				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
					  ERR_PTR(ret));
			goto broken;
		}

		goto try_again;
	}

	return ret;

broken:
	xe_gt_err(gt, "No forward process on H2G, reset required\n");
	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);

	return -EDEADLK;
}

static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
{
	int ret;

	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);

	mutex_lock(&ct->lock);
	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
	mutex_unlock(&ct->lock);

	return ret;
}

int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	int ret;

	lockdep_assert_held(&ct->lock);

	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

/*
 * Check if a GT reset is in progress or will occur and if GT reset brought the
 * CT back up. Randomly picking 5 seconds for an upper limit to do a GT reset.
 */
static bool retry_failure(struct xe_guc_ct *ct, int ret)
{
	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
		return false;

#define ct_alive(ct)	\
	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
	 !ct->ctbs.g2h.info.broken)
	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
		return false;
#undef ct_alive

	return true;
}

static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct g2h_fence g2h_fence;
	int ret = 0;

	/*
	 * We use a fence to implement blocking sends / receiving response data.
	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
	 * an xarray is used as storage media with the seqno being the key.
	 * Fields in the fence hold success, failure, retry status and the
	 * response data. Safe to allocate on the stack as the xarray is the
	 * only reference and it cannot be present after this function exits.
	 */
retry:
	g2h_fence_init(&g2h_fence, response_buffer);
retry_same_fence:
	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
	if (unlikely(ret == -ENOMEM)) {
		/* Retry allocation w/ GFP_KERNEL */
		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
				      &g2h_fence, GFP_KERNEL));
		if (ret)
			return ret;

		goto retry_same_fence;
	} else if (unlikely(ret)) {
		if (ret == -EDEADLK)
			kick_reset(ct);

		if (no_fail && retry_failure(ct, ret))
			goto retry_same_fence;

		if (!g2h_fence_needs_alloc(&g2h_fence))
			xa_erase(&ct->fence_lookup, g2h_fence.seqno);

		return ret;
	}

	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);

	/*
	 * Occasionally it is seen that the G2H worker starts running after a delay of more than
	 * a second even after being queued and activated by the Linux workqueue subsystem. This
	 * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
	 * Lunarlake Hybrid CPU. Issue disappears if we disable Lunarlake atom cores from BIOS
	 * and this is beyond xe kmd.
	 *
	 * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
	 */
	if (!ret) {
		flush_work(&ct->g2h_worker);
		if (g2h_fence.done) {
			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
				   g2h_fence.seqno, action[0]);
			ret = 1;
		}
	}

	/*
	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
	 * the stack, since we have no clue if it will fire after the timeout before we can erase
	 * from the xa. Also we have some dependent loads and stores below for which we need the
	 * correct ordering, and we lack the needed barriers.
	 */
	mutex_lock(&ct->lock);
	if (!ret) {
		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
		xa_erase(&ct->fence_lookup, g2h_fence.seqno);
		mutex_unlock(&ct->lock);
		return -ETIME;
	}

	if (g2h_fence.retry) {
		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
			  action[0], g2h_fence.reason);
		mutex_unlock(&ct->lock);
		goto retry;
	}
	if (g2h_fence.fail) {
		xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
			  action[0], g2h_fence.error, g2h_fence.hint);
		ret = -EIO;
	}

	if (ret > 0)
		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;

	mutex_unlock(&ct->lock);

	return ret;
}

/**
 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
 * @ct: the &xe_guc_ct
 * @action: the dword array with `HXG Request`_ message (can't be NULL)
 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
 *
 * Send a `HXG Request`_ message to the GuC over CT communication channel and
 * blocks until GuC replies with a `HXG Response`_ message.
 *
 * For non-blocking communication with GuC use xe_guc_ct_send().
 *
 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
 *
 * Return: response length (in dwords) if &response_buffer was not NULL, or
 *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
 *         a negative error code on failure.
 */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer)
{
	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
	return guc_ct_send_recv(ct, action, len, response_buffer, false);
}

int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer)
{
	return guc_ct_send_recv(ct, action, len, response_buffer, true);
}
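
/*
 * Hypothetical usage sketch for the blocking API above (not taken from a
 * real caller; SOME_GUC_ACTION_CODE and some_payload_dw are placeholders):
 *
 *	u32 action[] = {
 *		SOME_GUC_ACTION_CODE,
 *		some_payload_dw,
 *	};
 *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 * A negative ret indicates failure; otherwise ret carries DATA0 of the
 * response, or the response length in dwords had a buffer been passed
 * instead of NULL.
 */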

static u32 *msg_to_hxg(u32 *msg)
{
	return msg + GUC_CTB_MSG_MIN_LEN;
}

static u32 msg_len_to_hxg_len(u32 len)
{
	return len - GUC_CTB_MSG_MIN_LEN;
}

static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	lockdep_assert_held(&ct->lock);

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		g2h_release_space(ct, len);
	}

	return 0;
}

static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	struct g2h_fence *g2h_fence;

	lockdep_assert_held(&ct->lock);

	/*
	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
	 * Those messages should never fail, so if we do get an error back it
	 * means we're likely doing an illegal operation and the GuC is
	 * rejecting it. We have no way to inform the code that submitted the
	 * H2G that the message was rejected, so we need to escalate the
	 * failure to trigger a reset.
	 */
	if (fence & CT_SEQNO_UNTRACKED) {
		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
				  fence,
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
		else
			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
				  type, fence);
		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);

		return -EPROTO;
	}

	g2h_fence = xa_erase(&ct->fence_lookup, fence);
	if (unlikely(!g2h_fence)) {
		/* Don't tear down channel, as send could've timed out */
		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
		return 0;
	}

	xe_gt_assert(gt, fence == g2h_fence->seqno);

	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		g2h_fence->fail = true;
		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		g2h_fence->retry = true;
		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
	} else if (g2h_fence->response_buffer) {
		g2h_fence->response_len = hxg_len;
		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
	} else {
		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
	}

	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);

	g2h_fence->done = true;
	smp_mb();

	wake_up_all(&ct->g2h_fence_wq);

	return 0;
}

static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 origin, type;
	int ret;

	lockdep_assert_held(&ct->lock);

	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
			  origin);
		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);

		return -EPROTO;
	}

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	switch (type) {
	case GUC_HXG_TYPE_EVENT:
		ret = parse_g2h_event(ct, msg, len);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		ret = parse_g2h_response(ct, msg, len);
		break;
	default:
		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
			  type);
		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);

		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_guc *guc = ct_to_guc(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action, adj_len;
	u32 *payload;
	int ret = 0;

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
		return 0;

	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
							      adj_len);
		break;
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
		/* Selftest only at the moment */
		break;
	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
		ret = xe_guc_error_capture_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
		/* FIXME: Handle this */
		break;
	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
								 adj_len);
		break;
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
		ret = xe_guc_access_counter_notify_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
		break;
	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
		break;
	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
		break;
	case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
		ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
		break;
	default:
		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
	}

	if (ret) {
		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));
		CT_DEAD(ct, NULL, PROCESS_FAILED);
	}

	return 0;
}

static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *g2h = &ct->ctbs.g2h;
	u32 tail, head, len, desc_status;
	s32 avail;
	u32 action;
	u32 *hxg;

	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	lockdep_assert_held(&ct->fast_lock);

	if (ct->state == XE_GUC_CT_STATE_DISABLED)
		return -ENODEV;

	if (ct->state == XE_GUC_CT_STATE_STOPPED)
		return -ECANCELED;

	if (g2h->info.broken)
		return -EPIPE;

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	desc_status = desc_read(xe, g2h, status);
	if (desc_status) {
		if (desc_status & GUC_CTB_STATUS_DISABLED) {
			/*
			 * Potentially valid if a CLIENT_RESET request resulted in
			 * contexts/engines being reset. But should never happen as
			 * no contexts should be active when CLIENT_RESET is sent.
			 */
			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
			desc_status &= ~GUC_CTB_STATUS_DISABLED;
		}

		if (desc_status) {
			xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
			goto corrupted;
		}
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		u32 desc_tail = desc_read(xe, g2h, tail);
		/*
		u32 desc_head = desc_read(xe, g2h, head);

		 * info.head and desc_head are updated back-to-back at the end of
		 * this function and nowhere else. Hence, they cannot be different
		 * unless two g2h_read calls are running concurrently. Which is not
		 * possible because it is guarded by ct->fast_lock. And yet, some
		 * discrete platforms are regularly hitting this error :(.
		 *
		 * desc_head rolling backwards shouldn't cause any noticeable
		 * problems - just a delay in GuC being allowed to proceed past that
		 * point in the queue. So for now, just disable the error until it
		 * can be root caused.
		 *
		if (g2h->info.head != desc_head) {
			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
			xe_gt_err(gt, "CT read: head was modified %u != %u\n",
				  desc_head, g2h->info.head);
			goto corrupted;
		}
		*/

		if (g2h->info.head > g2h->info.size) {
			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
				  g2h->info.head, g2h->info.size);
			goto corrupted;
		}

		if (desc_tail >= g2h->info.size) {
			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u)\n",
				  desc_tail, g2h->info.size);
			goto corrupted;
		}
	}

	/* Calculate DW available to read */
	tail = desc_read(xe, g2h, tail);
	avail = tail - g2h->info.head;
	if (unlikely(avail == 0))
		return 0;

	if (avail < 0)
		avail += g2h->info.size;

	/* Read header */
	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
			   sizeof(u32));
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
	if (len > avail) {
		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
			  avail, len);
		goto corrupted;
	}

	head = (g2h->info.head + 1) % g2h->info.size;
	avail = len - 1;

	/* Read G2H message */
	if (avail + head > g2h->info.size) {
		u32 avail_til_wrap = g2h->info.size - head;

		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail_til_wrap * sizeof(u32));
		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
				   &g2h->cmds, 0,
				   (avail - avail_til_wrap) * sizeof(u32));
	} else {
		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail * sizeof(u32));
	}

	hxg = msg_to_hxg(msg);
	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	if (fast_path) {
		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
			return 0;

		switch (action) {
		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
			break;	/* Process these in fast-path */
		default:
			return 0;
		}
	}

	/* Update local / descriptor header */
	g2h->info.head = (head + avail) % g2h->info.size;
	desc_write(xe, g2h, head, g2h->info.head);

	trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
			     action, len, g2h->info.head, tail);

	return len;

corrupted:
	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
	return -EPROTO;
}
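
/*
 * Worked wrap-around example for g2h_read() (illustrative numbers only):
 * with info.size = 32768 DW, suppose info.head = 32766 and a 4 DW message
 * is pending. After the 1 DW header is read, head becomes 32767 and
 * avail = 3, so avail + head exceeds the buffer size; avail_til_wrap = 1,
 * meaning 1 DW is copied from offset 32767 and the remaining 2 DW from
 * offset 0, after which info.head advances to (32767 + 3) % 32768 = 2.
 */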

static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_guc *guc = ct_to_guc(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
	int ret = 0;

	switch (action) {
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		__g2h_release_space(ct, len);
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	default:
		xe_gt_warn(gt, "NOT_POSSIBLE");
	}

	if (ret) {
		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));
		CT_DEAD(ct, NULL, FAST_G2H);
	}
}

/**
 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
 * @ct: GuC CT object
 *
 * Anything related to page faults is critical for performance, process these
 * critical G2H in the IRQ. This is safe as these handlers either just wake up
 * waiters or queue another worker.
 */
void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	bool ongoing;
	int len;

	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	spin_lock(&ct->fast_lock);
	do {
		len = g2h_read(ct, ct->fast_msg, true);
		if (len > 0)
			g2h_fast_path(ct, ct->fast_msg, len);
	} while (len > 0);
	spin_unlock(&ct->fast_lock);

	if (ongoing)
		xe_pm_runtime_put(xe);
}

/* Returns less than zero on error, 0 on done, 1 on more available */
static int dequeue_one_g2h(struct xe_guc_ct *ct)
{
	int len;
	int ret;

	lockdep_assert_held(&ct->lock);

	spin_lock_irq(&ct->fast_lock);
	len = g2h_read(ct, ct->msg, false);
	spin_unlock_irq(&ct->fast_lock);
	if (len <= 0)
		return len;

	ret = parse_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	ret = process_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	return 1;
}

static void receive_g2h(struct xe_guc_ct *ct)
{
	bool ongoing;
	int ret;

	/*
	 * Normal users must always hold mem_access.ref around CT calls. However
	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
	 * at this stage we can't rely on mem_access.ref and even the
	 * callback_task will be different than current. For such cases we just
	 * need to ensure we always process the responses from any blocking
	 * ct_send requests or where we otherwise expect some response when
	 * initiated from those callbacks (which will need to wait for the below
	 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
	 * the device has suspended to the point that the CT communication has
	 * been disabled.
	 *
	 * If we are inside the runtime pm callback, we can be the only task
	 * still issuing CT requests (since that requires having the
	 * mem_access.ref). It seems like it might in theory be possible to
	 * receive unsolicited events from the GuC just as we are
	 * suspending-resuming, but those will currently anyway be lost when
	 * eventually exiting from suspend, hence no need to wake up the device
	 * here. If we ever need something stronger than get_if_ongoing() then
	 * we need to be careful with blocking the pm callbacks from getting CT
	 * responses, if the worker here is blocked on those callbacks
	 * completing, creating a deadlock.
	 */
	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	do {
		mutex_lock(&ct->lock);
		ret = dequeue_one_g2h(ct);
		mutex_unlock(&ct->lock);

		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
			CT_DEAD(ct, NULL, G2H_RECV);
			kick_reset(ct);
		}
	} while (ret == 1);

	if (ongoing)
		xe_pm_runtime_put(ct_to_xe(ct));
}

static void g2h_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);

	receive_g2h(ct);
}

static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
							bool want_ctb)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!snapshot)
		return NULL;

	if (ct->bo && want_ctb) {
		snapshot->ctb_size = ct->bo->size;
		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
	}

	return snapshot;
}

static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
				     struct guc_ctb_snapshot *snapshot)
{
	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
			   sizeof(struct guc_ct_buffer_desc));
	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
}

static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
				   struct drm_printer *p)
{
	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
	drm_printf(p, "\thead: %d\n", snapshot->info.head);
	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
}

static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
							   bool want_ctb)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
	if (!snapshot) {
		xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
		return NULL;
	}

	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
		snapshot->ct_enabled = true;
		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
	}

	if (ct->bo && snapshot->ctb)
		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);

	return snapshot;
}

/**
 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
 * @ct: GuC CT object.
 *
 * This can be printed out in a later stage like during dev_coredump
 * analysis. This is safe to be called during atomic context.
 *
 * Returns: a GuC CT snapshot object that must be freed by the caller
 * by using `xe_guc_ct_snapshot_free`.
 */
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
{
	return guc_ct_snapshot_capture(ct, true, true);
}

/**
 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
 * @snapshot: GuC CT snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC CT snapshot object.
 */
void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
			      struct drm_printer *p)
{
	if (!snapshot)
		return;

	if (snapshot->ct_enabled) {
		drm_puts(p, "H2G CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->h2g, p);

		drm_puts(p, "G2H CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->g2h, p);
		drm_printf(p, "\tg2h outstanding: %d\n",
			   snapshot->g2h_outstanding);

		if (snapshot->ctb)
			xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
	} else {
		drm_puts(p, "CT disabled\n");
	}
}

/**
 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: GuC CT snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
{
	if (!snapshot)
		return;

	kfree(snapshot->ctb);
	kfree(snapshot);
}

/**
 * xe_guc_ct_print - GuC CT Print.
 * @ct: GuC CT.
 * @p: drm_printer where it will be printed out.
 * @want_ctb: Should the full CTB content be dumped (vs just the headers)
 *
 * This function will quickly capture a snapshot of the CT state
 * and immediately print it out.
 */
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
	xe_guc_ct_snapshot_print(snapshot, p);
	xe_guc_ct_snapshot_free(snapshot);
}

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
{
	struct xe_guc_log_snapshot *snapshot_log;
	struct xe_guc_ct_snapshot *snapshot_ct;
	struct xe_guc *guc = ct_to_guc(ct);
	unsigned long flags;
	bool have_capture;

	if (ctb)
		ctb->info.broken = true;

	/* Ignore further errors after the first dump until a reset */
	if (ct->dead.reported)
		return;

	spin_lock_irqsave(&ct->dead.lock, flags);

	/* And only capture one dump at a time */
	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
	ct->dead.reason |= (1 << reason_code) |
			   (1 << CT_DEAD_STATE_CAPTURE);

	spin_unlock_irqrestore(&ct->dead.lock, flags);

	if (have_capture)
		return;

	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
	snapshot_ct = xe_guc_ct_snapshot_capture((ct));

	spin_lock_irqsave(&ct->dead.lock, flags);

	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
		xe_guc_log_snapshot_free(snapshot_log);
		xe_guc_ct_snapshot_free(snapshot_ct);
	} else {
		ct->dead.snapshot_log = snapshot_log;
		ct->dead.snapshot_ct = snapshot_ct;
	}

	spin_unlock_irqrestore(&ct->dead.lock, flags);

	queue_work(system_unbound_wq, &(ct)->dead.worker);
}

static void ct_dead_print(struct xe_dead_ct *dead)
{
	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	static int g_count;
	struct drm_printer ip = xe_gt_info_printer(gt);
	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);

	if (!dead->reason) {
		xe_gt_err(gt, "CTB is dead for no reason!?\n");
		return;
	}

	drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);

	/* Can't generate a genuine core dump at this point, so just do the good bits */
	drm_puts(&lp, "**** Xe Device Coredump ****\n");
	xe_device_snapshot_print(xe, &lp);

	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);

	drm_puts(&lp, "**** GuC Log ****\n");
	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);

	drm_puts(&lp, "**** GuC CT ****\n");
	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);

	drm_puts(&lp, "Done.\n");
}
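
/*
 * Example decode of dead->reason (illustrative value only): the enum entries
 * at the top of this file are bit positions, so a reported reason of 0x0102
 * means the DEADLOCK error (0x0100) was hit and a capture is in flight
 * (CT_DEAD_STATE_CAPTURE, 0x0002). CT_DEAD_STATE_REARM (0x0001) is set once
 * a reset has re-enabled the CT so the worker below can re-arm reporting.
 */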

static void ct_dead_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);

	if (!ct->dead.reported) {
		ct->dead.reported = true;
		ct_dead_print(&ct->dead);
	}

	spin_lock_irq(&ct->dead.lock);

	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
	ct->dead.snapshot_log = NULL;
	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
	ct->dead.snapshot_ct = NULL;

	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
		/* A reset has occurred so re-arm the error reporting */
		ct->dead.reason = 0;
		ct->dead.reported = false;
	}

	spin_unlock_irq(&ct->dead.lock);
}
#endif