// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ct.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>

#include <kunit/static_stub.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"

/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
	u32 *response_buffer;
	u32 seqno;
	u32 response_data;
	u16 response_len;
	u16 error;
	u16 hint;
	u16 reason;
	bool retry;
	bool fail;
	bool done;
};

static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
	g2h_fence->response_buffer = response_buffer;
	g2h_fence->response_data = 0;
	g2h_fence->response_len = 0;
	g2h_fence->fail = false;
	g2h_fence->retry = false;
	g2h_fence->done = false;
	g2h_fence->seqno = ~0x0;
}

static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
	return g2h_fence->seqno == ~0x0;
}

static struct xe_guc *
ct_to_guc(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_guc, ct);
}

static struct xe_gt *
ct_to_gt(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_gt, uc.guc.ct);
}

static struct xe_device *
ct_to_xe(struct xe_guc_ct *ct)
{
	return gt_to_xe(ct_to_gt(ct));
}

/**
 * DOC: GuC CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-----------------------------------------------+------+
 *      | offset | contents                                      | size |
 *      +========+===============================================+======+
 *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
 *      +--------+-----------------------------------------------+  4K  |
 *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
 *      |        |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
 *      | + n*4K |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *
 * Size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
 * is enough space to avoid backpressure on the driver. We increase the size
 * of the receive buffer (relative to the send) to ensure a G2H response
 * CTB has a landing spot.
 *
 * In addition to submissions, the G2H buffer needs to be able to hold
 * enough space for recoverable page fault notifications. The number of
 * page faults is interrupt driven and can be as much as the number of
 * compute resources available. However, most of the actual work for these
 * is in a separate page fault worker thread. Therefore we only need to
 * make sure the queue has enough space to handle all of the submissions
 * and responses and an extra buffer for incoming page faults.
 */
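/*
 * A rough worked example of the layout the sizes below imply (illustrative
 * arithmetic only): each CTB descriptor is padded to 2K, so both descriptors
 * fit in the first 4K page; the 4K H2G buffer then starts at offset 0x1000
 * and the 128K G2H buffer at offset 0x2000, for a 0x22000 (136K) blob total.
 */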
#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(SZ_128K)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)

/**
 * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
 * CT command queue
 * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
 *
 * Observation is that a 4KiB buffer full of commands takes a little over a
 * second to process. Use that to calculate maximum time to process a full CT
 * command queue.
 *
 * Return: Maximum time to process a full CT queue in jiffies.
 */
long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
{
	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4K));
	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
}

static size_t guc_ct_size(void)
{
	return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
		CTB_G2H_BUFFER_SIZE;
}

static void guc_ct_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_ct *ct = arg;

	destroy_workqueue(ct->g2h_wq);
	xa_destroy(&ct->fence_lookup);
}

static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);

static void primelockdep(struct xe_guc_ct *ct)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ct->lock);
	fs_reclaim_release(GFP_KERNEL);
}

int xe_guc_ct_init(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));

	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
	if (!ct->g2h_wq)
		return -ENOMEM;

	spin_lock_init(&ct->fast_lock);
	xa_init(&ct->fence_lookup);
	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
	init_waitqueue_head(&ct->wq);
	init_waitqueue_head(&ct->g2h_fence_wq);

	err = drmm_mutex_init(&xe->drm, &ct->lock);
	if (err)
		return err;

	primelockdep(ct);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ct->bo = bo;

	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
	if (err)
		return err;

	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
	ct->state = XE_GUC_CT_STATE_DISABLED;
	return 0;
}

#define desc_read(xe_, guc_ctb__, field_)			\
	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_)

#define desc_write(xe_, guc_ctb__, field_, val_)		\
	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_, val_)

static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
{
	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
	h2g->info.resv_space = 0;
	h2g->info.tail = 0;
	h2g->info.head = 0;
	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
				     h2g->info.size) -
			  h2g->info.resv_space;
	h2g->info.broken = false;

	h2g->desc = *map;
	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
}
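/*
 * Note on the space accounting above and below: CIRC_SPACE() always keeps
 * one entry unused to distinguish a full ring from an empty one, so with
 * head == tail == 0 the initial space works out to size - 1 dwords, minus
 * any reserved space. resv_space is only non-zero for G2H, where half the
 * buffer is held back as guaranteed landing room for outstanding replies.
 */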
static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
{
	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
	g2h->info.head = 0;
	g2h->info.tail = 0;
	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
				     g2h->info.size) -
			  g2h->info.resv_space;
	g2h->info.broken = false;

	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
					  CTB_H2G_BUFFER_SIZE);
}

static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo);
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
	size = ct->ctbs.h2g.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
		   CTB_H2G_BUFFER_SIZE;
	size = ct->ctbs.g2h.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
{
	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
			   enable ? GUC_CTB_CONTROL_ENABLE :
			   GUC_CTB_CONTROL_DISABLE),
	};
	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state)
{
	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */

	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
		     state == XE_GUC_CT_STATE_STOPPED);

	if (ct->g2h_outstanding)
		xe_pm_runtime_put(ct_to_xe(ct));
	ct->g2h_outstanding = 0;
	ct->state = state;

	spin_unlock_irq(&ct->fast_lock);

	/*
	 * Lockdep doesn't like this under the fast lock, and the destroy only
	 * needs to be serialized with the send path, which the ct lock
	 * provides.
	 */
	xa_destroy(&ct->fence_lookup);

	mutex_unlock(&ct->lock);
}
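/*
 * Safe mode (below) is a fallback for configurations where MSI interrupts
 * are not available: instead of being kicked by the G2H interrupt, a delayed
 * worker polls the G2H buffer roughly every 100 ms (HZ / 10) for as long as
 * the condition holds.
 */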
static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
{
	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
}

static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
{
	if (!ct_needs_safe_mode(ct))
		return false;

	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
	return true;
}

static void safe_mode_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);

	receive_g2h(ct);

	if (!ct_restart_safe_mode_worker(ct))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
}

static void ct_enter_safe_mode(struct xe_guc_ct *ct)
{
	if (ct_restart_safe_mode_worker(ct))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
}

static void ct_exit_safe_mode(struct xe_guc_ct *ct)
{
	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
}

int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	int err;

	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));

	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);

	err = guc_ct_ctb_h2g_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_ctb_g2h_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_control_toggle(ct, true);
	if (err)
		goto err_out;

	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);

	smp_mb();
	wake_up_all(&ct->wq);
	xe_gt_dbg(gt, "GuC CT communication channel enabled\n");

	if (ct_needs_safe_mode(ct))
		ct_enter_safe_mode(ct);

	return 0;

err_out:
	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));

	return err;
}

static void stop_g2h_handler(struct xe_guc_ct *ct)
{
	cancel_work_sync(&ct->g2h_worker);
}
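/*
 * A rough sketch of the CT lifecycle as implemented by the helpers in this
 * file: NOT_INITIALIZED -> DISABLED on init, DISABLED -> ENABLED on enable,
 * and from ENABLED either back to DISABLED (clean teardown, no outstanding
 * G2H expected) or to STOPPED (reset path, where any outstanding G2H credits
 * are forcibly reclaimed by xe_guc_ct_set_state()).
 */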
/**
 * xe_guc_ct_disable - Set GuC to disabled state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h
 * expected in this transition.
 */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
	ct_exit_safe_mode(ct);
	stop_g2h_handler(ct);
}

/**
 * xe_guc_ct_stop - Set GuC to stopped state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding
 * g2h.
 */
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
	stop_g2h_handler(ct);
}

static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{
	struct guc_ctb *h2g = &ct->ctbs.h2g;

	lockdep_assert_held(&ct->lock);

	if (cmd_len > h2g->info.space) {
		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
					     h2g->info.size) -
				  h2g->info.resv_space;
		if (cmd_len > h2g->info.space)
			return false;
	}

	return true;
}

static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
{
	if (!g2h_len)
		return true;

	lockdep_assert_held(&ct->fast_lock);

	return ct->ctbs.g2h.info.space > g2h_len;
}

static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
{
	lockdep_assert_held(&ct->lock);

	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
		return -EBUSY;

	return 0;
}

static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
{
	lockdep_assert_held(&ct->lock);
	ct->ctbs.h2g.info.space -= cmd_len;
}

static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
		     (g2h_len && num_g2h));

	if (g2h_len) {
		lockdep_assert_held(&ct->fast_lock);

		if (!ct->g2h_outstanding)
			xe_pm_runtime_get_noresume(ct_to_xe(ct));

		ct->ctbs.g2h.info.space -= g2h_len;
		ct->g2h_outstanding += num_g2h;
	}
}

static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	lockdep_assert_held(&ct->fast_lock);
	xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);

	ct->ctbs.g2h.info.space += g2h_len;
	if (!--ct->g2h_outstanding)
		xe_pm_runtime_put(ct_to_xe(ct));
}

static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	spin_lock_irq(&ct->fast_lock);
	__g2h_release_space(ct, g2h_len);
	spin_unlock_irq(&ct->fast_lock);
}

#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */

static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *h2g = &ct->ctbs.h2g;
	u32 cmd[H2G_CT_HEADERS];
	u32 tail = h2g->info.tail;
	u32 full_len;
	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
						     tail * sizeof(u32));

	full_len = len + GUC_CTB_HDR_LEN;

	lockdep_assert_held(&ct->lock);
	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
	xe_gt_assert(gt, tail <= h2g->info.size);

	/* Command will wrap, zero fill (NOPs), return and check credits again */
	if (tail + full_len > h2g->info.size) {
		xe_map_memset(xe, &map, 0, 0,
			      (h2g->info.size - tail) * sizeof(u32));
		h2g_reserve_space(ct, (h2g->info.size - tail));
		h2g->info.tail = 0;
		desc_write(xe, h2g, tail, h2g->info.tail);

		return -EAGAIN;
	}

	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 * dw2+: action data
	 */
	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
	if (want_response) {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	} else {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	}

	/* H2G header in cmd[1] replaces action[0] so: */
	--len;
	++action;

	/* Write H2G, ensuring it is visible before the descriptor update */
	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
	xe_device_wmb(xe);

	/* Update local copies */
	h2g->info.tail = (tail + full_len) % h2g->info.size;
	h2g_reserve_space(ct, full_len);

	/* Update descriptor */
	desc_write(xe, h2g, tail, h2g->info.tail);

	trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
			     desc_read(xe, h2g, head), h2g->info.tail);

	return 0;
}

/*
 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
 * driver, the GuC will just copy it to the reply message. Since we need to
 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
 * we use one bit of the seqno as an indicator for that and a rolling counter
 * for the remaining 15 bits.
 */
#define CT_SEQNO_MASK GENMASK(14, 0)
#define CT_SEQNO_UNTRACKED BIT(15)
static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
{
	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= CT_SEQNO_UNTRACKED;

	return seqno;
}
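/*
 * Worked example of the fence encoding above (illustrative values only): the
 * 6th fence allocated since load would be 0x0005 for a tracked REQUEST, while
 * a FAST_REQUEST issued at the same point would carry 0x8005 - the same
 * rolling counter, with bit 15 set so that parse_g2h_response() knows there
 * is no g2h_fence waiting in ct->fence_lookup for it.
 */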
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
{
	struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
	u16 seqno;
	int ret;

	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	xe_gt_assert(gt, !num_g2h || !g2h_fence);
	xe_gt_assert(gt, !g2h_len || num_g2h);
	xe_gt_assert(gt, g2h_len || !num_g2h);
	lockdep_assert_held(&ct->lock);

	if (unlikely(ct->ctbs.h2g.info.broken)) {
		ret = -EPIPE;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
		ret = -ENODEV;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_STOPPED) {
		ret = -ECANCELED;
		goto out;
	}

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	if (g2h_fence) {
		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
		num_g2h = 1;

		if (g2h_fence_needs_alloc(g2h_fence)) {
			g2h_fence->seqno = next_ct_seqno(ct, true);
			ret = xa_err(xa_store(&ct->fence_lookup,
					      g2h_fence->seqno, g2h_fence,
					      GFP_ATOMIC));
			if (ret)
				goto out;
		}

		seqno = g2h_fence->seqno;
	} else {
		seqno = next_ct_seqno(ct, false);
	}

	if (g2h_len)
		spin_lock_irq(&ct->fast_lock);
retry:
	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
	if (unlikely(ret))
		goto out_unlock;

	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
	if (unlikely(ret)) {
		if (ret == -EAGAIN)
			goto retry;
		goto out_unlock;
	}

	__g2h_reserve_space(ct, g2h_len, num_g2h);
	xe_guc_notify(ct_to_guc(ct));
out_unlock:
	if (g2h_len)
		spin_unlock_irq(&ct->fast_lock);
out:
	return ret;
}

static void kick_reset(struct xe_guc_ct *ct)
{
	xe_gt_reset_async(ct_to_gt(ct));
}

static int dequeue_one_g2h(struct xe_guc_ct *ct);
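/*
 * Back-of-the-envelope note on the H2G flow control below: sleep_period_ms
 * doubles from 1 ms and the loop bails once it reaches 1024, so a sender
 * stuck on H2G credits sleeps for 1 + 2 + ... + 512 ms, roughly the one
 * second the in-function comment promises, before the channel is declared
 * broken.
 */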
static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct drm_printer p = xe_gt_info_printer(gt);
	unsigned int sleep_period_ms = 1;
	int ret;

	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	lockdep_assert_held(&ct->lock);
	xe_device_assert_mem_access(ct_to_xe(ct));

try_again:
	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
				   g2h_fence);

	/*
	 * We wait to try to restore credits for about 1 second before bailing.
	 * In the case of H2G credits we have no choice but to wait for the
	 * GuC to consume H2Gs in the channel, so we use a wait / sleep loop.
	 * In the case of G2H we process any G2H in the channel, hopefully
	 * freeing credits as we consume the G2H messages.
	 */
	if (unlikely(ret == -EBUSY &&
		     !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
		struct guc_ctb *h2g = &ct->ctbs.h2g;

		if (sleep_period_ms == 1024)
			goto broken;

		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
						 h2g->info.size,
						 h2g->info.space,
						 len + GUC_CTB_HDR_LEN);
		msleep(sleep_period_ms);
		sleep_period_ms <<= 1;

		goto try_again;
	} else if (unlikely(ret == -EBUSY)) {
		struct xe_device *xe = ct_to_xe(ct);
		struct guc_ctb *g2h = &ct->ctbs.g2h;

		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
						 desc_read(xe, g2h, tail),
						 g2h->info.size,
						 g2h->info.space,
						 g2h_fence ?
						 GUC_CTB_HXG_MSG_MAX_LEN :
						 g2h_len);

#define g2h_avail(ct) \
	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
					g2h_avail(ct), HZ))
			goto broken;
#undef g2h_avail

		if (dequeue_one_g2h(ct) < 0)
			goto broken;

		goto try_again;
	}

	return ret;

broken:
	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
	xe_guc_ct_print(ct, &p, true);
	ct->ctbs.h2g.info.broken = true;

	return -EDEADLK;
}

static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
{
	int ret;

	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);

	mutex_lock(&ct->lock);
	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
	mutex_unlock(&ct->lock);

	return ret;
}

int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	int ret;

	lockdep_assert_held(&ct->lock);

	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

/*
 * Check if a GT reset is in progress or will occur and if the GT reset brought
 * the CT back up. Arbitrarily picking 5 seconds as an upper limit for the GT
 * reset to complete.
 */
static bool retry_failure(struct xe_guc_ct *ct, int ret)
{
	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
		return false;

#define ct_alive(ct)	\
	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
	 !ct->ctbs.g2h.info.broken)
	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
		return false;
#undef ct_alive

	return true;
}
static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct g2h_fence g2h_fence;
	int ret = 0;

	/*
	 * We use a fence to implement blocking sends / receiving response data.
	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
	 * an xarray is used as the storage media with the seqno being the key.
	 * Fields in the fence hold success, failure, retry status and the
	 * response data. Safe to allocate on the stack as the xarray is the
	 * only reference and it cannot be present after this function exits.
	 */
retry:
	g2h_fence_init(&g2h_fence, response_buffer);
retry_same_fence:
	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
	if (unlikely(ret == -ENOMEM)) {
		/* Retry allocation with GFP_KERNEL */
		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
				      &g2h_fence, GFP_KERNEL));
		if (ret)
			return ret;

		goto retry_same_fence;
	} else if (unlikely(ret)) {
		if (ret == -EDEADLK)
			kick_reset(ct);

		if (no_fail && retry_failure(ct, ret))
			goto retry_same_fence;

		if (!g2h_fence_needs_alloc(&g2h_fence))
			xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);

		return ret;
	}

	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);

	/*
	 * Occasionally it is seen that the G2H worker starts running after a
	 * delay of more than a second even after being queued and activated
	 * by the Linux workqueue subsystem. This leads to a G2H timeout error.
	 * The root cause of the issue lies with the scheduling latency of the
	 * Lunarlake Hybrid CPU. The issue disappears if we disable the
	 * Lunarlake atom cores from the BIOS, which is beyond the xe KMD.
	 *
	 * TODO: Drop this change once the workqueue scheduling delay issue is
	 * fixed on the LNL Hybrid CPU.
	 */
	if (!ret) {
		flush_work(&ct->g2h_worker);
		if (g2h_fence.done) {
			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
				   g2h_fence.seqno, action[0]);
			ret = 1;
		}
	}

	/*
	 * Ensure we serialize with the completion side to prevent UAF with the
	 * fence going out of scope on the stack, since we have no clue if it
	 * will fire after the timeout before we can erase it from the xa. Also
	 * we have some dependent loads and stores below for which we need the
	 * correct ordering, and we lack the needed barriers.
	 */
	mutex_lock(&ct->lock);
	if (!ret) {
		xe_gt_err(gt, "Timed out waiting for G2H, fence %u, action %04x, done %s",
			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
		mutex_unlock(&ct->lock);
		return -ETIME;
	}

	if (g2h_fence.retry) {
		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
			  action[0], g2h_fence.reason);
		mutex_unlock(&ct->lock);
		goto retry;
	}
	if (g2h_fence.fail) {
		xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
			  action[0], g2h_fence.error, g2h_fence.hint);
		ret = -EIO;
	}

	if (ret > 0)
		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;

	mutex_unlock(&ct->lock);

	return ret;
}

/**
 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
 * @ct: the &xe_guc_ct
 * @action: the dword array with `HXG Request`_ message (can't be NULL)
 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
 *
 * Send a `HXG Request`_ message to the GuC over the CT communication channel
 * and block until the GuC replies with a `HXG Response`_ message.
 *
 * For non-blocking communication with the GuC use xe_guc_ct_send().
 *
 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
 *
 * Return: response length (in dwords) if &response_buffer was not NULL, or
 *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
 *         a negative error code on failure.
 */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer)
{
	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
	return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
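/*
 * A minimal usage sketch for xe_guc_ct_send_recv(), assuming a hypothetical
 * two-dword action (the action code and payload layout are illustrative, not
 * a real GuC ABI message):
 *
 *	u32 action[] = { SOME_GUC_ACTION, some_payload_dw };
 *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 *	if (ret < 0)
 *		return ret;	// send failure or error response
 *	// ret holds DATA0 of the HXG Response since no response_buffer given
 */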
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer)
{
	return guc_ct_send_recv(ct, action, len, response_buffer, true);
}

static u32 *msg_to_hxg(u32 *msg)
{
	return msg + GUC_CTB_MSG_MIN_LEN;
}

static u32 msg_len_to_hxg_len(u32 len)
{
	return len - GUC_CTB_MSG_MIN_LEN;
}

static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	lockdep_assert_held(&ct->lock);

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		g2h_release_space(ct, len);
	}

	return 0;
}
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	struct g2h_fence *g2h_fence;

	lockdep_assert_held(&ct->lock);

	/*
	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
	 * Those messages should never fail, so if we do get an error back it
	 * means we're likely doing an illegal operation and the GuC is
	 * rejecting it. We have no way to inform the code that submitted the
	 * H2G that the message was rejected, so we need to escalate the
	 * failure to trigger a reset.
	 */
	if (fence & CT_SEQNO_UNTRACKED) {
		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
				  fence,
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
		else
			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
				  type, fence);

		return -EPROTO;
	}

	g2h_fence = xa_erase(&ct->fence_lookup, fence);
	if (unlikely(!g2h_fence)) {
		/* Don't tear down channel, as send could've timed out */
		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
		return 0;
	}

	xe_gt_assert(gt, fence == g2h_fence->seqno);

	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		g2h_fence->fail = true;
		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		g2h_fence->retry = true;
		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
	} else if (g2h_fence->response_buffer) {
		g2h_fence->response_len = hxg_len;
		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
	} else {
		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
	}

	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);

	g2h_fence->done = true;
	smp_mb();

	wake_up_all(&ct->g2h_fence_wq);

	return 0;
}

static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 origin, type;
	int ret;

	lockdep_assert_held(&ct->lock);

	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
			  origin);
		ct->ctbs.g2h.info.broken = true;

		return -EPROTO;
	}

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	switch (type) {
	case GUC_HXG_TYPE_EVENT:
		ret = parse_g2h_event(ct, msg, len);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		ret = parse_g2h_response(ct, msg, len);
		break;
	default:
		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
			  type);
		ct->ctbs.g2h.info.broken = true;

		ret = -EOPNOTSUPP;
	}

	return ret;
}
static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_guc *guc = ct_to_guc(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action, adj_len;
	u32 *payload;
	int ret = 0;

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
		return 0;

	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
							      adj_len);
		break;
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
		/* Selftest only at the moment */
		break;
	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
		/* FIXME: Handle this */
		break;
	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
								 adj_len);
		break;
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
		ret = xe_guc_access_counter_notify_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
		break;
	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
		break;
	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
		break;
	case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
		ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
		break;
	default:
		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
	}

	if (ret)
		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));

	return 0;
}
static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *g2h = &ct->ctbs.g2h;
	u32 tail, head, len;
	s32 avail;
	u32 action;
	u32 *hxg;

	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	lockdep_assert_held(&ct->fast_lock);

	if (ct->state == XE_GUC_CT_STATE_DISABLED)
		return -ENODEV;

	if (ct->state == XE_GUC_CT_STATE_STOPPED)
		return -ECANCELED;

	if (g2h->info.broken)
		return -EPIPE;

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	/* Calculate DW available to read */
	tail = desc_read(xe, g2h, tail);
	avail = tail - g2h->info.head;
	if (unlikely(avail == 0))
		return 0;

	if (avail < 0)
		avail += g2h->info.size;

	/* Read header */
	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
			   sizeof(u32));
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
	if (len > avail) {
		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
			  avail, len);
		g2h->info.broken = true;

		return -EPROTO;
	}

	head = (g2h->info.head + 1) % g2h->info.size;
	avail = len - 1;

	/* Read G2H message */
	if (avail + head > g2h->info.size) {
		u32 avail_til_wrap = g2h->info.size - head;

		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail_til_wrap * sizeof(u32));
		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
				   &g2h->cmds, 0,
				   (avail - avail_til_wrap) * sizeof(u32));
	} else {
		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail * sizeof(u32));
	}

	hxg = msg_to_hxg(msg);
	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	if (fast_path) {
		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
			return 0;

		switch (action) {
		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
			break;	/* Process these in fast-path */
		default:
			return 0;
		}
	}

	/* Update local / descriptor header */
	g2h->info.head = (head + avail) % g2h->info.size;
	desc_write(xe, g2h, head, g2h->info.head);

	trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
			     action, len, g2h->info.head, tail);

	return len;
}

static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_guc *guc = ct_to_guc(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
	int ret = 0;

	switch (action) {
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		__g2h_release_space(ct, len);
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	default:
		xe_gt_warn(gt, "NOT_POSSIBLE");
	}

	if (ret)
		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));
}

/**
 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
 * @ct: GuC CT object
 *
 * Anything related to page faults is critical for performance, process these
 * critical G2H in the IRQ. This is safe as these handlers either just wake up
 * waiters or queue another worker.
 */
void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	bool ongoing;
	int len;

	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	spin_lock(&ct->fast_lock);
	do {
		len = g2h_read(ct, ct->fast_msg, true);
		if (len > 0)
			g2h_fast_path(ct, ct->fast_msg, len);
	} while (len > 0);
	spin_unlock(&ct->fast_lock);

	if (ongoing)
		xe_pm_runtime_put(xe);
}
/* Returns less than zero on error, 0 on done, 1 on more available */
static int dequeue_one_g2h(struct xe_guc_ct *ct)
{
	int len;
	int ret;

	lockdep_assert_held(&ct->lock);

	spin_lock_irq(&ct->fast_lock);
	len = g2h_read(ct, ct->msg, false);
	spin_unlock_irq(&ct->fast_lock);
	if (len <= 0)
		return len;

	ret = parse_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	ret = process_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	return 1;
}

static void receive_g2h(struct xe_guc_ct *ct)
{
	struct xe_gt *gt = ct_to_gt(ct);
	bool ongoing;
	int ret;

	/*
	 * Normal users must always hold mem_access.ref around CT calls. However
	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
	 * at this stage we can't rely on mem_access.ref and even the
	 * callback_task will be different from current. For such cases we just
	 * need to ensure we always process the responses from any blocking
	 * ct_send requests or where we otherwise expect some response when
	 * initiated from those callbacks (which will need to wait for the below
	 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
	 * the device has suspended to the point that the CT communication has
	 * been disabled.
	 *
	 * If we are inside the runtime pm callback, we can be the only task
	 * still issuing CT requests (since that requires having the
	 * mem_access.ref). It seems like it might in theory be possible to
	 * receive unsolicited events from the GuC just as we are
	 * suspending-resuming, but those will currently anyway be lost when
	 * eventually exiting from suspend, hence no need to wake up the device
	 * here. If we ever need something stronger than get_if_ongoing() then
	 * we need to be careful with blocking the pm callbacks from getting CT
	 * responses, if the worker here is blocked on those callbacks
	 * completing, creating a deadlock.
	 */
	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	do {
		mutex_lock(&ct->lock);
		ret = dequeue_one_g2h(ct);
		mutex_unlock(&ct->lock);

		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
			struct drm_printer p = xe_gt_info_printer(gt);

			xe_guc_ct_print(ct, &p, false);
			kick_reset(ct);
		}
	} while (ret == 1);

	if (ongoing)
		xe_pm_runtime_put(ct_to_xe(ct));
}

static void g2h_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);

	receive_g2h(ct);
}

static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
				     struct guc_ctb_snapshot *snapshot,
				     bool atomic)
{
	u32 head, tail;

	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
			   sizeof(struct guc_ct_buffer_desc));
	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));

	snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
				       atomic ? GFP_ATOMIC : GFP_KERNEL);

	if (!snapshot->cmds) {
		drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
		return;
	}

	head = snapshot->desc.head;
	tail = snapshot->desc.tail;

	if (head != tail) {
		struct iosys_map map =
			IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));

		while (head != tail) {
			snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
			++head;
			if (head == ctb->info.size) {
				head = 0;
				map = ctb->cmds;
			} else {
				iosys_map_incr(&map, sizeof(u32));
			}
		}
	}
}
static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
				   struct drm_printer *p)
{
	u32 head, tail;

	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
	drm_printf(p, "\thead: %d\n", snapshot->info.head);
	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);

	if (!snapshot->cmds)
		return;

	head = snapshot->desc.head;
	tail = snapshot->desc.tail;

	while (head != tail) {
		drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
			   snapshot->cmds[head]);
		++head;
		if (head == snapshot->info.size)
			head = 0;
	}
}

static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
{
	kfree(snapshot->cmds);
}

/**
 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
 * @ct: GuC CT object.
 * @atomic: Boolean to indicate if this is called from atomic context like
 * reset or CTB handler or from some regular path like debugfs.
 *
 * This can be printed out at a later stage, for example during dev_coredump
 * analysis.
 *
 * Returns: a GuC CT snapshot object that must be freed by the caller
 * using xe_guc_ct_snapshot_free().
 */
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
						      bool atomic)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = kzalloc(sizeof(*snapshot),
			   atomic ? GFP_ATOMIC : GFP_KERNEL);

	if (!snapshot) {
		drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
		return NULL;
	}

	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
		snapshot->ct_enabled = true;
		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
					 &snapshot->h2g, atomic);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
					 &snapshot->g2h, atomic);
	}

	return snapshot;
}
/**
 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
 * @snapshot: GuC CT snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC CT snapshot object.
 */
void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
			      struct drm_printer *p)
{
	if (!snapshot)
		return;

	if (snapshot->ct_enabled) {
		drm_puts(p, "H2G CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->h2g, p);

		drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->g2h, p);

		drm_printf(p, "\tg2h outstanding: %d\n",
			   snapshot->g2h_outstanding);
	} else {
		drm_puts(p, "CT disabled\n");
	}
}

/**
 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: GuC CT snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
{
	if (!snapshot)
		return;

	guc_ctb_snapshot_free(&snapshot->h2g);
	guc_ctb_snapshot_free(&snapshot->g2h);
	kfree(snapshot);
}

/**
 * xe_guc_ct_print - GuC CT Print.
 * @ct: GuC CT.
 * @p: drm_printer where it will be printed out.
 * @atomic: Boolean to indicate if this is called from atomic context like
 * reset or CTB handler or from some regular path like debugfs.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
	xe_guc_ct_snapshot_print(snapshot, p);
	xe_guc_ct_snapshot_free(snapshot);
}