1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2022 Intel Corporation 4 */ 5 6 #include "xe_guc_ct.h" 7 8 #include <linux/bitfield.h> 9 #include <linux/circ_buf.h> 10 #include <linux/delay.h> 11 12 #include <kunit/static_stub.h> 13 14 #include <drm/drm_managed.h> 15 16 #include "abi/guc_actions_abi.h" 17 #include "abi/guc_actions_sriov_abi.h" 18 #include "abi/guc_klvs_abi.h" 19 #include "xe_bo.h" 20 #include "xe_device.h" 21 #include "xe_gt.h" 22 #include "xe_gt_pagefault.h" 23 #include "xe_gt_printk.h" 24 #include "xe_gt_sriov_pf_control.h" 25 #include "xe_gt_sriov_pf_monitor.h" 26 #include "xe_gt_tlb_invalidation.h" 27 #include "xe_guc.h" 28 #include "xe_guc_relay.h" 29 #include "xe_guc_submit.h" 30 #include "xe_map.h" 31 #include "xe_pm.h" 32 #include "xe_trace_guc.h" 33 34 /* Used when a CT send wants to block and / or receive data */ 35 struct g2h_fence { 36 u32 *response_buffer; 37 u32 seqno; 38 u32 response_data; 39 u16 response_len; 40 u16 error; 41 u16 hint; 42 u16 reason; 43 bool retry; 44 bool fail; 45 bool done; 46 }; 47 48 static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer) 49 { 50 g2h_fence->response_buffer = response_buffer; 51 g2h_fence->response_data = 0; 52 g2h_fence->response_len = 0; 53 g2h_fence->fail = false; 54 g2h_fence->retry = false; 55 g2h_fence->done = false; 56 g2h_fence->seqno = ~0x0; 57 } 58 59 static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence) 60 { 61 return g2h_fence->seqno == ~0x0; 62 } 63 64 static struct xe_guc * 65 ct_to_guc(struct xe_guc_ct *ct) 66 { 67 return container_of(ct, struct xe_guc, ct); 68 } 69 70 static struct xe_gt * 71 ct_to_gt(struct xe_guc_ct *ct) 72 { 73 return container_of(ct, struct xe_gt, uc.guc.ct); 74 } 75 76 static struct xe_device * 77 ct_to_xe(struct xe_guc_ct *ct) 78 { 79 return gt_to_xe(ct_to_gt(ct)); 80 } 81 82 /** 83 * DOC: GuC CTB Blob 84 * 85 * We allocate a single blob to hold both CTB descriptors and buffers: 86 * 87 * +--------+-----------------------------------------------+------+ 88 * | offset | contents | size | 89 * +========+===============================================+======+ 90 * | 0x0000 | H2G CTB Descriptor (send) | | 91 * +--------+-----------------------------------------------+ 4K | 92 * | 0x0800 | G2H CTB Descriptor (g2h) | | 93 * +--------+-----------------------------------------------+------+ 94 * | 0x1000 | H2G CT Buffer (send) | n*4K | 95 * | | | | 96 * +--------+-----------------------------------------------+------+ 97 * | 0x1000 | G2H CT Buffer (g2h) | m*4K | 98 * | + n*4K | | | 99 * +--------+-----------------------------------------------+------+ 100 * 101 * The size of each ``CT Buffer`` must be a multiple of 4K. 102 * We don't expect too many messages in flight at any time, unless we are 103 * using GuC submission. In that case each request requires a minimum of 104 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this is 105 enough space to avoid backpressure on the driver. We increase the size 106 * of the receive buffer (relative to the send) to ensure a G2H response 107 * CTB has a landing spot. 108 */ 109 110 #define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K) 111 #define CTB_H2G_BUFFER_SIZE (SZ_4K) 112 #define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE) 113 #define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4) 114 115 /** 116 * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full 117 * CT command queue 118 * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future. 
119 * 120 * Observation is that a 4KiB buffer full of commands takes a little over a 121 * second to process. Use that to calculate maximum time to process a full CT 122 * command queue. 123 * 124 * Return: Maximum time to process a full CT queue in jiffies. 125 */ 126 long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct) 127 { 128 BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4)); 129 return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ; 130 } 131 132 static size_t guc_ct_size(void) 133 { 134 return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + 135 CTB_G2H_BUFFER_SIZE; 136 } 137 138 static void guc_ct_fini(struct drm_device *drm, void *arg) 139 { 140 struct xe_guc_ct *ct = arg; 141 142 destroy_workqueue(ct->g2h_wq); 143 xa_destroy(&ct->fence_lookup); 144 } 145 146 static void receive_g2h(struct xe_guc_ct *ct); 147 static void g2h_worker_func(struct work_struct *w); 148 static void safe_mode_worker_func(struct work_struct *w); 149 150 static void primelockdep(struct xe_guc_ct *ct) 151 { 152 if (!IS_ENABLED(CONFIG_LOCKDEP)) 153 return; 154 155 fs_reclaim_acquire(GFP_KERNEL); 156 might_lock(&ct->lock); 157 fs_reclaim_release(GFP_KERNEL); 158 } 159 160 int xe_guc_ct_init(struct xe_guc_ct *ct) 161 { 162 struct xe_device *xe = ct_to_xe(ct); 163 struct xe_gt *gt = ct_to_gt(ct); 164 struct xe_tile *tile = gt_to_tile(gt); 165 struct xe_bo *bo; 166 int err; 167 168 xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE)); 169 170 ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0); 171 if (!ct->g2h_wq) 172 return -ENOMEM; 173 174 spin_lock_init(&ct->fast_lock); 175 xa_init(&ct->fence_lookup); 176 INIT_WORK(&ct->g2h_worker, g2h_worker_func); 177 INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func); 178 init_waitqueue_head(&ct->wq); 179 init_waitqueue_head(&ct->g2h_fence_wq); 180 181 err = drmm_mutex_init(&xe->drm, &ct->lock); 182 if (err) 183 return err; 184 185 primelockdep(ct); 186 187 bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(), 188 XE_BO_FLAG_SYSTEM | 189 XE_BO_FLAG_GGTT | 190 XE_BO_FLAG_GGTT_INVALIDATE); 191 if (IS_ERR(bo)) 192 return PTR_ERR(bo); 193 194 ct->bo = bo; 195 196 err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct); 197 if (err) 198 return err; 199 200 xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED); 201 ct->state = XE_GUC_CT_STATE_DISABLED; 202 return 0; 203 } 204 205 #define desc_read(xe_, guc_ctb__, field_) \ 206 xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \ 207 struct guc_ct_buffer_desc, field_) 208 209 #define desc_write(xe_, guc_ctb__, field_, val_) \ 210 xe_map_wr_field(xe_, &guc_ctb__->desc, 0, \ 211 struct guc_ct_buffer_desc, field_, val_) 212 213 static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g, 214 struct iosys_map *map) 215 { 216 h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32); 217 h2g->info.resv_space = 0; 218 h2g->info.tail = 0; 219 h2g->info.head = 0; 220 h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head, 221 h2g->info.size) - 222 h2g->info.resv_space; 223 h2g->info.broken = false; 224 225 h2g->desc = *map; 226 xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc)); 227 228 h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2); 229 } 230 231 static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h, 232 struct iosys_map *map) 233 { 234 g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32); 235 g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32); 236 g2h->info.head = 0; 237 g2h->info.tail = 0; 238 g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head, 239 
g2h->info.size) - 240 g2h->info.resv_space; 241 g2h->info.broken = false; 242 243 g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE); 244 xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc)); 245 246 g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 + 247 CTB_H2G_BUFFER_SIZE); 248 } 249 250 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct) 251 { 252 struct xe_guc *guc = ct_to_guc(ct); 253 u32 desc_addr, ctb_addr, size; 254 int err; 255 256 desc_addr = xe_bo_ggtt_addr(ct->bo); 257 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2; 258 size = ct->ctbs.h2g.info.size * sizeof(u32); 259 260 err = xe_guc_self_cfg64(guc, 261 GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY, 262 desc_addr); 263 if (err) 264 return err; 265 266 err = xe_guc_self_cfg64(guc, 267 GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY, 268 ctb_addr); 269 if (err) 270 return err; 271 272 return xe_guc_self_cfg32(guc, 273 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY, 274 size); 275 } 276 277 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct) 278 { 279 struct xe_guc *guc = ct_to_guc(ct); 280 u32 desc_addr, ctb_addr, size; 281 int err; 282 283 desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE; 284 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 + 285 CTB_H2G_BUFFER_SIZE; 286 size = ct->ctbs.g2h.info.size * sizeof(u32); 287 288 err = xe_guc_self_cfg64(guc, 289 GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY, 290 desc_addr); 291 if (err) 292 return err; 293 294 err = xe_guc_self_cfg64(guc, 295 GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY, 296 ctb_addr); 297 if (err) 298 return err; 299 300 return xe_guc_self_cfg32(guc, 301 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY, 302 size); 303 } 304 305 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable) 306 { 307 u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = { 308 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 309 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 310 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, 311 GUC_ACTION_HOST2GUC_CONTROL_CTB), 312 FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, 313 enable ? GUC_CTB_CONTROL_ENABLE : 314 GUC_CTB_CONTROL_DISABLE), 315 }; 316 int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request)); 317 318 return ret > 0 ? -EPROTO : ret; 319 } 320 321 static void xe_guc_ct_set_state(struct xe_guc_ct *ct, 322 enum xe_guc_ct_state state) 323 { 324 mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */ 325 spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */ 326 327 xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 || 328 state == XE_GUC_CT_STATE_STOPPED); 329 330 if (ct->g2h_outstanding) 331 xe_pm_runtime_put(ct_to_xe(ct)); 332 ct->g2h_outstanding = 0; 333 ct->state = state; 334 335 spin_unlock_irq(&ct->fast_lock); 336 337 /* 338 * Lockdep doesn't like this under the fast lock and he destroy only 339 * needs to be serialized with the send path which ct lock provides. 
340 */ 341 xa_destroy(&ct->fence_lookup); 342 343 mutex_unlock(&ct->lock); 344 } 345 346 static bool ct_needs_safe_mode(struct xe_guc_ct *ct) 347 { 348 return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev)); 349 } 350 351 static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct) 352 { 353 if (!ct_needs_safe_mode(ct)) 354 return false; 355 356 queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10); 357 return true; 358 } 359 360 static void safe_mode_worker_func(struct work_struct *w) 361 { 362 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work); 363 364 receive_g2h(ct); 365 366 if (!ct_restart_safe_mode_worker(ct)) 367 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n"); 368 } 369 370 static void ct_enter_safe_mode(struct xe_guc_ct *ct) 371 { 372 if (ct_restart_safe_mode_worker(ct)) 373 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n"); 374 } 375 376 static void ct_exit_safe_mode(struct xe_guc_ct *ct) 377 { 378 if (cancel_delayed_work_sync(&ct->safe_mode_worker)) 379 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n"); 380 } 381 382 int xe_guc_ct_enable(struct xe_guc_ct *ct) 383 { 384 struct xe_device *xe = ct_to_xe(ct); 385 struct xe_gt *gt = ct_to_gt(ct); 386 int err; 387 388 xe_gt_assert(gt, !xe_guc_ct_enabled(ct)); 389 390 guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); 391 guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); 392 393 err = guc_ct_ctb_h2g_register(ct); 394 if (err) 395 goto err_out; 396 397 err = guc_ct_ctb_g2h_register(ct); 398 if (err) 399 goto err_out; 400 401 err = guc_ct_control_toggle(ct, true); 402 if (err) 403 goto err_out; 404 405 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED); 406 407 smp_mb(); 408 wake_up_all(&ct->wq); 409 xe_gt_dbg(gt, "GuC CT communication channel enabled\n"); 410 411 if (ct_needs_safe_mode(ct)) 412 ct_enter_safe_mode(ct); 413 414 return 0; 415 416 err_out: 417 xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err)); 418 419 return err; 420 } 421 422 static void stop_g2h_handler(struct xe_guc_ct *ct) 423 { 424 cancel_work_sync(&ct->g2h_worker); 425 } 426 427 /** 428 * xe_guc_ct_disable - Set GuC to disabled state 429 * @ct: the &xe_guc_ct 430 * 431 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected 432 * in this transition. 
433 */ 434 void xe_guc_ct_disable(struct xe_guc_ct *ct) 435 { 436 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED); 437 ct_exit_safe_mode(ct); 438 stop_g2h_handler(ct); 439 } 440 441 /** 442 * xe_guc_ct_stop - Set GuC to stopped state 443 * @ct: the &xe_guc_ct 444 * 445 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h 446 */ 447 void xe_guc_ct_stop(struct xe_guc_ct *ct) 448 { 449 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED); 450 stop_g2h_handler(ct); 451 } 452 453 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len) 454 { 455 struct guc_ctb *h2g = &ct->ctbs.h2g; 456 457 lockdep_assert_held(&ct->lock); 458 459 if (cmd_len > h2g->info.space) { 460 h2g->info.head = desc_read(ct_to_xe(ct), h2g, head); 461 h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head, 462 h2g->info.size) - 463 h2g->info.resv_space; 464 if (cmd_len > h2g->info.space) 465 return false; 466 } 467 468 return true; 469 } 470 471 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len) 472 { 473 if (!g2h_len) 474 return true; 475 476 lockdep_assert_held(&ct->fast_lock); 477 478 return ct->ctbs.g2h.info.space > g2h_len; 479 } 480 481 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len) 482 { 483 lockdep_assert_held(&ct->lock); 484 485 if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len)) 486 return -EBUSY; 487 488 return 0; 489 } 490 491 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len) 492 { 493 lockdep_assert_held(&ct->lock); 494 ct->ctbs.h2g.info.space -= cmd_len; 495 } 496 497 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) 498 { 499 xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space); 500 xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) || 501 (g2h_len && num_g2h)); 502 503 if (g2h_len) { 504 lockdep_assert_held(&ct->fast_lock); 505 506 if (!ct->g2h_outstanding) 507 xe_pm_runtime_get_noresume(ct_to_xe(ct)); 508 509 ct->ctbs.g2h.info.space -= g2h_len; 510 ct->g2h_outstanding += num_g2h; 511 } 512 } 513 514 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) 515 { 516 lockdep_assert_held(&ct->fast_lock); 517 xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <= 518 ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space); 519 520 ct->ctbs.g2h.info.space += g2h_len; 521 if (!--ct->g2h_outstanding) 522 xe_pm_runtime_put(ct_to_xe(ct)); 523 } 524 525 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) 526 { 527 spin_lock_irq(&ct->fast_lock); 528 __g2h_release_space(ct, g2h_len); 529 spin_unlock_irq(&ct->fast_lock); 530 } 531 532 #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */ 533 534 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, 535 u32 ct_fence_value, bool want_response) 536 { 537 struct xe_device *xe = ct_to_xe(ct); 538 struct xe_gt *gt = ct_to_gt(ct); 539 struct guc_ctb *h2g = &ct->ctbs.h2g; 540 u32 cmd[H2G_CT_HEADERS]; 541 u32 tail = h2g->info.tail; 542 u32 full_len; 543 struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds, 544 tail * sizeof(u32)); 545 546 full_len = len + GUC_CTB_HDR_LEN; 547 548 lockdep_assert_held(&ct->lock); 549 xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN); 550 xe_gt_assert(gt, tail <= h2g->info.size); 551 552 /* Command will wrap, zero fill (NOPs), return and check credits again */ 553 if (tail + full_len > h2g->info.size) { 554 xe_map_memset(xe, &map, 0, 0, 555 (h2g->info.size - tail) * sizeof(u32)); 556 h2g_reserve_space(ct, (h2g->info.size - tail)); 557 
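/* Reset the tail to the start of the buffer and publish it via the descriptor; the caller will re-check credits and retry the write */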
h2g->info.tail = 0; 558 desc_write(xe, h2g, tail, h2g->info.tail); 559 560 return -EAGAIN; 561 } 562 563 /* 564 * dw0: CT header (including fence) 565 * dw1: HXG header (including action code) 566 * dw2+: action data 567 */ 568 cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) | 569 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) | 570 FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value); 571 if (want_response) { 572 cmd[1] = 573 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 574 FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | 575 GUC_HXG_EVENT_MSG_0_DATA0, action[0]); 576 } else { 577 cmd[1] = 578 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) | 579 FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | 580 GUC_HXG_EVENT_MSG_0_DATA0, action[0]); 581 } 582 583 /* H2G header in cmd[1] replaces action[0] so: */ 584 --len; 585 ++action; 586 587 /* Write H2G, ensuring it is visible before the descriptor update */ 588 xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32)); 589 xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32)); 590 xe_device_wmb(xe); 591 592 /* Update local copies */ 593 h2g->info.tail = (tail + full_len) % h2g->info.size; 594 h2g_reserve_space(ct, full_len); 595 596 /* Update descriptor */ 597 desc_write(xe, h2g, tail, h2g->info.tail); 598 599 trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len, 600 desc_read(xe, h2g, head), h2g->info.tail); 601 602 return 0; 603 } 604 605 /* 606 * The CT protocol accepts a 16-bit fence. This field is fully owned by the 607 * driver; the GuC will just copy it to the reply message. Since we need to 608 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages, 609 * we use one bit of the seqno as an indicator for that and a rolling counter 610 * for the remaining 15 bits. 
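 *
 * For illustration, using the masks defined below: a tracked REQUEST whose
 * rolling counter is currently 5 is sent with fence 0x0005, while an
 * untracked FAST_REQUEST with the same counter value is sent with fence
 * 0x8005 (CT_SEQNO_UNTRACKED, bit 15, set).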
611 */ 612 #define CT_SEQNO_MASK GENMASK(14, 0) 613 #define CT_SEQNO_UNTRACKED BIT(15) 614 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence) 615 { 616 u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK; 617 618 if (!is_g2h_fence) 619 seqno |= CT_SEQNO_UNTRACKED; 620 621 return seqno; 622 } 623 624 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, 625 u32 len, u32 g2h_len, u32 num_g2h, 626 struct g2h_fence *g2h_fence) 627 { 628 struct xe_gt *gt __maybe_unused = ct_to_gt(ct); 629 u16 seqno; 630 int ret; 631 632 xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); 633 xe_gt_assert(gt, !g2h_len || !g2h_fence); 634 xe_gt_assert(gt, !num_g2h || !g2h_fence); 635 xe_gt_assert(gt, !g2h_len || num_g2h); 636 xe_gt_assert(gt, g2h_len || !num_g2h); 637 lockdep_assert_held(&ct->lock); 638 639 if (unlikely(ct->ctbs.h2g.info.broken)) { 640 ret = -EPIPE; 641 goto out; 642 } 643 644 if (ct->state == XE_GUC_CT_STATE_DISABLED) { 645 ret = -ENODEV; 646 goto out; 647 } 648 649 if (ct->state == XE_GUC_CT_STATE_STOPPED) { 650 ret = -ECANCELED; 651 goto out; 652 } 653 654 xe_gt_assert(gt, xe_guc_ct_enabled(ct)); 655 656 if (g2h_fence) { 657 g2h_len = GUC_CTB_HXG_MSG_MAX_LEN; 658 num_g2h = 1; 659 660 if (g2h_fence_needs_alloc(g2h_fence)) { 661 void *ptr; 662 663 g2h_fence->seqno = next_ct_seqno(ct, true); 664 ptr = xa_store(&ct->fence_lookup, 665 g2h_fence->seqno, 666 g2h_fence, GFP_ATOMIC); 667 if (IS_ERR(ptr)) { 668 ret = PTR_ERR(ptr); 669 goto out; 670 } 671 } 672 673 seqno = g2h_fence->seqno; 674 } else { 675 seqno = next_ct_seqno(ct, false); 676 } 677 678 if (g2h_len) 679 spin_lock_irq(&ct->fast_lock); 680 retry: 681 ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len); 682 if (unlikely(ret)) 683 goto out_unlock; 684 685 ret = h2g_write(ct, action, len, seqno, !!g2h_fence); 686 if (unlikely(ret)) { 687 if (ret == -EAGAIN) 688 goto retry; 689 goto out_unlock; 690 } 691 692 __g2h_reserve_space(ct, g2h_len, num_g2h); 693 xe_guc_notify(ct_to_guc(ct)); 694 out_unlock: 695 if (g2h_len) 696 spin_unlock_irq(&ct->fast_lock); 697 out: 698 return ret; 699 } 700 701 static void kick_reset(struct xe_guc_ct *ct) 702 { 703 xe_gt_reset_async(ct_to_gt(ct)); 704 } 705 706 static int dequeue_one_g2h(struct xe_guc_ct *ct); 707 708 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, 709 u32 g2h_len, u32 num_g2h, 710 struct g2h_fence *g2h_fence) 711 { 712 struct xe_device *xe = ct_to_xe(ct); 713 struct xe_gt *gt = ct_to_gt(ct); 714 struct drm_printer p = xe_gt_info_printer(gt); 715 unsigned int sleep_period_ms = 1; 716 int ret; 717 718 xe_gt_assert(gt, !g2h_len || !g2h_fence); 719 lockdep_assert_held(&ct->lock); 720 xe_device_assert_mem_access(ct_to_xe(ct)); 721 722 try_again: 723 ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, 724 g2h_fence); 725 726 /* 727 * We wait to try to restore credits for about 1 second before bailing. 728 * In the case of H2G credits we have no choice but just to wait for the 729 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In 730 * the case of G2H we process any G2H in the channel, hopefully freeing 731 * credits as we consume the G2H messages. 
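 *
 * Concretely, the H2G wait below is an exponential backoff: we sleep for
 * 1, 2, 4, ... 512 ms between attempts and declare the channel broken once
 * the period would reach 1024 ms, i.e. after roughly one second of
 * accumulated sleeping.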
732 */ 733 if (unlikely(ret == -EBUSY && 734 !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) { 735 struct guc_ctb *h2g = &ct->ctbs.h2g; 736 737 if (sleep_period_ms == 1024) 738 goto broken; 739 740 trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail, 741 h2g->info.size, 742 h2g->info.space, 743 len + GUC_CTB_HDR_LEN); 744 msleep(sleep_period_ms); 745 sleep_period_ms <<= 1; 746 747 goto try_again; 748 } else if (unlikely(ret == -EBUSY)) { 749 struct xe_device *xe = ct_to_xe(ct); 750 struct guc_ctb *g2h = &ct->ctbs.g2h; 751 752 trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head, 753 desc_read(xe, g2h, tail), 754 g2h->info.size, 755 g2h->info.space, 756 g2h_fence ? 757 GUC_CTB_HXG_MSG_MAX_LEN : 758 g2h_len); 759 760 #define g2h_avail(ct) \ 761 (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head) 762 if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding || 763 g2h_avail(ct), HZ)) 764 goto broken; 765 #undef g2h_avail 766 767 if (dequeue_one_g2h(ct) < 0) 768 goto broken; 769 770 goto try_again; 771 } 772 773 return ret; 774 775 broken: 776 xe_gt_err(gt, "No forward progress on H2G, reset required\n"); 777 xe_guc_ct_print(ct, &p, true); 778 ct->ctbs.h2g.info.broken = true; 779 780 return -EDEADLK; 781 } 782 783 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len, 784 u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence) 785 { 786 int ret; 787 788 xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence); 789 790 mutex_lock(&ct->lock); 791 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence); 792 mutex_unlock(&ct->lock); 793 794 return ret; 795 } 796 797 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len, 798 u32 g2h_len, u32 num_g2h) 799 { 800 int ret; 801 802 ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL); 803 if (ret == -EDEADLK) 804 kick_reset(ct); 805 806 return ret; 807 } 808 809 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, 810 u32 g2h_len, u32 num_g2h) 811 { 812 int ret; 813 814 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL); 815 if (ret == -EDEADLK) 816 kick_reset(ct); 817 818 return ret; 819 } 820 821 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len) 822 { 823 int ret; 824 825 lockdep_assert_held(&ct->lock); 826 827 ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL); 828 if (ret == -EDEADLK) 829 kick_reset(ct); 830 831 return ret; 832 } 833 834 /* 835 * Check if a GT reset is in progress or will occur and if the GT reset brought the 836 * CT back up. Randomly picking 5 seconds for an upper limit to do a GT reset. 837 */ 838 static bool retry_failure(struct xe_guc_ct *ct, int ret) 839 { 840 if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV)) 841 return false; 842 843 #define ct_alive(ct) \ 844 (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \ 845 !ct->ctbs.g2h.info.broken) 846 if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) 847 return false; 848 #undef ct_alive 849 850 return true; 851 } 852 853 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, 854 u32 *response_buffer, bool no_fail) 855 { 856 struct xe_gt *gt = ct_to_gt(ct); 857 struct g2h_fence g2h_fence; 858 int ret = 0; 859 860 /* 861 * We use a fence to implement blocking sends / receiving response data. 862 * The seqno of the fence is sent in the H2G, returned in the G2H, and 863 * an xarray is used as the storage medium with the seqno being the key. 
864 * Fields in the fence hold success, failure, retry status and the 865 * response data. Safe to allocate on the stack as the xarray is the 866 * only reference and it cannot be present after this function exits. 867 */ 868 retry: 869 g2h_fence_init(&g2h_fence, response_buffer); 870 retry_same_fence: 871 ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence); 872 if (unlikely(ret == -ENOMEM)) { 873 void *ptr; 874 875 /* Retry allocation /w GFP_KERNEL */ 876 ptr = xa_store(&ct->fence_lookup, 877 g2h_fence.seqno, 878 &g2h_fence, GFP_KERNEL); 879 if (IS_ERR(ptr)) 880 return PTR_ERR(ptr); 881 882 goto retry_same_fence; 883 } else if (unlikely(ret)) { 884 if (ret == -EDEADLK) 885 kick_reset(ct); 886 887 if (no_fail && retry_failure(ct, ret)) 888 goto retry_same_fence; 889 890 if (!g2h_fence_needs_alloc(&g2h_fence)) 891 xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno); 892 893 return ret; 894 } 895 896 ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ); 897 if (!ret) { 898 xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x", 899 g2h_fence.seqno, action[0]); 900 xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno); 901 return -ETIME; 902 } 903 904 if (g2h_fence.retry) { 905 xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n", 906 action[0], g2h_fence.reason); 907 goto retry; 908 } 909 if (g2h_fence.fail) { 910 xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n", 911 action[0], g2h_fence.error, g2h_fence.hint); 912 ret = -EIO; 913 } 914 915 return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret; 916 } 917 918 /** 919 * xe_guc_ct_send_recv - Send and receive HXG to the GuC 920 * @ct: the &xe_guc_ct 921 * @action: the dword array with `HXG Request`_ message (can't be NULL) 922 * @len: length of the `HXG Request`_ message (in dwords, can't be 0) 923 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL) 924 * 925 * Send a `HXG Request`_ message to the GuC over CT communication channel and 926 * blocks until GuC replies with a `HXG Response`_ message. 927 * 928 * For non-blocking communication with GuC use xe_guc_ct_send(). 929 * 930 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_. 931 * 932 * Return: response length (in dwords) if &response_buffer was not NULL, or 933 * DATA0 from `HXG Response`_ if &response_buffer was NULL, or 934 * a negative error code on failure. 
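 *
 * A minimal usage sketch (the action code and payload below are purely
 * illustrative, not a real GuC action):
 *
 *	u32 action[] = { SOME_GUC_ACTION, data0 };
 *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 * On success, ret holds DATA0 of the matching `HXG Response`_; passing a
 * non-NULL &response_buffer would instead copy the full response there and
 * return its length in dwords.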
935 */ 936 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, 937 u32 *response_buffer) 938 { 939 KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer); 940 return guc_ct_send_recv(ct, action, len, response_buffer, false); 941 } 942 943 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action, 944 u32 len, u32 *response_buffer) 945 { 946 return guc_ct_send_recv(ct, action, len, response_buffer, true); 947 } 948 949 static u32 *msg_to_hxg(u32 *msg) 950 { 951 return msg + GUC_CTB_MSG_MIN_LEN; 952 } 953 954 static u32 msg_len_to_hxg_len(u32 len) 955 { 956 return len - GUC_CTB_MSG_MIN_LEN; 957 } 958 959 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len) 960 { 961 u32 *hxg = msg_to_hxg(msg); 962 u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); 963 964 lockdep_assert_held(&ct->lock); 965 966 switch (action) { 967 case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE: 968 case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE: 969 case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE: 970 case XE_GUC_ACTION_TLB_INVALIDATION_DONE: 971 g2h_release_space(ct, len); 972 } 973 974 return 0; 975 } 976 977 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) 978 { 979 struct xe_gt *gt = ct_to_gt(ct); 980 u32 *hxg = msg_to_hxg(msg); 981 u32 hxg_len = msg_len_to_hxg_len(len); 982 u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]); 983 u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]); 984 struct g2h_fence *g2h_fence; 985 986 lockdep_assert_held(&ct->lock); 987 988 /* 989 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup. 990 * Those messages should never fail, so if we do get an error back it 991 * means we're likely doing an illegal operation and the GuC is 992 * rejecting it. We have no way to inform the code that submitted the 993 * H2G that the message was rejected, so we need to escalate the 994 * failure to trigger a reset. 995 */ 996 if (fence & CT_SEQNO_UNTRACKED) { 997 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) 998 xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! 
e=0x%x, h=%u\n", 999 fence, 1000 FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]), 1001 FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0])); 1002 else 1003 xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n", 1004 type, fence); 1005 1006 return -EPROTO; 1007 } 1008 1009 g2h_fence = xa_erase(&ct->fence_lookup, fence); 1010 if (unlikely(!g2h_fence)) { 1011 /* Don't tear down channel, as send could've timed out */ 1012 xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence); 1013 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); 1014 return 0; 1015 } 1016 1017 xe_gt_assert(gt, fence == g2h_fence->seqno); 1018 1019 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) { 1020 g2h_fence->fail = true; 1021 g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]); 1022 g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]); 1023 } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) { 1024 g2h_fence->retry = true; 1025 g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]); 1026 } else if (g2h_fence->response_buffer) { 1027 g2h_fence->response_len = hxg_len; 1028 memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32)); 1029 } else { 1030 g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]); 1031 } 1032 1033 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); 1034 1035 g2h_fence->done = true; 1036 smp_mb(); 1037 1038 wake_up_all(&ct->g2h_fence_wq); 1039 1040 return 0; 1041 } 1042 1043 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) 1044 { 1045 struct xe_gt *gt = ct_to_gt(ct); 1046 u32 *hxg = msg_to_hxg(msg); 1047 u32 origin, type; 1048 int ret; 1049 1050 lockdep_assert_held(&ct->lock); 1051 1052 origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]); 1053 if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) { 1054 xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n", 1055 origin); 1056 ct->ctbs.g2h.info.broken = true; 1057 1058 return -EPROTO; 1059 } 1060 1061 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]); 1062 switch (type) { 1063 case GUC_HXG_TYPE_EVENT: 1064 ret = parse_g2h_event(ct, msg, len); 1065 break; 1066 case GUC_HXG_TYPE_RESPONSE_SUCCESS: 1067 case GUC_HXG_TYPE_RESPONSE_FAILURE: 1068 case GUC_HXG_TYPE_NO_RESPONSE_RETRY: 1069 ret = parse_g2h_response(ct, msg, len); 1070 break; 1071 default: 1072 xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n", 1073 type); 1074 ct->ctbs.g2h.info.broken = true; 1075 1076 ret = -EOPNOTSUPP; 1077 } 1078 1079 return ret; 1080 } 1081 1082 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) 1083 { 1084 struct xe_guc *guc = ct_to_guc(ct); 1085 struct xe_gt *gt = ct_to_gt(ct); 1086 u32 hxg_len = msg_len_to_hxg_len(len); 1087 u32 *hxg = msg_to_hxg(msg); 1088 u32 action, adj_len; 1089 u32 *payload; 1090 int ret = 0; 1091 1092 if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT) 1093 return 0; 1094 1095 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); 1096 payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN; 1097 adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN; 1098 1099 switch (action) { 1100 case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE: 1101 ret = xe_guc_sched_done_handler(guc, payload, adj_len); 1102 break; 1103 case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE: 1104 ret = xe_guc_deregister_done_handler(guc, payload, adj_len); 1105 break; 1106 case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION: 1107 ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len); 1108 break; 1109 case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION: 1110 ret = 
xe_guc_exec_queue_reset_failure_handler(guc, payload, 1111 adj_len); 1112 break; 1113 case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE: 1114 /* Selftest only at the moment */ 1115 break; 1116 case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION: 1117 case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE: 1118 /* FIXME: Handle this */ 1119 break; 1120 case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR: 1121 ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload, 1122 adj_len); 1123 break; 1124 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC: 1125 ret = xe_guc_pagefault_handler(guc, payload, adj_len); 1126 break; 1127 case XE_GUC_ACTION_TLB_INVALIDATION_DONE: 1128 ret = xe_guc_tlb_invalidation_done_handler(guc, payload, 1129 adj_len); 1130 break; 1131 case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY: 1132 ret = xe_guc_access_counter_notify_handler(guc, payload, 1133 adj_len); 1134 break; 1135 case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF: 1136 ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len); 1137 break; 1138 case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF: 1139 ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len); 1140 break; 1141 case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY: 1142 ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len); 1143 break; 1144 case GUC_ACTION_GUC2PF_ADVERSE_EVENT: 1145 ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len); 1146 break; 1147 default: 1148 xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action); 1149 } 1150 1151 if (ret) 1152 xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n", 1153 action, ERR_PTR(ret)); 1154 1155 return 0; 1156 } 1157 1158 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) 1159 { 1160 struct xe_device *xe = ct_to_xe(ct); 1161 struct xe_gt *gt = ct_to_gt(ct); 1162 struct guc_ctb *g2h = &ct->ctbs.g2h; 1163 u32 tail, head, len; 1164 s32 avail; 1165 u32 action; 1166 u32 *hxg; 1167 1168 xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); 1169 lockdep_assert_held(&ct->fast_lock); 1170 1171 if (ct->state == XE_GUC_CT_STATE_DISABLED) 1172 return -ENODEV; 1173 1174 if (ct->state == XE_GUC_CT_STATE_STOPPED) 1175 return -ECANCELED; 1176 1177 if (g2h->info.broken) 1178 return -EPIPE; 1179 1180 xe_gt_assert(gt, xe_guc_ct_enabled(ct)); 1181 1182 /* Calculate DW available to read */ 1183 tail = desc_read(xe, g2h, tail); 1184 avail = tail - g2h->info.head; 1185 if (unlikely(avail == 0)) 1186 return 0; 1187 1188 if (avail < 0) 1189 avail += g2h->info.size; 1190 1191 /* Read header */ 1192 xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head, 1193 sizeof(u32)); 1194 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN; 1195 if (len > avail) { 1196 xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n", 1197 avail, len); 1198 g2h->info.broken = true; 1199 1200 return -EPROTO; 1201 } 1202 1203 head = (g2h->info.head + 1) % g2h->info.size; 1204 avail = len - 1; 1205 1206 /* Read G2H message */ 1207 if (avail + head > g2h->info.size) { 1208 u32 avail_til_wrap = g2h->info.size - head; 1209 1210 xe_map_memcpy_from(xe, msg + 1, 1211 &g2h->cmds, sizeof(u32) * head, 1212 avail_til_wrap * sizeof(u32)); 1213 xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap, 1214 &g2h->cmds, 0, 1215 (avail - avail_til_wrap) * sizeof(u32)); 1216 } else { 1217 xe_map_memcpy_from(xe, msg + 1, 1218 &g2h->cmds, sizeof(u32) * head, 1219 avail * sizeof(u32)); 1220 } 1221 1222 hxg = msg_to_hxg(msg); 1223 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); 1224 1225 if (fast_path) { 1226 if 
(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT) 1227 return 0; 1228 1229 switch (action) { 1230 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC: 1231 case XE_GUC_ACTION_TLB_INVALIDATION_DONE: 1232 break; /* Process these in fast-path */ 1233 default: 1234 return 0; 1235 } 1236 } 1237 1238 /* Update local / descriptor header */ 1239 g2h->info.head = (head + avail) % g2h->info.size; 1240 desc_write(xe, g2h, head, g2h->info.head); 1241 1242 trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id, 1243 action, len, g2h->info.head, tail); 1244 1245 return len; 1246 } 1247 1248 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) 1249 { 1250 struct xe_gt *gt = ct_to_gt(ct); 1251 struct xe_guc *guc = ct_to_guc(ct); 1252 u32 hxg_len = msg_len_to_hxg_len(len); 1253 u32 *hxg = msg_to_hxg(msg); 1254 u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); 1255 u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN; 1256 u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN; 1257 int ret = 0; 1258 1259 switch (action) { 1260 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC: 1261 ret = xe_guc_pagefault_handler(guc, payload, adj_len); 1262 break; 1263 case XE_GUC_ACTION_TLB_INVALIDATION_DONE: 1264 __g2h_release_space(ct, len); 1265 ret = xe_guc_tlb_invalidation_done_handler(guc, payload, 1266 adj_len); 1267 break; 1268 default: 1269 xe_gt_warn(gt, "NOT_POSSIBLE"); 1270 } 1271 1272 if (ret) 1273 xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n", 1274 action, ERR_PTR(ret)); 1275 } 1276 1277 /** 1278 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler 1279 * @ct: GuC CT object 1280 * 1281 * Anything related to page faults is critical for performance, process these 1282 * critical G2H in the IRQ. This is safe as these handlers either just wake up 1283 * waiters or queue another worker. 1284 */ 1285 void xe_guc_ct_fast_path(struct xe_guc_ct *ct) 1286 { 1287 struct xe_device *xe = ct_to_xe(ct); 1288 bool ongoing; 1289 int len; 1290 1291 ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct)); 1292 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL) 1293 return; 1294 1295 spin_lock(&ct->fast_lock); 1296 do { 1297 len = g2h_read(ct, ct->fast_msg, true); 1298 if (len > 0) 1299 g2h_fast_path(ct, ct->fast_msg, len); 1300 } while (len > 0); 1301 spin_unlock(&ct->fast_lock); 1302 1303 if (ongoing) 1304 xe_pm_runtime_put(xe); 1305 } 1306 1307 /* Returns less than zero on error, 0 on done, 1 on more available */ 1308 static int dequeue_one_g2h(struct xe_guc_ct *ct) 1309 { 1310 int len; 1311 int ret; 1312 1313 lockdep_assert_held(&ct->lock); 1314 1315 spin_lock_irq(&ct->fast_lock); 1316 len = g2h_read(ct, ct->msg, false); 1317 spin_unlock_irq(&ct->fast_lock); 1318 if (len <= 0) 1319 return len; 1320 1321 ret = parse_g2h_msg(ct, ct->msg, len); 1322 if (unlikely(ret < 0)) 1323 return ret; 1324 1325 ret = process_g2h_msg(ct, ct->msg, len); 1326 if (unlikely(ret < 0)) 1327 return ret; 1328 1329 return 1; 1330 } 1331 1332 static void receive_g2h(struct xe_guc_ct *ct) 1333 { 1334 struct xe_gt *gt = ct_to_gt(ct); 1335 bool ongoing; 1336 int ret; 1337 1338 /* 1339 * Normal users must always hold mem_access.ref around CT calls. However 1340 * during the runtime pm callbacks we rely on CT to talk to the GuC, but 1341 * at this stage we can't rely on mem_access.ref and even the 1342 * callback_task will be different than current. 
For such cases we just 1343 * need to ensure we always process the responses from any blocking 1344 * ct_send requests or where we otherwise expect some response when 1345 * initiated from those callbacks (which will need to wait for the below 1346 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if 1347 * the device has suspended to the point that the CT communication has 1348 * been disabled. 1349 * 1350 * If we are inside the runtime pm callback, we can be the only task 1351 * still issuing CT requests (since that requires having the 1352 * mem_access.ref). It seems like it might in theory be possible to 1353 * receive unsolicited events from the GuC just as we are 1354 * suspending-resuming, but those will currently anyway be lost when 1355 * eventually exiting from suspend, hence no need to wake up the device 1356 * here. If we ever need something stronger than get_if_ongoing() then 1357 * we need to be careful with blocking the pm callbacks from getting CT 1358 * responses, if the worker here is blocked on those callbacks 1359 * completing, creating a deadlock. 1360 */ 1361 ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct)); 1362 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL) 1363 return; 1364 1365 do { 1366 mutex_lock(&ct->lock); 1367 ret = dequeue_one_g2h(ct); 1368 mutex_unlock(&ct->lock); 1369 1370 if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) { 1371 struct drm_printer p = xe_gt_info_printer(gt); 1372 1373 xe_guc_ct_print(ct, &p, false); 1374 kick_reset(ct); 1375 } 1376 } while (ret == 1); 1377 1378 if (ongoing) 1379 xe_pm_runtime_put(ct_to_xe(ct)); 1380 } 1381 1382 static void g2h_worker_func(struct work_struct *w) 1383 { 1384 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker); 1385 1386 receive_g2h(ct); 1387 } 1388 1389 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb, 1390 struct guc_ctb_snapshot *snapshot, 1391 bool atomic) 1392 { 1393 u32 head, tail; 1394 1395 xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0, 1396 sizeof(struct guc_ct_buffer_desc)); 1397 memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info)); 1398 1399 snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32), 1400 atomic ? GFP_ATOMIC : GFP_KERNEL); 1401 1402 if (!snapshot->cmds) { 1403 drm_err(&xe->drm, "Skipping CTB commands snapshot. 
Only CTB info will be available.\n"); 1404 return; 1405 } 1406 1407 head = snapshot->desc.head; 1408 tail = snapshot->desc.tail; 1409 1410 if (head != tail) { 1411 struct iosys_map map = 1412 IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32)); 1413 1414 while (head != tail) { 1415 snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32); 1416 ++head; 1417 if (head == ctb->info.size) { 1418 head = 0; 1419 map = ctb->cmds; 1420 } else { 1421 iosys_map_incr(&map, sizeof(u32)); 1422 } 1423 } 1424 } 1425 } 1426 1427 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot, 1428 struct drm_printer *p) 1429 { 1430 u32 head, tail; 1431 1432 drm_printf(p, "\tsize: %d\n", snapshot->info.size); 1433 drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space); 1434 drm_printf(p, "\thead: %d\n", snapshot->info.head); 1435 drm_printf(p, "\ttail: %d\n", snapshot->info.tail); 1436 drm_printf(p, "\tspace: %d\n", snapshot->info.space); 1437 drm_printf(p, "\tbroken: %d\n", snapshot->info.broken); 1438 drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head); 1439 drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail); 1440 drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status); 1441 1442 if (!snapshot->cmds) 1443 return; 1444 1445 head = snapshot->desc.head; 1446 tail = snapshot->desc.tail; 1447 1448 while (head != tail) { 1449 drm_printf(p, "\tcmd[%d]: 0x%08x\n", head, 1450 snapshot->cmds[head]); 1451 ++head; 1452 if (head == snapshot->info.size) 1453 head = 0; 1454 } 1455 } 1456 1457 static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot) 1458 { 1459 kfree(snapshot->cmds); 1460 } 1461 1462 /** 1463 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state. 1464 * @ct: GuC CT object. 1465 * @atomic: Boolean to indicate if this is called from atomic context like 1466 * reset or CTB handler or from some regular path like debugfs. 1467 * 1468 * This can be printed out in a later stage like during dev_coredump 1469 * analysis. 1470 * 1471 * Returns: a GuC CT snapshot object that must be freed by the caller 1472 * by using `xe_guc_ct_snapshot_free`. 1473 */ 1474 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, 1475 bool atomic) 1476 { 1477 struct xe_device *xe = ct_to_xe(ct); 1478 struct xe_guc_ct_snapshot *snapshot; 1479 1480 snapshot = kzalloc(sizeof(*snapshot), 1481 atomic ? GFP_ATOMIC : GFP_KERNEL); 1482 1483 if (!snapshot) { 1484 drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n"); 1485 return NULL; 1486 } 1487 1488 if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) { 1489 snapshot->ct_enabled = true; 1490 snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding); 1491 guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, 1492 &snapshot->h2g, atomic); 1493 guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, 1494 &snapshot->g2h, atomic); 1495 } 1496 1497 return snapshot; 1498 } 1499 1500 /** 1501 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot. 1502 * @snapshot: GuC CT snapshot object. 1503 * @p: drm_printer where it will be printed out. 1504 * 1505 * This function prints out a given GuC CT snapshot object. 
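 *
 * The typical sequence, as used by xe_guc_ct_print(), is to capture, print
 * and then free the snapshot:
 *
 *	snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
 *	xe_guc_ct_snapshot_print(snapshot, p);
 *	xe_guc_ct_snapshot_free(snapshot);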
1506 */ 1507 void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, 1508 struct drm_printer *p) 1509 { 1510 if (!snapshot) 1511 return; 1512 1513 if (snapshot->ct_enabled) { 1514 drm_puts(p, "H2G CTB (all sizes in DW):\n"); 1515 guc_ctb_snapshot_print(&snapshot->h2g, p); 1516 1517 drm_puts(p, "\nG2H CTB (all sizes in DW):\n"); 1518 guc_ctb_snapshot_print(&snapshot->g2h, p); 1519 1520 drm_printf(p, "\tg2h outstanding: %d\n", 1521 snapshot->g2h_outstanding); 1522 } else { 1523 drm_puts(p, "CT disabled\n"); 1524 } 1525 } 1526 1527 /** 1528 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot. 1529 * @snapshot: GuC CT snapshot object. 1530 * 1531 * This function frees all the memory that was allocated at capture 1532 * time. 1533 */ 1534 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot) 1535 { 1536 if (!snapshot) 1537 return; 1538 1539 guc_ctb_snapshot_free(&snapshot->h2g); 1540 guc_ctb_snapshot_free(&snapshot->g2h); 1541 kfree(snapshot); 1542 } 1543 1544 /** 1545 * xe_guc_ct_print - GuC CT Print. 1546 * @ct: GuC CT. 1547 * @p: drm_printer where it will be printed out. 1548 * @atomic: Boolean to indicate if this is called from atomic context like 1549 * reset or CTB handler or from some regular path like debugfs. 1550 * 1551 * This function quickly captures a snapshot and immediately prints it out. 1552 */ 1553 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic) 1554 { 1555 struct xe_guc_ct_snapshot *snapshot; 1556 1557 snapshot = xe_guc_ct_snapshot_capture(ct, atomic); 1558 xe_guc_ct_snapshot_print(snapshot, p); 1559 xe_guc_ct_snapshot_free(snapshot); 1560 } 1561