// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ct.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>

#include <kunit/static_stub.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace.h"

/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
	u32 *response_buffer;
	u32 seqno;
	u32 response_data;
	u16 response_len;
	u16 error;
	u16 hint;
	u16 reason;
	bool retry;
	bool fail;
	bool done;
};

static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
	g2h_fence->response_buffer = response_buffer;
	g2h_fence->response_data = 0;
	g2h_fence->response_len = 0;
	g2h_fence->fail = false;
	g2h_fence->retry = false;
	g2h_fence->done = false;
	g2h_fence->seqno = ~0x0;
}

static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
	return g2h_fence->seqno == ~0x0;
}

static struct xe_guc *
ct_to_guc(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_guc, ct);
}

static struct xe_gt *
ct_to_gt(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_gt, uc.guc.ct);
}

static struct xe_device *
ct_to_xe(struct xe_guc_ct *ct)
{
	return gt_to_xe(ct_to_gt(ct));
}

/**
 * DOC: GuC CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 * +--------+-----------------------------------------------+------+
 * | offset | contents                                      | size |
 * +========+===============================================+======+
 * | 0x0000 | H2G CTB Descriptor (send)                     |      |
 * +--------+-----------------------------------------------+  4K  |
 * | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
 * +--------+-----------------------------------------------+------+
 * | 0x1000 | H2G CT Buffer (send)                          | n*4K |
 * |        |                                               |      |
 * +--------+-----------------------------------------------+------+
 * | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
 * | + n*4K |                                               |      |
 * +--------+-----------------------------------------------+------+
 *
 * Size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using the GuC submission. In that case each request requires a minimum
 * 2 dwords which gives us a maximum 256 queued requests. Hopefully this is
 * enough space to avoid backpressure on the driver. We increase the size
 * of the receive buffer (relative to the send) to ensure a G2H response
 * CTB has a landing spot.
 */
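/*
 * Illustrative sketch only (the authoritative values are the defines below):
 * with CTB_DESC_SIZE = 2K, CTB_H2G_BUFFER_SIZE = 4K and CTB_G2H_BUFFER_SIZE
 * = 16K, the blob allocated in xe_guc_ct_init() is laid out as
 *
 *	h2g desc = ggtt_addr + 0
 *	g2h desc = ggtt_addr + CTB_DESC_SIZE			(0x0800)
 *	h2g cmds = ggtt_addr + 2 * CTB_DESC_SIZE		(0x1000)
 *	g2h cmds = ggtt_addr + 2 * CTB_DESC_SIZE +
 *		   CTB_H2G_BUFFER_SIZE				(0x2000)
 *
 * which is exactly what guc_ct_ctb_{h2g,g2h}_register() report to the GuC
 * via the self-config KLVs, and guc_ct_size() then evaluates to 24K total.
 */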
#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)

static size_t guc_ct_size(void)
{
	return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
		CTB_G2H_BUFFER_SIZE;
}

static void guc_ct_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_ct *ct = arg;

	xa_destroy(&ct->fence_lookup);
}

static void g2h_worker_func(struct work_struct *w);

static void primelockdep(struct xe_guc_ct *ct)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ct->lock);
	fs_reclaim_release(GFP_KERNEL);
}

int xe_guc_ct_init(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));

	spin_lock_init(&ct->fast_lock);
	xa_init(&ct->fence_lookup);
	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
	init_waitqueue_head(&ct->wq);
	init_waitqueue_head(&ct->g2h_fence_wq);

	err = drmm_mutex_init(&xe->drm, &ct->lock);
	if (err)
		return err;

	primelockdep(ct);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ct->bo = bo;

	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
	if (err)
		return err;

	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
	ct->state = XE_GUC_CT_STATE_DISABLED;
	return 0;
}

#define desc_read(xe_, guc_ctb__, field_)			\
	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_)

#define desc_write(xe_, guc_ctb__, field_, val_)		\
	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_, val_)

static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
{
	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
	h2g->info.resv_space = 0;
	h2g->info.tail = 0;
	h2g->info.head = 0;
	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
				     h2g->info.size) -
			  h2g->info.resv_space;
	h2g->info.broken = false;

	h2g->desc = *map;
	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
}

static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
{
	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
	g2h->info.head = 0;
	g2h->info.tail = 0;
	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
				     g2h->info.size) -
			  g2h->info.resv_space;
	g2h->info.broken = false;

	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
					  CTB_H2G_BUFFER_SIZE);
}

static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo);
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
	size = ct->ctbs.h2g.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
		   CTB_H2G_BUFFER_SIZE;
	size = ct->ctbs.g2h.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
{
	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
			   enable ? GUC_CTB_CONTROL_ENABLE :
			   GUC_CTB_CONTROL_DISABLE),
	};
	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state)
{
	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */

	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
		     state == XE_GUC_CT_STATE_STOPPED);

	ct->g2h_outstanding = 0;
	ct->state = state;

	spin_unlock_irq(&ct->fast_lock);

	/*
	 * Lockdep doesn't like this under the fast lock and the destroy only
	 * needs to be serialized with the send path, which the ct lock
	 * provides.
	 */
	xa_destroy(&ct->fence_lookup);

	mutex_unlock(&ct->lock);
}

int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	int err;

	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));

	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);

	err = guc_ct_ctb_h2g_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_ctb_g2h_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_control_toggle(ct, true);
	if (err)
		goto err_out;

	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);

	smp_mb();
	wake_up_all(&ct->wq);
	xe_gt_dbg(gt, "GuC CT communication channel enabled\n");

	return 0;

err_out:
	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));

	return err;
}

static void stop_g2h_handler(struct xe_guc_ct *ct)
{
	cancel_work_sync(&ct->g2h_worker);
}

/**
 * xe_guc_ct_disable - Set GuC to disabled state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
 * in this transition.
 */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
	stop_g2h_handler(ct);
}

/**
 * xe_guc_ct_stop - Set GuC to stopped state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
 */
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
	stop_g2h_handler(ct);
}

static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{
	struct guc_ctb *h2g = &ct->ctbs.h2g;

	lockdep_assert_held(&ct->lock);

	if (cmd_len > h2g->info.space) {
		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
					     h2g->info.size) -
				  h2g->info.resv_space;
		if (cmd_len > h2g->info.space)
			return false;
	}

	return true;
}

static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
{
	if (!g2h_len)
		return true;

	lockdep_assert_held(&ct->fast_lock);

	return ct->ctbs.g2h.info.space > g2h_len;
}

static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
{
	lockdep_assert_held(&ct->lock);

	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
		return -EBUSY;

	return 0;
}

static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
{
	lockdep_assert_held(&ct->lock);
	ct->ctbs.h2g.info.space -= cmd_len;
}

static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);

	if (g2h_len) {
		lockdep_assert_held(&ct->fast_lock);

		ct->ctbs.g2h.info.space -= g2h_len;
		ct->g2h_outstanding += num_g2h;
	}
}

static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	lockdep_assert_held(&ct->fast_lock);
	xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);

	ct->ctbs.g2h.info.space += g2h_len;
	--ct->g2h_outstanding;
}

static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	spin_lock_irq(&ct->fast_lock);
	__g2h_release_space(ct, g2h_len);
	spin_unlock_irq(&ct->fast_lock);
}
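/*
 * Illustrative credit arithmetic only (the values follow from the defines and
 * init code above, nothing here is additional ABI): with CTB_H2G_BUFFER_SIZE
 * of 4K the H2G ring holds 1024 dwords, so it starts out with
 * CIRC_SPACE(0, 0, 1024) = 1023 dwords of space; with CTB_G2H_BUFFER_SIZE of
 * 16K and G2H_ROOM_BUFFER_SIZE of 4K the G2H ring starts out with
 * CIRC_SPACE(0, 0, 4096) - 1024 = 3071 dwords of space. h2g_reserve_space()
 * and __g2h_reserve_space() subtract from these counters when a message is
 * written or an outstanding G2H reply is expected, and g2h_release_space()
 * returns G2H credits once the reply has been consumed.
 */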
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */

static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *h2g = &ct->ctbs.h2g;
	u32 cmd[H2G_CT_HEADERS];
	u32 tail = h2g->info.tail;
	u32 full_len;
	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
						     tail * sizeof(u32));

	full_len = len + GUC_CTB_HDR_LEN;

	lockdep_assert_held(&ct->lock);
	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
	xe_gt_assert(gt, tail <= h2g->info.size);

	/* Command will wrap, zero fill (NOPs), return and check credits again */
	if (tail + full_len > h2g->info.size) {
		xe_map_memset(xe, &map, 0, 0,
			      (h2g->info.size - tail) * sizeof(u32));
		h2g_reserve_space(ct, (h2g->info.size - tail));
		h2g->info.tail = 0;
		desc_write(xe, h2g, tail, h2g->info.tail);

		return -EAGAIN;
	}

	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 * dw2+: action data
	 */
	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
	if (want_response) {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	} else {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	}

	/* H2G header in cmd[1] replaces action[0] so: */
	--len;
	++action;

	/* Write H2G, ensuring it is visible before the descriptor update */
	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
	xe_device_wmb(xe);

	/* Update local copies */
	h2g->info.tail = (tail + full_len) % h2g->info.size;
	h2g_reserve_space(ct, full_len);

	/* Update descriptor */
	desc_write(xe, h2g, tail, h2g->info.tail);

	trace_xe_guc_ctb_h2g(gt->info.id, *(action - 1), full_len,
			     desc_read(xe, h2g, head), h2g->info.tail);

	return 0;
}

/*
 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
 * driver, the GuC will just copy it to the reply message. Since we need to
 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
 * we use one bit of the seqno as an indicator for that and a rolling counter
 * for the remaining 15 bits.
 */
#define CT_SEQNO_MASK GENMASK(14, 0)
#define CT_SEQNO_UNTRACKED BIT(15)
static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
{
	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= CT_SEQNO_UNTRACKED;

	return seqno;
}
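/*
 * Purely illustrative: with CT_SEQNO_MASK = GENMASK(14, 0) and
 * CT_SEQNO_UNTRACKED = BIT(15), a blocking send whose rolling counter is at
 * 0x0042 gets fence 0x0042 (tracked in ct->fence_lookup), while a
 * fire-and-forget send at the same counter value gets fence 0x8042
 * (untracked). parse_g2h_response() keys off CT_SEQNO_UNTRACKED to decide
 * whether to look the fence up or to treat any failure as a FAST_REQUEST
 * protocol error.
 */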
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
{
	struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
	u16 seqno;
	int ret;

	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	xe_gt_assert(gt, !num_g2h || !g2h_fence);
	xe_gt_assert(gt, !g2h_len || num_g2h);
	xe_gt_assert(gt, g2h_len || !num_g2h);
	lockdep_assert_held(&ct->lock);

	if (unlikely(ct->ctbs.h2g.info.broken)) {
		ret = -EPIPE;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
		ret = -ENODEV;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_STOPPED) {
		ret = -ECANCELED;
		goto out;
	}

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	if (g2h_fence) {
		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
		num_g2h = 1;

		if (g2h_fence_needs_alloc(g2h_fence)) {
			void *ptr;

			g2h_fence->seqno = next_ct_seqno(ct, true);
			ptr = xa_store(&ct->fence_lookup,
				       g2h_fence->seqno,
				       g2h_fence, GFP_ATOMIC);
			if (IS_ERR(ptr)) {
				ret = PTR_ERR(ptr);
				goto out;
			}
		}

		seqno = g2h_fence->seqno;
	} else {
		seqno = next_ct_seqno(ct, false);
	}

	if (g2h_len)
		spin_lock_irq(&ct->fast_lock);
retry:
	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
	if (unlikely(ret))
		goto out_unlock;

	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
	if (unlikely(ret)) {
		if (ret == -EAGAIN)
			goto retry;
		goto out_unlock;
	}

	__g2h_reserve_space(ct, g2h_len, num_g2h);
	xe_guc_notify(ct_to_guc(ct));
out_unlock:
	if (g2h_len)
		spin_unlock_irq(&ct->fast_lock);
out:
	return ret;
}

static void kick_reset(struct xe_guc_ct *ct)
{
	xe_gt_reset_async(ct_to_gt(ct));
}

static int dequeue_one_g2h(struct xe_guc_ct *ct);

static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct drm_printer p = xe_gt_info_printer(gt);
	unsigned int sleep_period_ms = 1;
	int ret;

	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	lockdep_assert_held(&ct->lock);
	xe_device_assert_mem_access(ct_to_xe(ct));

try_again:
	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
				   g2h_fence);

	/*
	 * We wait to try to restore credits for about 1 second before bailing.
	 * In the case of H2G credits we have no choice but just to wait for the
	 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
	 * the case of G2H we process any G2H in the channel, hopefully freeing
	 * credits as we consume the G2H messages.
	 */
	if (unlikely(ret == -EBUSY &&
		     !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
		struct guc_ctb *h2g = &ct->ctbs.h2g;

		if (sleep_period_ms == 1024)
			goto broken;

		trace_xe_guc_ct_h2g_flow_control(h2g->info.head, h2g->info.tail,
						 h2g->info.size,
						 h2g->info.space,
						 len + GUC_CTB_HDR_LEN);
		msleep(sleep_period_ms);
		sleep_period_ms <<= 1;

		goto try_again;
	} else if (unlikely(ret == -EBUSY)) {
		struct xe_device *xe = ct_to_xe(ct);
		struct guc_ctb *g2h = &ct->ctbs.g2h;

		trace_xe_guc_ct_g2h_flow_control(g2h->info.head,
						 desc_read(xe, g2h, tail),
						 g2h->info.size,
						 g2h->info.space,
						 g2h_fence ?
						 GUC_CTB_HXG_MSG_MAX_LEN :
						 g2h_len);

#define g2h_avail(ct)	\
	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
					g2h_avail(ct), HZ))
			goto broken;
#undef g2h_avail

		if (dequeue_one_g2h(ct) < 0)
			goto broken;

		goto try_again;
	}

	return ret;

broken:
	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
	xe_guc_ct_print(ct, &p, true);
	ct->ctbs.h2g.info.broken = true;

	return -EDEADLK;
}

static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
{
	int ret;

	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);

	mutex_lock(&ct->lock);
	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
	mutex_unlock(&ct->lock);

	return ret;
}

int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	int ret;

	lockdep_assert_held(&ct->lock);

	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

/*
 * Check if a GT reset is in progress or will occur and if GT reset brought the
 * CT back up. Randomly picking 5 seconds for an upper limit to do a GT reset.
 */
static bool retry_failure(struct xe_guc_ct *ct, int ret)
{
	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
		return false;

#define ct_alive(ct)	\
	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
	 !ct->ctbs.g2h.info.broken)
	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
		return false;
#undef ct_alive

	return true;
}

static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct g2h_fence g2h_fence;
	int ret = 0;

	/*
	 * We use a fence to implement blocking sends / receiving response data.
	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
	 * an xarray is used as storage media with the seqno being the key.
	 * Fields in the fence hold success, failure, retry status and the
	 * response data. Safe to allocate on the stack as the xarray is the
	 * only reference and it cannot be present after this function exits.
	 */
retry:
	g2h_fence_init(&g2h_fence, response_buffer);
retry_same_fence:
	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
	if (unlikely(ret == -ENOMEM)) {
		void *ptr;

		/* Retry allocation with GFP_KERNEL */
		ptr = xa_store(&ct->fence_lookup,
			       g2h_fence.seqno,
			       &g2h_fence, GFP_KERNEL);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		goto retry_same_fence;
	} else if (unlikely(ret)) {
		if (ret == -EDEADLK)
			kick_reset(ct);

		if (no_fail && retry_failure(ct, ret))
			goto retry_same_fence;

		if (!g2h_fence_needs_alloc(&g2h_fence))
			xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);

		return ret;
	}

	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
	if (!ret) {
		xe_gt_err(gt, "Timed out waiting for G2H, fence %u, action %04x",
			  g2h_fence.seqno, action[0]);
		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
		return -ETIME;
	}

	if (g2h_fence.retry) {
		xe_gt_warn(gt, "H2G retry, action 0x%04x, reason %u",
			   action[0], g2h_fence.reason);
		goto retry;
	}
	if (g2h_fence.fail) {
		xe_gt_err(gt, "H2G send failed, action 0x%04x, error %d, hint %u",
			  action[0], g2h_fence.error, g2h_fence.hint);
		ret = -EIO;
	}

	return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
}

/**
 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
 * @ct: the &xe_guc_ct
 * @action: the dword array with `HXG Request`_ message (can't be NULL)
 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
 *
 * Send a `HXG Request`_ message to the GuC over the CT communication channel
 * and block until the GuC replies with a `HXG Response`_ message.
 *
 * For non-blocking communication with GuC use xe_guc_ct_send().
 *
 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
 *
 * Return: response length (in dwords) if &response_buffer was not NULL, or
 *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
 *         a negative error code on failure.
 */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer)
{
	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
	return guc_ct_send_recv(ct, action, len, response_buffer, false);
}

int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer)
{
	return guc_ct_send_recv(ct, action, len, response_buffer, true);
}
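/*
 * A minimal usage sketch (hypothetical action and parameters, shown only to
 * illustrate the calling convention): callers pass the raw action code in
 * action[0] and any payload dwords after it; h2g_write() builds the CTB and
 * HXG headers around them.
 *
 *	u32 action[] = { XE_GUC_ACTION_FOO, param0, param1 };
 *	int ret;
 *
 *	ret = xe_guc_ct_send_recv(&guc->ct, action, ARRAY_SIZE(action), NULL);
 *	if (ret < 0)
 *		return ret;
 *	// ret now holds DATA0 from the HXG response, since no buffer was given
 *
 * XE_GUC_ACTION_FOO, param0 and param1 are placeholders, not part of this
 * file.
 */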
static u32 *msg_to_hxg(u32 *msg)
{
	return msg + GUC_CTB_MSG_MIN_LEN;
}

static u32 msg_len_to_hxg_len(u32 len)
{
	return len - GUC_CTB_MSG_MIN_LEN;
}

static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	lockdep_assert_held(&ct->lock);

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		g2h_release_space(ct, len);
	}

	return 0;
}

static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	struct g2h_fence *g2h_fence;

	lockdep_assert_held(&ct->lock);

	/*
	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
	 * Those messages should never fail, so if we do get an error back it
	 * means we're likely doing an illegal operation and the GuC is
	 * rejecting it. We have no way to inform the code that submitted the
	 * H2G that the message was rejected, so we need to escalate the
	 * failure to trigger a reset.
	 */
	if (fence & CT_SEQNO_UNTRACKED) {
		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
				  fence,
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
		else
			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
				  type, fence);

		return -EPROTO;
	}

	g2h_fence = xa_erase(&ct->fence_lookup, fence);
	if (unlikely(!g2h_fence)) {
		/* Don't tear down channel, as send could've timed out */
		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
		return 0;
	}

	xe_gt_assert(gt, fence == g2h_fence->seqno);

	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		g2h_fence->fail = true;
		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		g2h_fence->retry = true;
		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
	} else if (g2h_fence->response_buffer) {
		g2h_fence->response_len = hxg_len;
		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
	} else {
		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
	}

	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);

	g2h_fence->done = true;
	smp_mb();

	wake_up_all(&ct->g2h_fence_wq);

	return 0;
}

static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 origin, type;
	int ret;

	lockdep_assert_held(&ct->lock);

	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
			  origin);
		ct->ctbs.g2h.info.broken = true;

		return -EPROTO;
	}

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	switch (type) {
	case GUC_HXG_TYPE_EVENT:
		ret = parse_g2h_event(ct, msg, len);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		ret = parse_g2h_response(ct, msg, len);
		break;
	default:
		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
			  type);
		ct->ctbs.g2h.info.broken = true;

		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_guc *guc = ct_to_guc(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action, adj_len;
	u32 *payload;
	int ret = 0;

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
		return 0;

	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
							      adj_len);
		break;
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
		/* Selftest only at the moment */
		break;
	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
		/* FIXME: Handle this */
		break;
	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
								 adj_len);
		break;
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
		ret = xe_guc_access_counter_notify_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
		break;
	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
		break;
	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
		break;
	default:
		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
	}

	if (ret)
		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));

	return 0;
}

static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *g2h = &ct->ctbs.g2h;
	u32 tail, head, len;
	s32 avail;
	u32 action;
	u32 *hxg;

	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	lockdep_assert_held(&ct->fast_lock);

	if (ct->state == XE_GUC_CT_STATE_DISABLED)
		return -ENODEV;

	if (ct->state == XE_GUC_CT_STATE_STOPPED)
		return -ECANCELED;

	if (g2h->info.broken)
		return -EPIPE;

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	/* Calculate DW available to read */
	tail = desc_read(xe, g2h, tail);
	avail = tail - g2h->info.head;
	if (unlikely(avail == 0))
		return 0;

	if (avail < 0)
		avail += g2h->info.size;

	/* Read header */
	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
			   sizeof(u32));
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
	if (len > avail) {
		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
			  avail, len);
		g2h->info.broken = true;

		return -EPROTO;
	}

	head = (g2h->info.head + 1) % g2h->info.size;
	avail = len - 1;

	/* Read G2H message */
	if (avail + head > g2h->info.size) {
		u32 avail_til_wrap = g2h->info.size - head;

		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail_til_wrap * sizeof(u32));
		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
				   &g2h->cmds, 0,
				   (avail - avail_til_wrap) * sizeof(u32));
	} else {
		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail * sizeof(u32));
	}

	hxg = msg_to_hxg(msg);
	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	if (fast_path) {
		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
			return 0;

		switch (action) {
		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
			break;	/* Process these in fast-path */
		default:
			return 0;
		}
	}

	/* Update local / descriptor header */
	g2h->info.head = (head + avail) % g2h->info.size;
	desc_write(xe, g2h, head, g2h->info.head);

	trace_xe_guc_ctb_g2h(ct_to_gt(ct)->info.id, action, len,
			     g2h->info.head, tail);

	return len;
}

static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_guc *guc = ct_to_guc(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
	int ret = 0;

	switch (action) {
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		__g2h_release_space(ct, len);
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	default:
		xe_gt_warn(gt, "NOT_POSSIBLE");
	}

	if (ret)
		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));
}

/**
 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
 * @ct: GuC CT object
 *
 * Anything related to page faults is critical for performance, so process
 * these critical G2H in the IRQ. This is safe as these handlers either just
 * wake up waiters or queue another worker.
 */
void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	bool ongoing;
	int len;

	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	spin_lock(&ct->fast_lock);
	do {
		len = g2h_read(ct, ct->fast_msg, true);
		if (len > 0)
			g2h_fast_path(ct, ct->fast_msg, len);
	} while (len > 0);
	spin_unlock(&ct->fast_lock);

	if (ongoing)
		xe_pm_runtime_put(xe);
}

/* Returns less than zero on error, 0 on done, 1 on more available */
static int dequeue_one_g2h(struct xe_guc_ct *ct)
{
	int len;
	int ret;

	lockdep_assert_held(&ct->lock);

	spin_lock_irq(&ct->fast_lock);
	len = g2h_read(ct, ct->msg, false);
	spin_unlock_irq(&ct->fast_lock);
	if (len <= 0)
		return len;

	ret = parse_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	ret = process_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	return 1;
}

static void g2h_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
	struct xe_gt *gt = ct_to_gt(ct);
	bool ongoing;
	int ret;

	/*
	 * Normal users must always hold mem_access.ref around CT calls. However
	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
	 * at this stage we can't rely on mem_access.ref and even the
	 * callback_task will be different than current. For such cases we just
	 * need to ensure we always process the responses from any blocking
	 * ct_send requests or where we otherwise expect some response when
	 * initiated from those callbacks (which will need to wait for the below
	 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
	 * the device has suspended to the point that the CT communication has
	 * been disabled.
	 *
	 * If we are inside the runtime pm callback, we can be the only task
	 * still issuing CT requests (since that requires having the
	 * mem_access.ref). It seems like it might in theory be possible to
	 * receive unsolicited events from the GuC just as we are
	 * suspending-resuming, but those will currently anyway be lost when
	 * eventually exiting from suspend, hence no need to wake up the device
	 * here. If we ever need something stronger than get_if_ongoing() then
	 * we need to be careful with blocking the pm callbacks from getting CT
	 * responses, if the worker here is blocked on those callbacks
	 * completing, creating a deadlock.
	 */
	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	do {
		mutex_lock(&ct->lock);
		ret = dequeue_one_g2h(ct);
		mutex_unlock(&ct->lock);

		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
			struct drm_printer p = xe_gt_info_printer(gt);

			xe_guc_ct_print(ct, &p, false);
			kick_reset(ct);
		}
	} while (ret == 1);

	if (ongoing)
		xe_pm_runtime_put(ct_to_xe(ct));
}

static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
				     struct guc_ctb_snapshot *snapshot,
				     bool atomic)
{
	u32 head, tail;

	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
			   sizeof(struct guc_ct_buffer_desc));
	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));

	snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
				       atomic ? GFP_ATOMIC : GFP_KERNEL);

	if (!snapshot->cmds) {
		drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
		return;
	}

	head = snapshot->desc.head;
	tail = snapshot->desc.tail;

	if (head != tail) {
		struct iosys_map map =
			IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));

		while (head != tail) {
			snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
			++head;
			if (head == ctb->info.size) {
				head = 0;
				map = ctb->cmds;
			} else {
				iosys_map_incr(&map, sizeof(u32));
			}
		}
	}
}

static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
				   struct drm_printer *p)
{
	u32 head, tail;

	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
	drm_printf(p, "\thead: %d\n", snapshot->info.head);
	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);

	if (!snapshot->cmds)
		return;

	head = snapshot->desc.head;
	tail = snapshot->desc.tail;

	while (head != tail) {
		drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
			   snapshot->cmds[head]);
		++head;
		if (head == snapshot->info.size)
			head = 0;
	}
}

static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
{
	kfree(snapshot->cmds);
}

/**
 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
 * @ct: GuC CT object.
 * @atomic: Boolean to indicate if this is called from atomic context like
 * reset or CTB handler or from some regular path like debugfs.
 *
 * This can be printed out in a later stage like during dev_coredump
 * analysis.
 *
 * Returns: a GuC CT snapshot object that must be freed by the caller
 * by using `xe_guc_ct_snapshot_free`.
 */
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
						      bool atomic)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = kzalloc(sizeof(*snapshot),
			   atomic ? GFP_ATOMIC : GFP_KERNEL);

	if (!snapshot) {
		drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
		return NULL;
	}

	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
		snapshot->ct_enabled = true;
		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
					 &snapshot->h2g, atomic);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
					 &snapshot->g2h, atomic);
	}

	return snapshot;
}

/**
 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
 * @snapshot: GuC CT snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC CT snapshot object.
 */
void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
			      struct drm_printer *p)
{
	if (!snapshot)
		return;

	if (snapshot->ct_enabled) {
		drm_puts(p, "H2G CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->h2g, p);

		drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->g2h, p);

		drm_printf(p, "\tg2h outstanding: %d\n",
			   snapshot->g2h_outstanding);
	} else {
		drm_puts(p, "CT disabled\n");
	}
}

/**
 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: GuC CT snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
{
	if (!snapshot)
		return;

	guc_ctb_snapshot_free(&snapshot->h2g);
	guc_ctb_snapshot_free(&snapshot->g2h);
	kfree(snapshot);
}

/**
 * xe_guc_ct_print - GuC CT Print.
 * @ct: GuC CT.
 * @p: drm_printer where it will be printed out.
 * @atomic: Boolean to indicate if this is called from atomic context like
 * reset or CTB handler or from some regular path like debugfs.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
	xe_guc_ct_snapshot_print(snapshot, p);
	xe_guc_ct_snapshot_free(snapshot);
}
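/*
 * A minimal deferred-use sketch (illustrative only; the error-capture code
 * that actually does this lives outside this file): capture the CT state at
 * error time with atomic semantics, keep the snapshot around, then print and
 * free it later, e.g. when a coredump is read out:
 *
 *	snapshot = xe_guc_ct_snapshot_capture(ct, true);
 *	...
 *	xe_guc_ct_snapshot_print(snapshot, p);
 *	xe_guc_ct_snapshot_free(snapshot);
 *
 * xe_guc_ct_print() above is the immediate, non-deferred form of the same
 * sequence.
 */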