// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/delay.h>

#include <drm/drm_managed.h>

#include <kunit/static_stub.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_relay_actions_abi.h"
#include "abi/guc_relay_communication_abi.h"

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_guc_relay_types.h"
#include "xe_sriov.h"

/*
 * How long should we wait for the response?
 * XXX this value is subject for the profiling.
 */
#define RELAY_TIMEOUT_MSEC	(2500)

static void relays_worker_fn(struct work_struct *w);

/* Resolve the &xe_guc that embeds this relay. */
static struct xe_guc *relay_to_guc(struct xe_guc_relay *relay)
{
	return container_of(relay, struct xe_guc, relay);
}

/* Resolve the CTB channel used to exchange relay messages with the GuC. */
static struct xe_guc_ct *relay_to_ct(struct xe_guc_relay *relay)
{
	return &relay_to_guc(relay)->ct;
}

/* Resolve the GT that owns this relay (used for asserts and printks). */
static struct xe_gt *relay_to_gt(struct xe_guc_relay *relay)
{
	return guc_to_gt(relay_to_guc(relay));
}

/* Resolve the &xe_device that owns this relay. */
static struct xe_device *relay_to_xe(struct xe_guc_relay *relay)
{
	return gt_to_xe(relay_to_gt(relay));
}

#define relay_assert(relay, condition)	xe_gt_assert(relay_to_gt(relay), condition)
#define relay_notice(relay, msg...)	xe_gt_sriov_notice(relay_to_gt(relay), "relay: " msg)
#define relay_debug(relay, msg...)	xe_gt_sriov_dbg_verbose(relay_to_gt(relay), "relay: " msg)

/*
 * Number of potential remote peers: all VFs when running as the PF,
 * none when running as a VF (a VF only ever talks to the PF).
 */
static int relay_get_totalvfs(struct xe_guc_relay *relay)
{
	struct xe_device *xe = relay_to_xe(relay);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	KUNIT_STATIC_STUB_REDIRECT(relay_get_totalvfs, relay);
	return IS_SRIOV_VF(xe) ? 0 : pci_sriov_get_totalvfs(pdev);
}

/* The mempool is the last thing set up by xe_guc_relay_init(). */
static bool relay_is_ready(struct xe_guc_relay *relay)
{
	return mempool_initialized(&relay->pool);
}

/*
 * Allocate the next relay message identifier (RID) for an outgoing request.
 * Note: RIDs simply wrap on overflow; uniqueness among in-flight transactions
 * is assumed, not enforced here.
 */
static u32 relay_get_next_rid(struct xe_guc_relay *relay)
{
	u32 rid;

	spin_lock(&relay->lock);
	rid = ++relay->last_rid;
	spin_unlock(&relay->lock);

	return rid;
}

/**
 * struct relay_transaction - internal data used to handle transactions
 *
 * Relation between struct relay_transaction members::
 *
 *      <-------------------- GUC_CTB_MAX_DWORDS -------------->
 *                       <-------- GUC_RELAY_MSG_MAX_LEN --->
 *      <--- offset --->  <--- request_len ------->
 *     +----------------+-------------------------+----------+--+
 *     |                |                         |          |  |
 *     +----------------+-------------------------+----------+--+
 *     ^                ^
 *      \               |
 *       request_buf    request
 *
 *      <-------------------- GUC_CTB_MAX_DWORDS -------------->
 *                       <-------- GUC_RELAY_MSG_MAX_LEN --->
 *      <--- offset --->  <--- response_len --->
 *     +----------------+----------------------+-------------+--+
 *     |                |                      |             |  |
 *     +----------------+----------------------+-------------+--+
 *     ^                ^
 *      \               |
 *       response_buf   response
 */
struct relay_transaction {
	/**
	 * @incoming: indicates whether this transaction represents an incoming
	 *            request from the remote VF/PF or this transaction
	 *            represents outgoing request to the remote VF/PF.
	 */
	bool incoming;

	/**
	 * @remote: PF/VF identifier of the origin (or target) of the relay
	 *          request message.
	 */
	u32 remote;

	/** @rid: identifier of the VF/PF relay message. */
	u32 rid;

	/**
	 * @request: points to the inner VF/PF request message, copied to the
	 *           #request_buf starting at #offset.
	 */
	u32 *request;

	/** @request_len: length of the inner VF/PF request message. */
	u32 request_len;

	/**
	 * @response: points to the placeholder buffer where inner VF/PF
	 *            response will be located, for outgoing transaction
	 *            this could be caller's buffer (if provided) otherwise
	 *            it points to the #response_buf starting at #offset.
	 */
	u32 *response;

	/**
	 * @response_len: length of the inner VF/PF response message (only
	 *                if #status is 0), initially set to the size of the
	 *                placeholder buffer where response message will be
	 *                copied.
	 */
	u32 response_len;

	/**
	 * @offset: offset to the start of the inner VF/PF relay message inside
	 *          buffers; this offset is equal the length of the outer GuC
	 *          relay header message.
	 */
	u32 offset;

	/**
	 * @request_buf: buffer with VF/PF request message including outer
	 *               transport message.
	 */
	u32 request_buf[GUC_CTB_MAX_DWORDS];

	/**
	 * @response_buf: buffer with VF/PF response message including outer
	 *                transport message.
	 */
	u32 response_buf[GUC_CTB_MAX_DWORDS];

	/**
	 * @reply: status of the reply, 0 means that data pointed by the
	 *         #response is valid.
	 */
	int reply;

	/** @done: completion of the outgoing transaction. */
	struct completion done;

	/** @link: transaction list link */
	struct list_head link;
};

/*
 * Fill the outer PF2GUC_RELAY_TO_VF header (used only on the PF side).
 * Returns the header length in dwords, which becomes the transaction offset.
 */
static u32 prepare_pf2guc(u32 *msg, u32 target, u32 rid)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF);
	msg[1] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, target);
	msg[2] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, rid);

	return PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN;
}

/*
 * Fill the outer VF2GUC_RELAY_TO_PF header (used only on the VF side).
 * Returns the header length in dwords, which becomes the transaction offset.
 */
static u32 prepare_vf2guc(u32 *msg, u32 rid)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_VF2GUC_RELAY_TO_PF);
	msg[1] = FIELD_PREP(VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID, rid);

	return VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN;
}

/*
 * Allocate and initialize a transaction from the mempool.
 *
 * For outgoing transactions the outer header is prepared in @request_buf;
 * for incoming ones it is prepared in @response_buf (the eventual reply).
 * A @remote of 0 means the peer is the PF (VF side), non-zero selects a VF
 * (PF side). Returns ERR_PTR(-ENODEV) if the relay is not initialized, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct relay_transaction *
__relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u32 rid,
			const u32 *action, u32 action_len, u32 *resp, u32 resp_size)
{
	struct relay_transaction *txn;

	relay_assert(relay, action_len >= GUC_RELAY_MSG_MIN_LEN);
	relay_assert(relay, action_len <= GUC_RELAY_MSG_MAX_LEN);
	relay_assert(relay, !(!!resp ^ !!resp_size));
	relay_assert(relay, resp_size <= GUC_RELAY_MSG_MAX_LEN);
	relay_assert(relay, resp_size == 0 || resp_size >= GUC_RELAY_MSG_MIN_LEN);

	if (unlikely(!relay_is_ready(relay)))
		return ERR_PTR(-ENODEV);

	/*
	 * For incoming requests we can't use GFP_KERNEL as those are delivered
	 * with CTB lock held which is marked as used in the reclaim path.
	 * Btw, that's one of the reasons why we use mempool here!
	 */
	txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL);
	if (!txn)
		return ERR_PTR(-ENOMEM);

	txn->incoming = incoming;
	txn->remote = remote;
	txn->rid = rid;
	txn->offset = remote ?
		prepare_pf2guc(incoming ? txn->response_buf : txn->request_buf, remote, rid) :
		prepare_vf2guc(incoming ? txn->response_buf : txn->request_buf, rid);

	relay_assert(relay, txn->offset);
	relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->request_buf));
	relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->response_buf));

	txn->request = txn->request_buf + txn->offset;
	memcpy(&txn->request_buf[txn->offset], action, sizeof(u32) * action_len);
	txn->request_len = action_len;

	txn->response = resp ?: txn->response_buf + txn->offset;
	txn->response_len = resp_size ?: GUC_RELAY_MSG_MAX_LEN;
	txn->reply = -ENOMSG;
	INIT_LIST_HEAD(&txn->link);
	init_completion(&txn->done);

	return txn;
}

/* New outgoing transaction with a freshly allocated RID. */
static struct relay_transaction *
relay_new_transaction(struct xe_guc_relay *relay, u32 target, const u32 *action, u32 len,
		      u32 *resp, u32 resp_size)
{
	u32 rid = relay_get_next_rid(relay);

	return __relay_get_transaction(relay, false, target, rid, action, len, resp, resp_size);
}

/* New incoming transaction, reusing the RID chosen by the remote origin. */
static struct relay_transaction *
relay_new_incoming_transaction(struct xe_guc_relay *relay, u32 origin, u32 rid,
			       const u32 *action, u32 len)
{
	return __relay_get_transaction(relay, true, origin, rid, action, len, NULL, 0);
}

/* Poison key fields and return the transaction to the mempool. */
static void relay_release_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn)
{
	relay_assert(relay, list_empty(&txn->link));

	txn->offset = 0;
	txn->response = NULL;
	txn->reply = -ESTALE;
	mempool_free(txn, &relay->pool);
}

/*
 * Push the transaction's message (request for outgoing, response for incoming)
 * over the CTB, including the outer GuC relay header that occupies the first
 * @offset dwords of the buffer.
 *
 * Return: 0 on success or a negative error code (-EPROTO if the GuC
 * unexpectedly returned data).
 */
static int relay_send_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn)
{
	u32 len = txn->incoming ? txn->response_len : txn->request_len;
	u32 *buf = txn->incoming ? txn->response_buf : txn->request_buf;
	u32 *msg = buf + txn->offset;
	int ret;

	relay_assert(relay, txn->offset);
	relay_assert(relay, txn->offset + len <= GUC_CTB_MAX_DWORDS);
	relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN);
	relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN);

	relay_debug(relay, "sending %s.%u to %u = %*ph\n",
		    guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])),
		    txn->rid, txn->remote, (int)sizeof(u32) * len, msg);

	ret = xe_guc_ct_send_block(relay_to_ct(relay), buf, len + txn->offset);

	if (unlikely(ret > 0)) {
		relay_notice(relay, "Unexpected data=%d from GuC, wrong ABI?\n", ret);
		ret = -EPROTO;
	}
	if (unlikely(ret < 0)) {
		relay_notice(relay, "Failed to send %s.%x to GuC (%pe) %*ph ...\n",
			     guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, buf[0])),
			     FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, buf[0]),
			     ERR_PTR(ret), (int)sizeof(u32) * txn->offset, buf);
		relay_notice(relay, "Failed to send %s.%u to %u (%pe) %*ph\n",
			     guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])),
			     txn->rid, txn->remote, ERR_PTR(ret), (int)sizeof(u32) * len, msg);
	}

	return ret;
}

/* drmm cleanup action: tear down the transaction mempool. */
static void __fini_relay(struct drm_device *drm, void *arg)
{
	struct xe_guc_relay *relay = arg;

	mempool_exit(&relay->pool);
}

/**
 * xe_guc_relay_init - Initialize a &xe_guc_relay
 * @relay: the &xe_guc_relay to initialize
 *
 * Initialize remaining members of &xe_guc_relay that may depend
 * on the SR-IOV mode.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_relay_init(struct xe_guc_relay *relay)
{
	const int XE_RELAY_MEMPOOL_MIN_NUM = 1;
	struct xe_device *xe = relay_to_xe(relay);
	int err;

	relay_assert(relay, !relay_is_ready(relay));

	/* nothing to do when not in SR-IOV mode (relay stays "not ready") */
	if (!IS_SRIOV(xe))
		return 0;

	spin_lock_init(&relay->lock);
	INIT_WORK(&relay->worker, relays_worker_fn);
	INIT_LIST_HEAD(&relay->pending_relays);
	INIT_LIST_HEAD(&relay->incoming_actions);

	/* one spare element plus one per potential VF peer */
	err = mempool_init_kmalloc_pool(&relay->pool, XE_RELAY_MEMPOOL_MIN_NUM +
					relay_get_totalvfs(relay),
					sizeof(struct relay_transaction));
	if (err)
		return err;

	relay_debug(relay, "using mempool with %d elements\n", relay->pool.min_nr);

	return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay);
}

static u32 to_relay_error(int err)
{
	/* XXX: assume that relay errors match errno codes */
	return err < 0 ? -err : GUC_RELAY_ERROR_UNDISCLOSED;
}

static int from_relay_error(u32 error)
{
	/* XXX: assume that relay errors match errno codes */
	return error ? -error : -ENODATA;
}

static u32 sanitize_relay_error(u32 error)
{
	/* XXX TBD if generic error codes will be allowed */
	if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		error = GUC_RELAY_ERROR_UNDISCLOSED;
	return error;
}

static u32 sanitize_relay_error_hint(u32 hint)
{
	/* XXX TBD if generic error codes will be allowed */
	if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		hint = 0;
	return hint;
}

/* Encode a HXG RESPONSE_FAILURE message; returns its length in dwords. */
static u32 prepare_error_reply(u32 *msg, u32 error, u32 hint)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) |
		 FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) |
		 FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error);

	XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_ERROR, error));
	XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_HINT, hint));

	return GUC_HXG_FAILURE_MSG_LEN;
}

/* Hook point for KUnit tests to observe BUSY handling; no-op otherwise. */
static void relay_testonly_nop(struct xe_guc_relay *relay)
{
	KUNIT_STATIC_STUB_REDIRECT(relay_testonly_nop, relay);
}

/*
 * Send an outgoing request and block until a final reply arrives or the
 * timeout expires. BUSY replies extend the wait, RETRY replies resend the
 * request. The @buf/@buf_size placeholder was already captured in @txn by
 * __relay_get_transaction(), so they are not referenced here.
 *
 * Return: non-negative response length (in dwords) or a negative error code.
 */
static int relay_send_message_and_wait(struct xe_guc_relay *relay,
				       struct relay_transaction *txn,
				       u32 *buf, u32 buf_size)
{
	unsigned long timeout = msecs_to_jiffies(RELAY_TIMEOUT_MSEC);
	u32 *msg = &txn->request_buf[txn->offset];
	u32 len = txn->request_len;
	u32 type, action, data0;
	int ret;
	long n;

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
	data0 = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);

	relay_debug(relay, "%s.%u to %u action %#x:%u\n",
		    guc_hxg_type_to_string(type),
		    txn->rid, txn->remote, action, data0);

	/* list ordering does not need to match RID ordering */
	spin_lock(&relay->lock);
	list_add_tail(&txn->link, &relay->pending_relays);
	spin_unlock(&relay->lock);

resend:
	ret = relay_send_transaction(relay, txn);
	if (unlikely(ret < 0))
		goto unlink;

wait:
	n = wait_for_completion_timeout(&txn->done, timeout);
	/* timed out with no reply recorded (reply still holds -ENOMSG) */
	if (unlikely(n == 0 && txn->reply)) {
		ret = -ETIME;
		goto unlink;
	}

	relay_debug(relay, "%u.%u reply %d after %u msec\n",
		    txn->remote, txn->rid, txn->reply, jiffies_to_msecs(timeout - n));
	if (unlikely(txn->reply)) {
		reinit_completion(&txn->done);
		if (txn->reply == -EAGAIN)
			goto resend;
		if (txn->reply == -EBUSY) {
			relay_testonly_nop(relay);
			goto wait;
		}
		if (txn->reply > 0)
			ret = from_relay_error(txn->reply);
		else
			ret = txn->reply;
		goto unlink;
	}

	relay_debug(relay, "%u.%u response %*ph\n", txn->remote, txn->rid,
		    (int)sizeof(u32) * txn->response_len, txn->response);
	relay_assert(relay, txn->response_len >= GUC_RELAY_MSG_MIN_LEN);
	ret = txn->response_len;

unlink:
	spin_lock(&relay->lock);
	list_del_init(&txn->link);
	spin_unlock(&relay->lock);

	if (unlikely(ret < 0)) {
		relay_notice(relay, "Unsuccessful %s.%u %#x:%u to %u (%pe) %*ph\n",
			     guc_hxg_type_to_string(type), txn->rid,
			     action, data0, txn->remote, ERR_PTR(ret),
			     (int)sizeof(u32) * len, msg);
	}

	return ret;
}

/*
 * Common send path for both PF and VF: wraps @msg into a transaction and
 * dispatches by HXG message type (blocking for REQUEST, fire-and-forget for
 * EVENT; FAST_REQUEST is not supported over relay and is asserted against).
 */
static int relay_send_to(struct xe_guc_relay *relay, u32 target,
			 const u32 *msg, u32 len, u32 *buf, u32 buf_size)
{
	struct relay_transaction *txn;
	int ret;

	relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN);
	relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_HOST);
	relay_assert(relay, guc_hxg_type_is_action(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])));

	if (unlikely(!relay_is_ready(relay)))
		return -ENODEV;

	txn = relay_new_transaction(relay, target, msg, len, buf, buf_size);
	if (IS_ERR(txn))
		return PTR_ERR(txn);

	switch (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])) {
	case GUC_HXG_TYPE_REQUEST:
		ret = relay_send_message_and_wait(relay, txn, buf, buf_size);
		break;
	case GUC_HXG_TYPE_FAST_REQUEST:
		relay_assert(relay, !GUC_HXG_TYPE_FAST_REQUEST);
		fallthrough;
	case GUC_HXG_TYPE_EVENT:
		ret = relay_send_transaction(relay, txn);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	relay_release_transaction(relay, txn);
	return ret;
}

#ifdef CONFIG_PCI_IOV
/**
 * xe_guc_relay_send_to_vf - Send a message to the VF.
 * @relay: the &xe_guc_relay which will send the message
 * @target: target VF number
 * @msg: request message to be sent
 * @len: length of the request message (in dwords, can't be 0)
 * @buf: placeholder for the response message
 * @buf_size: size of the response message placeholder (in dwords)
 *
 * This function can only be used by the driver running in the SR-IOV PF mode.
 *
 * Return: Non-negative response length (in dwords) or
 *         a negative error code on failure.
 */
int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target,
			    const u32 *msg, u32 len, u32 *buf, u32 buf_size)
{
	relay_assert(relay, IS_SRIOV_PF(relay_to_xe(relay)));

	return relay_send_to(relay, target, msg, len, buf, buf_size);
}
#endif

/**
 * xe_guc_relay_send_to_pf - Send a message to the PF.
 * @relay: the &xe_guc_relay which will send the message
 * @msg: request message to be sent
 * @len: length of the message (in dwords, can't be 0)
 * @buf: placeholder for the response message
 * @buf_size: size of the response message placeholder (in dwords)
 *
 * This function can only be used by driver running in SR-IOV VF mode.
 *
 * Return: Non-negative response length (in dwords) or
 *         a negative error code on failure.
 */
int xe_guc_relay_send_to_pf(struct xe_guc_relay *relay,
			    const u32 *msg, u32 len, u32 *buf, u32 buf_size)
{
	relay_assert(relay, IS_SRIOV_VF(relay_to_xe(relay)));

	return relay_send_to(relay, PFID, msg, len, buf, buf_size);
}

/*
 * Deliver a reply to the pending transaction matching (@origin, @rid):
 * record @reply status (0 means @msg/@len carry a valid response to copy)
 * and wake the waiter. Returns -ESRCH if no matching transaction is pending,
 * -ENOBUFS if the response does not fit in the waiter's placeholder.
 */
static int relay_handle_reply(struct xe_guc_relay *relay, u32 origin,
			      u32 rid, int reply, const u32 *msg, u32 len)
{
	struct relay_transaction *pending;
	int err = -ESRCH;

	spin_lock(&relay->lock);
	list_for_each_entry(pending, &relay->pending_relays, link) {
		if (pending->remote != origin || pending->rid != rid) {
			relay_debug(relay, "%u.%u still awaits response\n",
				    pending->remote, pending->rid);
			continue;
		}
		err = 0; /* found! */
		if (reply == 0) {
			if (len > pending->response_len) {
				reply = -ENOBUFS;
				err = -ENOBUFS;
			} else {
				memcpy(pending->response, msg, 4 * len);
				pending->response_len = len;
			}
		}
		pending->reply = reply;
		complete_all(&pending->done);
		break;
	}
	spin_unlock(&relay->lock);

	return err;
}

/* Decode a RESPONSE_FAILURE message and route it as a (failed) reply. */
static int relay_handle_failure(struct xe_guc_relay *relay, u32 origin,
				u32 rid, const u32 *msg, u32 len)
{
	int error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[0]);
	u32 hint __maybe_unused = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[0]);

	relay_assert(relay, len);
	relay_debug(relay, "%u.%u error %#x (%pe) hint %u debug %*ph\n",
		    origin, rid, error, ERR_PTR(-error), hint, 4 * (len - 1), msg + 1);

	/* a zero error code still means remote failure, map it to -EREMOTEIO */
	return relay_handle_reply(relay, origin, rid, error ?: -EREMOTEIO, NULL, 0);
}

/*
 * Debug-only handler for the VFXPF_TESTLOOP action, exercising the NOP/BUSY/
 * RETRY/ECHO/FAIL protocol paths. Refused (-ECONNREFUSED) unless
 * CONFIG_DRM_XE_DEBUG_SRIOV is enabled.
 *
 * NOTE(review): @last_reply is function-local static state shared across all
 * relays/callers; appears acceptable for this debug-only path — confirm.
 */
static int relay_testloop_action_handler(struct xe_guc_relay *relay, u32 origin,
					 const u32 *msg, u32 len, u32 *response, u32 size)
{
	static ktime_t last_reply = 0;
	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
	u32 action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
	u32 opcode = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
	ktime_t now = ktime_get();
	bool busy;
	int ret;

	relay_assert(relay, guc_hxg_type_is_action(type));
	relay_assert(relay, action == GUC_RELAY_ACTION_VFXPF_TESTLOOP);

	if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV))
		return -ECONNREFUSED;

	/* report "busy" for a window of 2x timeout since the last real reply */
	if (!last_reply)
		last_reply = now;
	busy = ktime_before(now, ktime_add_ms(last_reply, 2 * RELAY_TIMEOUT_MSEC));
	if (!busy)
		last_reply = now;

	switch (opcode) {
	case VFXPF_TESTLOOP_OPCODE_NOP:
		if (type == GUC_HXG_TYPE_EVENT)
			return 0;
		return guc_hxg_msg_encode_success(response, 0);
	case VFXPF_TESTLOOP_OPCODE_BUSY:
		if (type == GUC_HXG_TYPE_EVENT)
			return -EPROTO;
		msleep(RELAY_TIMEOUT_MSEC / 8);
		if (busy)
			return -EINPROGRESS;
		return guc_hxg_msg_encode_success(response, 0);
	case VFXPF_TESTLOOP_OPCODE_RETRY:
		if (type == GUC_HXG_TYPE_EVENT)
			return -EPROTO;
		msleep(RELAY_TIMEOUT_MSEC / 8);
		if (busy)
			return guc_hxg_msg_encode_retry(response, 0);
		return guc_hxg_msg_encode_success(response, 0);
	case VFXPF_TESTLOOP_OPCODE_ECHO:
		if (type == GUC_HXG_TYPE_EVENT)
			return -EPROTO;
		if (size < len)
			return -ENOBUFS;
		ret = guc_hxg_msg_encode_success(response, len);
		memcpy(response + ret, msg + ret, (len - ret) * sizeof(u32));
		return len;
	case VFXPF_TESTLOOP_OPCODE_FAIL:
		return -EHWPOISON;
	default:
		break;
	}

	relay_notice(relay, "Unexpected action %#x opcode %#x\n", action, opcode);
	return -EBADRQC;
}

/*
 * Dispatch an incoming relay action to its handler. Currently only the
 * TESTLOOP action is implemented; everything else is -EOPNOTSUPP.
 *
 * Return: encoded response length (in dwords) on success, 0 for handled
 * events without a response, or a negative error code.
 */
static int relay_action_handler(struct xe_guc_relay *relay, u32 origin,
				const u32 *msg, u32 len, u32 *response, u32 size)
{
	u32 type;
	int ret;

	relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN);

	if (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]) == GUC_RELAY_ACTION_VFXPF_TESTLOOP)
		return relay_testloop_action_handler(relay, origin, msg, len, response, size);

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);

	/* XXX: PF services will be added later */
	ret = -EOPNOTSUPP;

	/* events never produce a response message */
	if (type == GUC_HXG_TYPE_EVENT)
		relay_assert(relay, ret <= 0);

	return ret;
}

/* Pop the oldest queued incoming action, or NULL if the queue is empty. */
static struct relay_transaction *relay_dequeue_transaction(struct xe_guc_relay *relay)
{
	struct relay_transaction *txn;

	spin_lock(&relay->lock);
	txn = list_first_entry_or_null(&relay->incoming_actions, struct relay_transaction, link);
	if (txn)
		list_del_init(&txn->link);
	spin_unlock(&relay->lock);

	return txn;
}

/*
 * Worker body: handle one queued incoming action. A handler returning
 * -EINPROGRESS produces a BUSY reply and requeues the transaction; a
 * positive return is a response to send; a negative return is converted
 * into a RESPONSE_FAILURE reply (sanitized when the peer is a VF).
 */
static void relay_process_incoming_action(struct xe_guc_relay *relay)
{
	struct relay_transaction *txn;
	bool again = false;
	u32 type;
	int ret;

	txn = relay_dequeue_transaction(relay);
	if (!txn)
		return;

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, txn->request_buf[txn->offset]);

	ret = relay_action_handler(relay, txn->remote,
				   txn->request_buf + txn->offset, txn->request_len,
				   txn->response_buf + txn->offset,
				   ARRAY_SIZE(txn->response_buf) - txn->offset);

	if (ret == -EINPROGRESS) {
		again = true;
		ret = guc_hxg_msg_encode_busy(txn->response_buf + txn->offset, 0);
	}

	if (ret > 0) {
		txn->response_len = ret;
		ret = relay_send_transaction(relay, txn);
	}

	if (ret < 0) {
		u32 error = to_relay_error(ret);

		relay_notice(relay, "Failed to handle %s.%u from %u (%pe) %*ph\n",
			     guc_hxg_type_to_string(type), txn->rid, txn->remote,
			     ERR_PTR(ret), 4 * txn->request_len, txn->request_buf + txn->offset);

		txn->response_len = prepare_error_reply(txn->response_buf + txn->offset,
							txn->remote ?
							sanitize_relay_error(error) : error,
							txn->remote ?
							sanitize_relay_error_hint(-ret) : -ret);
		ret = relay_send_transaction(relay, txn);
		again = false;
	}

	if (again) {
		spin_lock(&relay->lock);
		list_add(&txn->link, &relay->incoming_actions);
		spin_unlock(&relay->lock);
		return;
	}

	if (unlikely(ret < 0))
		relay_notice(relay, "Failed to process action.%u (%pe) %*ph\n",
			     txn->rid, ERR_PTR(ret), 4 * txn->request_len,
			     txn->request_buf + txn->offset);

	relay_release_transaction(relay, txn);
}

/* More queued actions remain to be processed? */
static bool relay_needs_worker(struct xe_guc_relay *relay)
{
	return !list_empty(&relay->incoming_actions);
}

/* Schedule the relay worker on the device's SR-IOV workqueue. */
static void relay_kick_worker(struct xe_guc_relay *relay)
{
	KUNIT_STATIC_STUB_REDIRECT(relay_kick_worker, relay);
	queue_work(relay_to_xe(relay)->sriov.wq, &relay->worker);
}

/* Process one action per invocation, rescheduling while work remains. */
static void relays_worker_fn(struct work_struct *w)
{
	struct xe_guc_relay *relay = container_of(w, struct xe_guc_relay, worker);

	relay_process_incoming_action(relay);

	if (relay_needs_worker(relay))
		relay_kick_worker(relay);
}

/*
 * Queue an incoming action message for deferred handling by the worker
 * (we may be called with the CTB lock held, so no blocking work here).
 */
static int relay_queue_action_msg(struct xe_guc_relay *relay, u32 origin, u32 rid,
				  const u32 *msg, u32 len)
{
	struct relay_transaction *txn;

	txn = relay_new_incoming_transaction(relay, origin, rid, msg, len);
	if (IS_ERR(txn))
		return PTR_ERR(txn);

	spin_lock(&relay->lock);
	list_add_tail(&txn->link, &relay->incoming_actions);
	spin_unlock(&relay->lock);

	relay_kick_worker(relay);
	return 0;
}

/*
 * Route an inner relay message by its HXG type: actions are queued for the
 * worker, replies (success/busy/retry/failure) are delivered to the pending
 * transaction that awaits them.
 */
static int relay_process_msg(struct xe_guc_relay *relay, u32 origin, u32 rid,
			     const u32 *msg, u32 len)
{
	u32 type;
	int err;

	if (unlikely(len < GUC_HXG_MSG_MIN_LEN))
		return -EPROTO;

	if (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_HOST)
		return -EPROTO;

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
	relay_debug(relay, "received %s.%u from %u = %*ph\n",
		    guc_hxg_type_to_string(type), rid, origin, 4 * len, msg);

	switch (type) {
	case GUC_HXG_TYPE_REQUEST:
	case GUC_HXG_TYPE_FAST_REQUEST:
	case GUC_HXG_TYPE_EVENT:
		err = relay_queue_action_msg(relay, origin, rid, msg, len);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
		err = relay_handle_reply(relay, origin, rid, 0, msg, len);
		break;
	case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
		err = relay_handle_reply(relay, origin, rid, -EBUSY, NULL, 0);
		break;
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		err = relay_handle_reply(relay, origin, rid, -EAGAIN, NULL, 0);
		break;
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
		err = relay_handle_failure(relay, origin, rid, msg, len);
		break;
	default:
		err = -EBADRQC;
	}

	if (unlikely(err))
		relay_notice(relay, "Failed to process %s.%u from %u (%pe) %*ph\n",
			     guc_hxg_type_to_string(type), rid, origin,
			     ERR_PTR(err), 4 * len, msg);

	return err;
}

/**
 * xe_guc_relay_process_guc2vf - Handle relay notification message from the GuC.
 * @relay: the &xe_guc_relay which will handle the message
 * @msg: message to be handled
 * @len: length of the message (in dwords)
 *
 * This function will handle relay messages received from the GuC.
 *
 * This function can only be used if driver is running in SR-IOV mode.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_relay_process_guc2vf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
{
	u32 rid;

	relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
	relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
		     XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF);

	/* KUnit tests are allowed through even when not running as a VF */
	if (unlikely(!IS_SRIOV_VF(relay_to_xe(relay)) && !kunit_get_current_test()))
		return -EPERM;

	if (unlikely(!relay_is_ready(relay)))
		return -ENODEV;

	if (unlikely(len < GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN))
		return -EPROTO;

	if (unlikely(len > GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN))
		return -EMSGSIZE;

	if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0])))
		return -EPFNOSUPPORT;

	rid = FIELD_GET(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, msg[1]);

	/* strip the outer GUC2VF header; a VF's peer is always the PF */
	return relay_process_msg(relay, PFID, rid,
				 msg + GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN,
				 len - GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN);
}

#ifdef CONFIG_PCI_IOV
/**
 * xe_guc_relay_process_guc2pf - Handle relay notification message from the GuC.
 * @relay: the &xe_guc_relay which will handle the message
 * @msg: message to be handled
 * @len: length of the message (in dwords)
 *
 * This function will handle relay messages received from the GuC.
 *
 * This function can only be used if driver is running in SR-IOV PF mode.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
{
	u32 origin, rid;
	int err;

	relay_assert(relay, len >= GUC_HXG_EVENT_MSG_MIN_LEN);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
	relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
		     XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF);

	/* KUnit tests are allowed through even when not running as the PF */
	if (unlikely(!IS_SRIOV_PF(relay_to_xe(relay)) && !kunit_get_current_test()))
		return -EPERM;

	if (unlikely(!relay_is_ready(relay)))
		return -ENODEV;

	if (unlikely(len < GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN))
		return -EPROTO;

	if (unlikely(len > GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN))
		return -EMSGSIZE;

	if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0])))
		return -EPFNOSUPPORT;

	origin = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, msg[1]);
	rid = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, msg[2]);

	/* reject VF identifiers beyond what the device supports */
	if (unlikely(origin > relay_get_totalvfs(relay)))
		return -ENOENT;

	err = relay_process_msg(relay, origin, rid,
				msg + GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN,
				len - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN);

	return err;
}
#endif

#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_guc_relay_test.c"
#endif