// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/delay.h>

#include <drm/drm_managed.h>

#include <kunit/static_stub.h>
#include <kunit/test-bug.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_relay_actions_abi.h"
#include "abi/guc_relay_communication_abi.h"

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_guc_relay_types.h"
#include "xe_sriov.h"

/*
 * How long should we wait for the response?
 * XXX this value is subject to profiling.
 */
#define RELAY_TIMEOUT_MSEC	(2500)

static void relays_worker_fn(struct work_struct *w);

static struct xe_guc *relay_to_guc(struct xe_guc_relay *relay)
{
	return container_of(relay, struct xe_guc, relay);
}

static struct xe_guc_ct *relay_to_ct(struct xe_guc_relay *relay)
{
	return &relay_to_guc(relay)->ct;
}

static struct xe_gt *relay_to_gt(struct xe_guc_relay *relay)
{
	return guc_to_gt(relay_to_guc(relay));
}

static struct xe_device *relay_to_xe(struct xe_guc_relay *relay)
{
	return gt_to_xe(relay_to_gt(relay));
}

#define relay_assert(relay, condition)	xe_gt_assert(relay_to_gt(relay), condition)
#define relay_notice(relay, msg...)	xe_gt_sriov_notice(relay_to_gt(relay), "relay: " msg)
#define relay_debug(relay, msg...)	xe_gt_sriov_dbg_verbose(relay_to_gt(relay), "relay: " msg)

static int relay_get_totalvfs(struct xe_guc_relay *relay)
{
	struct xe_device *xe = relay_to_xe(relay);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	KUNIT_STATIC_STUB_REDIRECT(relay_get_totalvfs, relay);
	return IS_SRIOV_VF(xe) ? 0 : pci_sriov_get_totalvfs(pdev);
}

static bool relay_is_ready(struct xe_guc_relay *relay)
{
	return mempool_initialized(&relay->pool);
}

static u32 relay_get_next_rid(struct xe_guc_relay *relay)
{
	u32 rid;

	spin_lock(&relay->lock);
	rid = ++relay->last_rid;
	spin_unlock(&relay->lock);

	return rid;
}

/**
 * struct relay_transaction - internal data used to handle transactions
 *
 * Relation between struct relay_transaction members::
 *
 *      <-------------------- GUC_CTB_MAX_DWORDS -------------->
 *                       <-------- GUC_RELAY_MSG_MAX_LEN --->
 *      <--- offset ---> <--- request_len ------->
 *     +----------------+-------------------------+----------+--+
 *     |                |                         |          |  |
 *     +----------------+-------------------------+----------+--+
 *     ^                ^
 *    /                /
 *   request_buf      request
 *
 *      <-------------------- GUC_CTB_MAX_DWORDS -------------->
 *                       <-------- GUC_RELAY_MSG_MAX_LEN --->
 *      <--- offset ---> <--- response_len --->
 *     +----------------+----------------------+-------------+--+
 *     |                |                      |             |  |
 *     +----------------+----------------------+-------------+--+
 *     ^                ^
 *    /                /
 *   response_buf     response
 */
struct relay_transaction {
	/**
	 * @incoming: indicates whether this transaction represents an incoming
	 *            request from the remote VF/PF or an outgoing request to
	 *            the remote VF/PF.
	 */
	bool incoming;

	/**
	 * @remote: PF/VF identifier of the origin (or target) of the relay
	 *          request message.
	 */
	u32 remote;

	/** @rid: identifier of the VF/PF relay message. */
	u32 rid;

	/**
	 * @request: points to the inner VF/PF request message, copied to the
	 *           #request_buf starting at #offset.
	 */
	u32 *request;

	/** @request_len: length of the inner VF/PF request message. */
	u32 request_len;

	/**
	 * @response: points to the placeholder buffer where the inner VF/PF
	 *            response will be located; for an outgoing transaction
	 *            this could be the caller's buffer (if provided), otherwise
	 *            it points to the #response_buf starting at #offset.
	 */
	u32 *response;

	/**
	 * @response_len: length of the inner VF/PF response message (only
	 *                if #reply is 0), initially set to the size of the
	 *                placeholder buffer where the response message will be
	 *                copied.
	 */
	u32 response_len;

	/**
	 * @offset: offset to the start of the inner VF/PF relay message inside
	 *          buffers; this offset is equal to the length of the outer GuC
	 *          relay header message.
	 */
	u32 offset;

	/**
	 * @request_buf: buffer with the VF/PF request message including the
	 *               outer transport message.
	 */
	u32 request_buf[GUC_CTB_MAX_DWORDS];

	/**
	 * @response_buf: buffer with the VF/PF response message including the
	 *                outer transport message.
	 */
	u32 response_buf[GUC_CTB_MAX_DWORDS];

	/**
	 * @reply: status of the reply, 0 means that the data pointed to by
	 *         #response is valid.
	 */
	int reply;

	/** @done: completion of the outgoing transaction. */
	struct completion done;

	/** @link: transaction list link */
	struct list_head link;
};

static u32 prepare_pf2guc(u32 *msg, u32 target, u32 rid)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF);
	msg[1] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, target);
	msg[2] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, rid);

	return PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN;
}

static u32 prepare_vf2guc(u32 *msg, u32 rid)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_VF2GUC_RELAY_TO_PF);
	msg[1] = FIELD_PREP(VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID, rid);

	return VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN;
}

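/*
 * Allocate a transaction from the mempool and prepare the outer GuC relay
 * header: in the request buffer for an outgoing transaction, or in the
 * response buffer for an incoming one. The inner request message is always
 * copied into the request buffer at the computed offset.
 */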
static struct relay_transaction *
__relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u32 rid,
			const u32 *action, u32 action_len, u32 *resp, u32 resp_size)
{
	struct relay_transaction *txn;

	relay_assert(relay, action_len >= GUC_RELAY_MSG_MIN_LEN);
	relay_assert(relay, action_len <= GUC_RELAY_MSG_MAX_LEN);
	relay_assert(relay, !(!!resp ^ !!resp_size));
	relay_assert(relay, resp_size <= GUC_RELAY_MSG_MAX_LEN);
	relay_assert(relay, resp_size == 0 || resp_size >= GUC_RELAY_MSG_MIN_LEN);

	if (unlikely(!relay_is_ready(relay)))
		return ERR_PTR(-ENODEV);

	/*
	 * For incoming requests we can't use GFP_KERNEL as those are delivered
	 * with the CTB lock held, which is marked as used in the reclaim path.
	 * Btw, that's one of the reasons why we use a mempool here!
	 */
	txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL);
	if (!txn)
		return ERR_PTR(-ENOMEM);

	txn->incoming = incoming;
	txn->remote = remote;
	txn->rid = rid;
	txn->offset = remote ?
		prepare_pf2guc(incoming ? txn->response_buf : txn->request_buf, remote, rid) :
		prepare_vf2guc(incoming ? txn->response_buf : txn->request_buf, rid);

	relay_assert(relay, txn->offset);
	relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->request_buf));
	relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->response_buf));

	txn->request = txn->request_buf + txn->offset;
	memcpy(&txn->request_buf[txn->offset], action, sizeof(u32) * action_len);
	txn->request_len = action_len;

	txn->response = resp ?: txn->response_buf + txn->offset;
	txn->response_len = resp_size ?: GUC_RELAY_MSG_MAX_LEN;
	txn->reply = -ENOMSG;
	INIT_LIST_HEAD(&txn->link);
	init_completion(&txn->done);

	return txn;
}

static struct relay_transaction *
relay_new_transaction(struct xe_guc_relay *relay, u32 target, const u32 *action, u32 len,
		      u32 *resp, u32 resp_size)
{
	u32 rid = relay_get_next_rid(relay);

	return __relay_get_transaction(relay, false, target, rid, action, len, resp, resp_size);
}

static struct relay_transaction *
relay_new_incoming_transaction(struct xe_guc_relay *relay, u32 origin, u32 rid,
			       const u32 *action, u32 len)
{
	return __relay_get_transaction(relay, true, origin, rid, action, len, NULL, 0);
}

static void relay_release_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn)
{
	relay_assert(relay, list_empty(&txn->link));

	txn->offset = 0;
	txn->response = NULL;
	txn->reply = -ESTALE;
	mempool_free(txn, &relay->pool);
}

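/*
 * Send the prepared transaction over the CTB channel: the outer PF2GUC/VF2GUC
 * header followed by the inner relay message (the response buffer for an
 * incoming transaction, the request buffer for an outgoing one).
 */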
static int relay_send_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn)
{
	u32 len = txn->incoming ? txn->response_len : txn->request_len;
	u32 *buf = txn->incoming ? txn->response_buf : txn->request_buf;
	u32 *msg = buf + txn->offset;
	int ret;

	relay_assert(relay, txn->offset);
	relay_assert(relay, txn->offset + len <= GUC_CTB_MAX_DWORDS);
	relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN);
	relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN);

	relay_debug(relay, "sending %s.%u to %u = %*ph\n",
		    guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])),
		    txn->rid, txn->remote, (int)sizeof(u32) * len, msg);

	ret = xe_guc_ct_send_block(relay_to_ct(relay), buf, len + txn->offset);

	if (unlikely(ret > 0)) {
		relay_notice(relay, "Unexpected data=%d from GuC, wrong ABI?\n", ret);
		ret = -EPROTO;
	}
	if (unlikely(ret < 0)) {
		relay_notice(relay, "Failed to send %s.%x to GuC (%pe) %*ph ...\n",
			     guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, buf[0])),
			     FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, buf[0]),
			     ERR_PTR(ret), (int)sizeof(u32) * txn->offset, buf);
		relay_notice(relay, "Failed to send %s.%u to %u (%pe) %*ph\n",
			     guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])),
			     txn->rid, txn->remote, ERR_PTR(ret), (int)sizeof(u32) * len, msg);
	}

	return ret;
}

static void __fini_relay(struct drm_device *drm, void *arg)
{
	struct xe_guc_relay *relay = arg;

	mempool_exit(&relay->pool);
}

/**
 * xe_guc_relay_init - Initialize a &xe_guc_relay
 * @relay: the &xe_guc_relay to initialize
 *
 * Initialize remaining members of &xe_guc_relay that may depend
 * on the SR-IOV mode.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_relay_init(struct xe_guc_relay *relay)
{
	const int XE_RELAY_MEMPOOL_MIN_NUM = 1;
	struct xe_device *xe = relay_to_xe(relay);
	int err;

	relay_assert(relay, !relay_is_ready(relay));

	if (!IS_SRIOV(xe))
		return 0;

	spin_lock_init(&relay->lock);
	INIT_WORK(&relay->worker, relays_worker_fn);
	INIT_LIST_HEAD(&relay->pending_relays);
	INIT_LIST_HEAD(&relay->incoming_actions);

	err = mempool_init_kmalloc_pool(&relay->pool, XE_RELAY_MEMPOOL_MIN_NUM +
					relay_get_totalvfs(relay),
					sizeof(struct relay_transaction));
	if (err)
		return err;

	relay_debug(relay, "using mempool with %d elements\n", relay->pool.min_nr);

	return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay);
}

static u32 to_relay_error(int err)
{
	/* XXX: assume that relay errors match errno codes */
	return err < 0 ? -err : GUC_RELAY_ERROR_UNDISCLOSED;
}

static int from_relay_error(u32 error)
{
	/* XXX: assume that relay errors match errno codes */
	return error ? -error : -ENODATA;
}

static u32 sanitize_relay_error(u32 error)
{
	/* XXX TBD if generic error codes will be allowed */
	if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		error = GUC_RELAY_ERROR_UNDISCLOSED;
	return error;
}

static u32 sanitize_relay_error_hint(u32 hint)
{
	/* XXX TBD if generic error codes will be allowed */
	if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		hint = 0;
	return hint;
}

static u32 prepare_error_reply(u32 *msg, u32 error, u32 hint)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) |
		 FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) |
		 FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error);

	XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_ERROR, error));
	XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_HINT, hint));

	return GUC_HXG_FAILURE_MSG_LEN;
}

static void relay_testonly_nop(struct xe_guc_relay *relay)
{
	KUNIT_STATIC_STUB_REDIRECT(relay_testonly_nop, relay);
}

static int relay_send_message_and_wait(struct xe_guc_relay *relay,
				       struct relay_transaction *txn,
				       u32 *buf, u32 buf_size)
{
	unsigned long timeout = msecs_to_jiffies(RELAY_TIMEOUT_MSEC);
	u32 *msg = &txn->request_buf[txn->offset];
	u32 len = txn->request_len;
	u32 type, action, data0;
	int ret;
	long n;

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
	data0 = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);

	relay_debug(relay, "%s.%u to %u action %#x:%u\n",
		    guc_hxg_type_to_string(type),
		    txn->rid, txn->remote, action, data0);

	/* list ordering does not need to match RID ordering */
	spin_lock(&relay->lock);
	list_add_tail(&txn->link, &relay->pending_relays);
	spin_unlock(&relay->lock);

resend:
	ret = relay_send_transaction(relay, txn);
	if (unlikely(ret < 0))
		goto unlink;

wait:
	n = wait_for_completion_timeout(&txn->done, timeout);
	if (unlikely(n == 0 && txn->reply)) {
		ret = -ETIME;
		goto unlink;
	}

	relay_debug(relay, "%u.%u reply %d after %u msec\n",
		    txn->remote, txn->rid, txn->reply, jiffies_to_msecs(timeout - n));
	if (unlikely(txn->reply)) {
		reinit_completion(&txn->done);
		if (txn->reply == -EAGAIN)
			goto resend;
		if (txn->reply == -EBUSY) {
			relay_testonly_nop(relay);
			goto wait;
		}
		if (txn->reply > 0)
			ret = from_relay_error(txn->reply);
		else
			ret = txn->reply;
		goto unlink;
	}

	relay_debug(relay, "%u.%u response %*ph\n", txn->remote, txn->rid,
		    (int)sizeof(u32) * txn->response_len, txn->response);
	relay_assert(relay, txn->response_len >= GUC_RELAY_MSG_MIN_LEN);
	ret = txn->response_len;

unlink:
	spin_lock(&relay->lock);
	list_del_init(&txn->link);
	spin_unlock(&relay->lock);

	if (unlikely(ret < 0)) {
		relay_notice(relay, "Unsuccessful %s.%u %#x:%u to %u (%pe) %*ph\n",
			     guc_hxg_type_to_string(type), txn->rid,
			     action, data0, txn->remote, ERR_PTR(ret),
			     (int)sizeof(u32) * len, msg);
	}

	return ret;
}

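/*
 * Prepare a new transaction for the given target and dispatch it based on
 * the HXG message type: REQUESTs are sent and awaited, EVENTs are sent
 * without waiting for a reply.
 */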
static int relay_send_to(struct xe_guc_relay *relay, u32 target,
			 const u32 *msg, u32 len, u32 *buf, u32 buf_size)
{
	struct relay_transaction *txn;
	int ret;

	relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN);
	relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_HOST);
	relay_assert(relay, guc_hxg_type_is_action(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])));

	if (unlikely(!relay_is_ready(relay)))
		return -ENODEV;

	txn = relay_new_transaction(relay, target, msg, len, buf, buf_size);
	if (IS_ERR(txn))
		return PTR_ERR(txn);

	switch (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])) {
	case GUC_HXG_TYPE_REQUEST:
		ret = relay_send_message_and_wait(relay, txn, buf, buf_size);
		break;
	case GUC_HXG_TYPE_FAST_REQUEST:
		relay_assert(relay, !GUC_HXG_TYPE_FAST_REQUEST);
		fallthrough;
	case GUC_HXG_TYPE_EVENT:
		ret = relay_send_transaction(relay, txn);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	relay_release_transaction(relay, txn);
	return ret;
}

#ifdef CONFIG_PCI_IOV
/**
 * xe_guc_relay_send_to_vf - Send a message to the VF.
 * @relay: the &xe_guc_relay which will send the message
 * @target: target VF number
 * @msg: request message to be sent
 * @len: length of the request message (in dwords, can't be 0)
 * @buf: placeholder for the response message
 * @buf_size: size of the response message placeholder (in dwords)
 *
 * This function can only be used by the driver running in the SR-IOV PF mode.
 *
 * Return: Non-negative response length (in dwords) or
 *         a negative error code on failure.
 */
int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target,
			    const u32 *msg, u32 len, u32 *buf, u32 buf_size)
{
	relay_assert(relay, IS_SRIOV_PF(relay_to_xe(relay)));

	return relay_send_to(relay, target, msg, len, buf, buf_size);
}
#endif

/**
 * xe_guc_relay_send_to_pf - Send a message to the PF.
 * @relay: the &xe_guc_relay which will send the message
 * @msg: request message to be sent
 * @len: length of the message (in dwords, can't be 0)
 * @buf: placeholder for the response message
 * @buf_size: size of the response message placeholder (in dwords)
 *
 * This function can only be used by the driver running in the SR-IOV VF mode.
 *
 * Return: Non-negative response length (in dwords) or
 *         a negative error code on failure.
 */
int xe_guc_relay_send_to_pf(struct xe_guc_relay *relay,
			    const u32 *msg, u32 len, u32 *buf, u32 buf_size)
{
	relay_assert(relay, IS_SRIOV_VF(relay_to_xe(relay)));

	return relay_send_to(relay, PFID, msg, len, buf, buf_size);
}

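/*
 * Find the pending transaction that matches the given origin and RID, record
 * the reply status (and copy the response data, if any) and wake up the
 * waiter. Returns -ESRCH if no matching transaction is pending.
 */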
static int relay_handle_reply(struct xe_guc_relay *relay, u32 origin,
			      u32 rid, int reply, const u32 *msg, u32 len)
{
	struct relay_transaction *pending;
	int err = -ESRCH;

	spin_lock(&relay->lock);
	list_for_each_entry(pending, &relay->pending_relays, link) {
		if (pending->remote != origin || pending->rid != rid) {
			relay_debug(relay, "%u.%u still awaits response\n",
				    pending->remote, pending->rid);
			continue;
		}
		err = 0; /* found! */
		if (reply == 0) {
			if (len > pending->response_len) {
				reply = -ENOBUFS;
				err = -ENOBUFS;
			} else {
				memcpy(pending->response, msg, 4 * len);
				pending->response_len = len;
			}
		}
		pending->reply = reply;
		complete_all(&pending->done);
		break;
	}
	spin_unlock(&relay->lock);

	return err;
}

static int relay_handle_failure(struct xe_guc_relay *relay, u32 origin,
				u32 rid, const u32 *msg, u32 len)
{
	int error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[0]);
	u32 hint __maybe_unused = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[0]);

	relay_assert(relay, len);
	relay_debug(relay, "%u.%u error %#x (%pe) hint %u debug %*ph\n",
		    origin, rid, error, ERR_PTR(-error), hint, 4 * (len - 1), msg + 1);

	return relay_handle_reply(relay, origin, rid, error ?: -EREMOTEIO, NULL, 0);
}

static int relay_testloop_action_handler(struct xe_guc_relay *relay, u32 origin,
					 const u32 *msg, u32 len, u32 *response, u32 size)
{
	static ktime_t last_reply = 0;
	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
	u32 action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
	u32 opcode = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
	ktime_t now = ktime_get();
	bool busy;
	int ret;

	relay_assert(relay, guc_hxg_type_is_action(type));
	relay_assert(relay, action == GUC_RELAY_ACTION_VFXPF_TESTLOOP);

	if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV))
		return -ECONNREFUSED;

	if (!last_reply)
		last_reply = now;
	busy = ktime_before(now, ktime_add_ms(last_reply, 2 * RELAY_TIMEOUT_MSEC));
	if (!busy)
		last_reply = now;

	switch (opcode) {
	case VFXPF_TESTLOOP_OPCODE_NOP:
		if (type == GUC_HXG_TYPE_EVENT)
			return 0;
		return guc_hxg_msg_encode_success(response, 0);
	case VFXPF_TESTLOOP_OPCODE_BUSY:
		if (type == GUC_HXG_TYPE_EVENT)
			return -EPROTO;
		msleep(RELAY_TIMEOUT_MSEC / 8);
		if (busy)
			return -EINPROGRESS;
		return guc_hxg_msg_encode_success(response, 0);
	case VFXPF_TESTLOOP_OPCODE_RETRY:
		if (type == GUC_HXG_TYPE_EVENT)
			return -EPROTO;
		msleep(RELAY_TIMEOUT_MSEC / 8);
		if (busy)
			return guc_hxg_msg_encode_retry(response, 0);
		return guc_hxg_msg_encode_success(response, 0);
	case VFXPF_TESTLOOP_OPCODE_ECHO:
		if (type == GUC_HXG_TYPE_EVENT)
			return -EPROTO;
		if (size < len)
			return -ENOBUFS;
		ret = guc_hxg_msg_encode_success(response, len);
		memcpy(response + ret, msg + ret, (len - ret) * sizeof(u32));
		return len;
	case VFXPF_TESTLOOP_OPCODE_FAIL:
		return -EHWPOISON;
	default:
		break;
	}

	relay_notice(relay, "Unexpected action %#x opcode %#x\n", action, opcode);
	return -EBADRQC;
}

static int relay_action_handler(struct xe_guc_relay *relay, u32 origin,
				const u32 *msg, u32 len, u32 *response, u32 size)
{
	u32 type;
	int ret;

	relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN);

	if (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]) == GUC_RELAY_ACTION_VFXPF_TESTLOOP)
		return relay_testloop_action_handler(relay, origin, msg, len, response, size);

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);

	/* XXX: PF services will be added later */
	ret = -EOPNOTSUPP;

	if (type == GUC_HXG_TYPE_EVENT)
		relay_assert(relay, ret <= 0);

	return ret;
}

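/* Detach the oldest queued incoming transaction, or return NULL if none is pending. */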
static struct relay_transaction *relay_dequeue_transaction(struct xe_guc_relay *relay)
{
	struct relay_transaction *txn;

	spin_lock(&relay->lock);
	txn = list_first_entry_or_null(&relay->incoming_actions, struct relay_transaction, link);
	if (txn)
		list_del_init(&txn->link);
	spin_unlock(&relay->lock);

	return txn;
}

static void relay_process_incoming_action(struct xe_guc_relay *relay)
{
	struct relay_transaction *txn;
	bool again = false;
	u32 type;
	int ret;

	txn = relay_dequeue_transaction(relay);
	if (!txn)
		return;

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, txn->request_buf[txn->offset]);

	ret = relay_action_handler(relay, txn->remote,
				   txn->request_buf + txn->offset, txn->request_len,
				   txn->response_buf + txn->offset,
				   ARRAY_SIZE(txn->response_buf) - txn->offset);

	if (ret == -EINPROGRESS) {
		again = true;
		ret = guc_hxg_msg_encode_busy(txn->response_buf + txn->offset, 0);
	}

	if (ret > 0) {
		txn->response_len = ret;
		ret = relay_send_transaction(relay, txn);
	}

	if (ret < 0) {
		u32 error = to_relay_error(ret);

		relay_notice(relay, "Failed to handle %s.%u from %u (%pe) %*ph\n",
			     guc_hxg_type_to_string(type), txn->rid, txn->remote,
			     ERR_PTR(ret), 4 * txn->request_len, txn->request_buf + txn->offset);

		txn->response_len = prepare_error_reply(txn->response_buf + txn->offset,
							txn->remote ?
							sanitize_relay_error(error) : error,
							txn->remote ?
							sanitize_relay_error_hint(-ret) : -ret);
		ret = relay_send_transaction(relay, txn);
		again = false;
	}

	if (again) {
		spin_lock(&relay->lock);
		list_add(&txn->link, &relay->incoming_actions);
		spin_unlock(&relay->lock);
		return;
	}

	if (unlikely(ret < 0))
		relay_notice(relay, "Failed to process action.%u (%pe) %*ph\n",
			     txn->rid, ERR_PTR(ret), 4 * txn->request_len,
			     txn->request_buf + txn->offset);

	relay_release_transaction(relay, txn);
}

static bool relay_needs_worker(struct xe_guc_relay *relay)
{
	return !list_empty(&relay->incoming_actions);
}

static void relay_kick_worker(struct xe_guc_relay *relay)
{
	KUNIT_STATIC_STUB_REDIRECT(relay_kick_worker, relay);
	queue_work(relay_to_xe(relay)->sriov.wq, &relay->worker);
}

static void relays_worker_fn(struct work_struct *w)
{
	struct xe_guc_relay *relay = container_of(w, struct xe_guc_relay, worker);

	relay_process_incoming_action(relay);

	if (relay_needs_worker(relay))
		relay_kick_worker(relay);
}

static int relay_queue_action_msg(struct xe_guc_relay *relay, u32 origin, u32 rid,
				  const u32 *msg, u32 len)
{
	struct relay_transaction *txn;

	txn = relay_new_incoming_transaction(relay, origin, rid, msg, len);
	if (IS_ERR(txn))
		return PTR_ERR(txn);

	spin_lock(&relay->lock);
	list_add_tail(&txn->link, &relay->incoming_actions);
	spin_unlock(&relay->lock);

	relay_kick_worker(relay);
	return 0;
}

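/*
 * Validate the inner HXG header of a received relay message and route it:
 * requests and events are queued for the worker, while replies are matched
 * against the pending transactions.
 */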
static int relay_process_msg(struct xe_guc_relay *relay, u32 origin, u32 rid,
			     const u32 *msg, u32 len)
{
	u32 type;
	int err;

	if (unlikely(len < GUC_HXG_MSG_MIN_LEN))
		return -EPROTO;

	if (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_HOST)
		return -EPROTO;

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
	relay_debug(relay, "received %s.%u from %u = %*ph\n",
		    guc_hxg_type_to_string(type), rid, origin, 4 * len, msg);

	switch (type) {
	case GUC_HXG_TYPE_REQUEST:
	case GUC_HXG_TYPE_FAST_REQUEST:
	case GUC_HXG_TYPE_EVENT:
		err = relay_queue_action_msg(relay, origin, rid, msg, len);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
		err = relay_handle_reply(relay, origin, rid, 0, msg, len);
		break;
	case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
		err = relay_handle_reply(relay, origin, rid, -EBUSY, NULL, 0);
		break;
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		err = relay_handle_reply(relay, origin, rid, -EAGAIN, NULL, 0);
		break;
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
		err = relay_handle_failure(relay, origin, rid, msg, len);
		break;
	default:
		err = -EBADRQC;
	}

	if (unlikely(err))
		relay_notice(relay, "Failed to process %s.%u from %u (%pe) %*ph\n",
			     guc_hxg_type_to_string(type), rid, origin,
			     ERR_PTR(err), 4 * len, msg);

	return err;
}

/**
 * xe_guc_relay_process_guc2vf - Handle relay notification message from the GuC.
 * @relay: the &xe_guc_relay which will handle the message
 * @msg: message to be handled
 * @len: length of the message (in dwords)
 *
 * This function will handle relay messages received from the GuC.
 *
 * This function can only be used if the driver is running in the SR-IOV VF mode.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_relay_process_guc2vf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
{
	u32 rid;

	relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
	relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
		     XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF);

	if (unlikely(!IS_SRIOV_VF(relay_to_xe(relay)) && !kunit_get_current_test()))
		return -EPERM;

	if (unlikely(!relay_is_ready(relay)))
		return -ENODEV;

	if (unlikely(len < GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN))
		return -EPROTO;

	if (unlikely(len > GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN))
		return -EMSGSIZE;

	if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0])))
		return -EPFNOSUPPORT;

	rid = FIELD_GET(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, msg[1]);

	return relay_process_msg(relay, PFID, rid,
				 msg + GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN,
				 len - GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN);
}

#ifdef CONFIG_PCI_IOV
/**
 * xe_guc_relay_process_guc2pf - Handle relay notification message from the GuC.
 * @relay: the &xe_guc_relay which will handle the message
 * @msg: message to be handled
 * @len: length of the message (in dwords)
 *
 * This function will handle relay messages received from the GuC.
 *
 * This function can only be used if the driver is running in the SR-IOV PF mode.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
{
	u32 origin, rid;
	int err;

	relay_assert(relay, len >= GUC_HXG_EVENT_MSG_MIN_LEN);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
	relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
	relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
		     XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF);

	if (unlikely(!IS_SRIOV_PF(relay_to_xe(relay)) && !kunit_get_current_test()))
		return -EPERM;

	if (unlikely(!relay_is_ready(relay)))
		return -ENODEV;

	if (unlikely(len < GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN))
		return -EPROTO;

	if (unlikely(len > GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN))
		return -EMSGSIZE;

	if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0])))
		return -EPFNOSUPPORT;

	origin = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, msg[1]);
	rid = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, msg[2]);

	if (unlikely(origin > relay_get_totalvfs(relay)))
		return -ENOENT;

	err = relay_process_msg(relay, origin, rid,
				msg + GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN,
				len - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN);

	return err;
}
#endif

#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_guc_relay_test.c"
#endif