1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2023-2024 Intel Corporation 4 */ 5 6 #include <linux/bitfield.h> 7 #include <linux/bsearch.h> 8 9 #include <drm/drm_managed.h> 10 #include <drm/drm_print.h> 11 12 #include "abi/guc_actions_sriov_abi.h" 13 #include "abi/guc_communication_mmio_abi.h" 14 #include "abi/guc_klvs_abi.h" 15 #include "abi/guc_relay_actions_abi.h" 16 #include "regs/xe_gt_regs.h" 17 #include "regs/xe_gtt_defs.h" 18 19 #include "xe_assert.h" 20 #include "xe_device.h" 21 #include "xe_ggtt.h" 22 #include "xe_gt_sriov_printk.h" 23 #include "xe_gt_sriov_vf.h" 24 #include "xe_gt_sriov_vf_types.h" 25 #include "xe_guc.h" 26 #include "xe_guc_hxg_helpers.h" 27 #include "xe_guc_relay.h" 28 #include "xe_mmio.h" 29 #include "xe_sriov.h" 30 #include "xe_sriov_vf.h" 31 #include "xe_uc_fw.h" 32 #include "xe_wopcm.h" 33 34 #define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo))) 35 36 static int guc_action_vf_reset(struct xe_guc *guc) 37 { 38 u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = { 39 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 40 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 41 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET), 42 }; 43 int ret; 44 45 ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request)); 46 47 return ret > 0 ? -EPROTO : ret; 48 } 49 50 #define GUC_RESET_VF_STATE_RETRY_MAX 10 51 static int vf_reset_guc_state(struct xe_gt *gt) 52 { 53 unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX; 54 struct xe_guc *guc = >->uc.guc; 55 int err; 56 57 do { 58 err = guc_action_vf_reset(guc); 59 if (!err || err != -ETIMEDOUT) 60 break; 61 } while (--retry); 62 63 if (unlikely(err)) 64 xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err)); 65 return err; 66 } 67 68 /** 69 * xe_gt_sriov_vf_reset - Reset GuC VF internal state. 70 * @gt: the &xe_gt 71 * 72 * It requires functional `GuC MMIO based communication`_. 73 * 74 * Return: 0 on success or a negative error code on failure. 75 */ 76 int xe_gt_sriov_vf_reset(struct xe_gt *gt) 77 { 78 if (!xe_device_uc_enabled(gt_to_xe(gt))) 79 return -ENODEV; 80 81 return vf_reset_guc_state(gt); 82 } 83 84 static int guc_action_match_version(struct xe_guc *guc, 85 u32 wanted_branch, u32 wanted_major, u32 wanted_minor, 86 u32 *branch, u32 *major, u32 *minor, u32 *patch) 87 { 88 u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = { 89 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 90 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 91 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, 92 GUC_ACTION_VF2GUC_MATCH_VERSION), 93 FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) | 94 FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) | 95 FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor), 96 }; 97 u32 response[GUC_MAX_MMIO_MSG_LEN]; 98 int ret; 99 100 BUILD_BUG_ON(VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN > GUC_MAX_MMIO_MSG_LEN); 101 102 ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response); 103 if (unlikely(ret < 0)) 104 return ret; 105 106 if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0]))) 107 return -EPROTO; 108 109 *branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]); 110 *major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]); 111 *minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]); 112 *patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]); 113 114 return 0; 115 } 116 117 static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor) 118 { 119 struct xe_device *xe = gt_to_xe(gt); 120 121 switch (xe->info.platform) { 122 case XE_TIGERLAKE ... XE_PVC: 123 /* 1.1 this is current baseline for Xe driver */ 124 *branch = 0; 125 *major = 1; 126 *minor = 1; 127 break; 128 default: 129 /* 1.2 has support for the GMD_ID KLV */ 130 *branch = 0; 131 *major = 1; 132 *minor = 2; 133 break; 134 } 135 } 136 137 static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor) 138 { 139 /* for now it's the same as minimum */ 140 return vf_minimum_guc_version(gt, branch, major, minor); 141 } 142 143 static int vf_handshake_with_guc(struct xe_gt *gt) 144 { 145 struct xe_gt_sriov_vf_guc_version *guc_version = >->sriov.vf.guc_version; 146 struct xe_guc *guc = >->uc.guc; 147 u32 wanted_branch, wanted_major, wanted_minor; 148 u32 branch, major, minor, patch; 149 int err; 150 151 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 152 153 /* select wanted version - prefer previous (if any) */ 154 if (guc_version->major || guc_version->minor) { 155 wanted_branch = guc_version->branch; 156 wanted_major = guc_version->major; 157 wanted_minor = guc_version->minor; 158 } else { 159 vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor); 160 xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY); 161 } 162 163 err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor, 164 &branch, &major, &minor, &patch); 165 if (unlikely(err)) 166 goto fail; 167 168 /* we don't support interface version change */ 169 if ((guc_version->major || guc_version->minor) && 170 (guc_version->branch != branch || guc_version->major != major || 171 guc_version->minor != minor)) { 172 xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n", 173 branch, major, minor, patch); 174 xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n", 175 guc_version->branch, guc_version->major, 176 guc_version->minor, guc_version->patch); 177 err = -EREMCHG; 178 goto fail; 179 } 180 181 /* illegal */ 182 if (major > wanted_major) { 183 err = -EPROTO; 184 goto unsupported; 185 } 186 187 /* there's no fallback on major version. */ 188 if (major != wanted_major) { 189 err = -ENOPKG; 190 goto unsupported; 191 } 192 193 /* check against minimum version supported by us */ 194 vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor); 195 xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY); 196 if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) { 197 err = -ENOKEY; 198 goto unsupported; 199 } 200 201 xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n", 202 branch, major, minor, patch); 203 204 guc_version->branch = branch; 205 guc_version->major = major; 206 guc_version->minor = minor; 207 guc_version->patch = patch; 208 return 0; 209 210 unsupported: 211 xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n", 212 branch, major, minor, patch, ERR_PTR(err)); 213 fail: 214 xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n", 215 wanted_major, wanted_minor, ERR_PTR(err)); 216 217 /* try again with *any* just to query which version is supported */ 218 if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY, 219 GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY, 220 &branch, &major, &minor, &patch)) 221 xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n", 222 branch, major, minor, patch); 223 return err; 224 } 225 226 /** 227 * xe_gt_sriov_vf_bootstrap - Query and setup GuC ABI interface version. 228 * @gt: the &xe_gt 229 * 230 * This function is for VF use only. 231 * It requires functional `GuC MMIO based communication`_. 232 * 233 * Return: 0 on success or a negative error code on failure. 234 */ 235 int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt) 236 { 237 int err; 238 239 if (!xe_device_uc_enabled(gt_to_xe(gt))) 240 return -ENODEV; 241 242 err = vf_reset_guc_state(gt); 243 if (unlikely(err)) 244 return err; 245 246 err = vf_handshake_with_guc(gt); 247 if (unlikely(err)) 248 return err; 249 250 return 0; 251 } 252 253 static int guc_action_vf_notify_resfix_done(struct xe_guc *guc) 254 { 255 u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = { 256 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 257 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 258 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE), 259 }; 260 int ret; 261 262 ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request)); 263 264 return ret > 0 ? -EPROTO : ret; 265 } 266 267 /** 268 * xe_gt_sriov_vf_notify_resfix_done - Notify GuC about resource fixups apply completed. 269 * @gt: the &xe_gt struct instance linked to target GuC 270 * 271 * Returns: 0 if the operation completed successfully, or a negative error 272 * code otherwise. 273 */ 274 int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt) 275 { 276 struct xe_guc *guc = >->uc.guc; 277 int err; 278 279 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 280 281 err = guc_action_vf_notify_resfix_done(guc); 282 if (unlikely(err)) 283 xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n", 284 ERR_PTR(err)); 285 else 286 xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n"); 287 288 return err; 289 } 290 291 static int guc_action_query_single_klv(struct xe_guc *guc, u32 key, 292 u32 *value, u32 value_len) 293 { 294 u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = { 295 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 296 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 297 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, 298 GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV), 299 FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key), 300 }; 301 u32 response[GUC_MAX_MMIO_MSG_LEN]; 302 u32 length; 303 int ret; 304 305 BUILD_BUG_ON(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN > GUC_MAX_MMIO_MSG_LEN); 306 ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response); 307 if (unlikely(ret < 0)) 308 return ret; 309 310 if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0]))) 311 return -EPROTO; 312 313 length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]); 314 if (unlikely(length > value_len)) 315 return -EOVERFLOW; 316 if (unlikely(length < value_len)) 317 return -ENODATA; 318 319 switch (value_len) { 320 default: 321 xe_gt_WARN_ON(guc_to_gt(guc), value_len > 3); 322 fallthrough; 323 case 3: 324 value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]); 325 fallthrough; 326 case 2: 327 value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]); 328 fallthrough; 329 case 1: 330 value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]); 331 fallthrough; 332 case 0: 333 break; 334 } 335 336 return 0; 337 } 338 339 static int guc_action_query_single_klv32(struct xe_guc *guc, u32 key, u32 *value32) 340 { 341 return guc_action_query_single_klv(guc, key, value32, hxg_sizeof(u32)); 342 } 343 344 static int guc_action_query_single_klv64(struct xe_guc *guc, u32 key, u64 *value64) 345 { 346 u32 value[2]; 347 int err; 348 349 err = guc_action_query_single_klv(guc, key, value, hxg_sizeof(value)); 350 if (unlikely(err)) 351 return err; 352 353 *value64 = make_u64_from_u32(value[1], value[0]); 354 return 0; 355 } 356 357 static bool has_gmdid(struct xe_device *xe) 358 { 359 return GRAPHICS_VERx100(xe) >= 1270; 360 } 361 362 /** 363 * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO. 364 * @gt: the &xe_gt 365 * 366 * This function is for VF use only. 367 * 368 * Return: value of GMDID KLV on success or 0 on failure. 369 */ 370 u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt) 371 { 372 const char *type = xe_gt_is_media_type(gt) ? "media" : "graphics"; 373 struct xe_guc *guc = >->uc.guc; 374 u32 value; 375 int err; 376 377 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 378 xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt))); 379 xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2); 380 381 err = guc_action_query_single_klv32(guc, GUC_KLV_GLOBAL_CFG_GMD_ID_KEY, &value); 382 if (unlikely(err)) { 383 xe_gt_sriov_err(gt, "Failed to obtain %s GMDID (%pe)\n", 384 type, ERR_PTR(err)); 385 return 0; 386 } 387 388 xe_gt_sriov_dbg(gt, "%s GMDID = %#x\n", type, value); 389 return value; 390 } 391 392 static int vf_get_ggtt_info(struct xe_gt *gt) 393 { 394 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 395 struct xe_guc *guc = >->uc.guc; 396 u64 start, size; 397 int err; 398 399 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 400 401 err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start); 402 if (unlikely(err)) 403 return err; 404 405 err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size); 406 if (unlikely(err)) 407 return err; 408 409 if (config->ggtt_size && config->ggtt_size != size) { 410 xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n", 411 size / SZ_1K, config->ggtt_size / SZ_1K); 412 return -EREMCHG; 413 } 414 415 xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n", 416 start, start + size - 1, size / SZ_1K); 417 418 config->ggtt_base = start; 419 config->ggtt_size = size; 420 421 return config->ggtt_size ? 0 : -ENODATA; 422 } 423 424 static int vf_get_lmem_info(struct xe_gt *gt) 425 { 426 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 427 struct xe_guc *guc = >->uc.guc; 428 char size_str[10]; 429 u64 size; 430 int err; 431 432 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 433 434 err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size); 435 if (unlikely(err)) 436 return err; 437 438 if (config->lmem_size && config->lmem_size != size) { 439 xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n", 440 size / SZ_1M, config->lmem_size / SZ_1M); 441 return -EREMCHG; 442 } 443 444 string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str)); 445 xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str); 446 447 config->lmem_size = size; 448 449 return config->lmem_size ? 0 : -ENODATA; 450 } 451 452 static int vf_get_submission_cfg(struct xe_gt *gt) 453 { 454 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 455 struct xe_guc *guc = >->uc.guc; 456 u32 num_ctxs, num_dbs; 457 int err; 458 459 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 460 461 err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs); 462 if (unlikely(err)) 463 return err; 464 465 err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs); 466 if (unlikely(err)) 467 return err; 468 469 if (config->num_ctxs && config->num_ctxs != num_ctxs) { 470 xe_gt_sriov_err(gt, "Unexpected CTXs reassignment: %u != %u\n", 471 num_ctxs, config->num_ctxs); 472 return -EREMCHG; 473 } 474 if (config->num_dbs && config->num_dbs != num_dbs) { 475 xe_gt_sriov_err(gt, "Unexpected DBs reassignment: %u != %u\n", 476 num_dbs, config->num_dbs); 477 return -EREMCHG; 478 } 479 480 xe_gt_sriov_dbg_verbose(gt, "CTXs %u DBs %u\n", num_ctxs, num_dbs); 481 482 config->num_ctxs = num_ctxs; 483 config->num_dbs = num_dbs; 484 485 return config->num_ctxs ? 0 : -ENODATA; 486 } 487 488 static void vf_cache_gmdid(struct xe_gt *gt) 489 { 490 xe_gt_assert(gt, has_gmdid(gt_to_xe(gt))); 491 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 492 493 gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt); 494 } 495 496 /** 497 * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO. 498 * @gt: the &xe_gt 499 * 500 * This function is for VF use only. 501 * 502 * Return: 0 on success or a negative error code on failure. 503 */ 504 int xe_gt_sriov_vf_query_config(struct xe_gt *gt) 505 { 506 struct xe_device *xe = gt_to_xe(gt); 507 int err; 508 509 err = vf_get_ggtt_info(gt); 510 if (unlikely(err)) 511 return err; 512 513 if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) { 514 err = vf_get_lmem_info(gt); 515 if (unlikely(err)) 516 return err; 517 } 518 519 err = vf_get_submission_cfg(gt); 520 if (unlikely(err)) 521 return err; 522 523 if (has_gmdid(xe)) 524 vf_cache_gmdid(gt); 525 526 return 0; 527 } 528 529 /** 530 * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration. 531 * @gt: the &xe_gt 532 * 533 * This function is for VF use only. 534 * 535 * Return: number of GuC context IDs assigned to VF. 536 */ 537 u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt) 538 { 539 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 540 xe_gt_assert(gt, gt->sriov.vf.guc_version.major); 541 xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs); 542 543 return gt->sriov.vf.self_config.num_ctxs; 544 } 545 546 /** 547 * xe_gt_sriov_vf_lmem - VF LMEM configuration. 548 * @gt: the &xe_gt 549 * 550 * This function is for VF use only. 551 * 552 * Return: size of the LMEM assigned to VF. 553 */ 554 u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt) 555 { 556 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 557 xe_gt_assert(gt, gt->sriov.vf.guc_version.major); 558 xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size); 559 560 return gt->sriov.vf.self_config.lmem_size; 561 } 562 563 static struct xe_ggtt_node * 564 vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end) 565 { 566 struct xe_ggtt_node *node; 567 int err; 568 569 node = xe_ggtt_node_init(ggtt); 570 if (IS_ERR(node)) 571 return node; 572 573 err = xe_ggtt_node_insert_balloon(node, start, end); 574 if (err) { 575 xe_ggtt_node_fini(node); 576 return ERR_PTR(err); 577 } 578 579 return node; 580 } 581 582 static int vf_balloon_ggtt(struct xe_gt *gt) 583 { 584 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 585 struct xe_tile *tile = gt_to_tile(gt); 586 struct xe_ggtt *ggtt = tile->mem.ggtt; 587 struct xe_device *xe = gt_to_xe(gt); 588 u64 start, end; 589 590 xe_gt_assert(gt, IS_SRIOV_VF(xe)); 591 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); 592 593 if (!config->ggtt_size) 594 return -ENODATA; 595 596 /* 597 * VF can only use part of the GGTT as allocated by the PF: 598 * 599 * WOPCM GUC_GGTT_TOP 600 * |<------------ Total GGTT size ------------------>| 601 * 602 * VF GGTT base -->|<- size ->| 603 * 604 * +--------------------+----------+-----------------+ 605 * |////////////////////| block |\\\\\\\\\\\\\\\\\| 606 * +--------------------+----------+-----------------+ 607 * 608 * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->| 609 */ 610 611 start = xe_wopcm_size(xe); 612 end = config->ggtt_base; 613 if (end != start) { 614 tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end); 615 if (IS_ERR(tile->sriov.vf.ggtt_balloon[0])) 616 return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]); 617 } 618 619 start = config->ggtt_base + config->ggtt_size; 620 end = GUC_GGTT_TOP; 621 if (end != start) { 622 tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end); 623 if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) { 624 xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]); 625 return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]); 626 } 627 } 628 629 return 0; 630 } 631 632 static void deballoon_ggtt(struct drm_device *drm, void *arg) 633 { 634 struct xe_tile *tile = arg; 635 636 xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); 637 xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]); 638 xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]); 639 } 640 641 /** 642 * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration. 643 * @gt: the &xe_gt 644 * 645 * This function is for VF use only. 646 * 647 * Return: 0 on success or a negative error code on failure. 648 */ 649 int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt) 650 { 651 struct xe_tile *tile = gt_to_tile(gt); 652 struct xe_device *xe = tile_to_xe(tile); 653 int err; 654 655 if (xe_gt_is_media_type(gt)) 656 return 0; 657 658 err = vf_balloon_ggtt(gt); 659 if (err) 660 return err; 661 662 return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile); 663 } 664 665 static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor) 666 { 667 u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = { 668 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 669 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 670 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE), 671 FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) | 672 FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor), 673 }; 674 u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN]; 675 int ret; 676 677 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 678 679 ret = xe_guc_relay_send_to_pf(>->uc.guc.relay, 680 request, ARRAY_SIZE(request), 681 response, ARRAY_SIZE(response)); 682 if (unlikely(ret < 0)) 683 return ret; 684 685 if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN)) 686 return -EPROTO; 687 688 if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0]))) 689 return -EPROTO; 690 691 *major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]); 692 *minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]); 693 694 return 0; 695 } 696 697 static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor) 698 { 699 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 700 701 gt->sriov.vf.pf_version.major = major; 702 gt->sriov.vf.pf_version.minor = minor; 703 } 704 705 static void vf_disconnect_pf(struct xe_gt *gt) 706 { 707 vf_connect_pf(gt, 0, 0); 708 } 709 710 static int vf_handshake_with_pf(struct xe_gt *gt) 711 { 712 u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR; 713 u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR; 714 u32 major = major_wanted, minor = minor_wanted; 715 int err; 716 717 err = relay_action_handshake(gt, &major, &minor); 718 if (unlikely(err)) 719 goto failed; 720 721 if (!major && !minor) { 722 err = -ENODATA; 723 goto failed; 724 } 725 726 xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor); 727 vf_connect_pf(gt, major, minor); 728 return 0; 729 730 failed: 731 xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n", 732 major, minor, ERR_PTR(err)); 733 vf_disconnect_pf(gt); 734 return err; 735 } 736 737 /** 738 * xe_gt_sriov_vf_connect - Establish connection with the PF driver. 739 * @gt: the &xe_gt 740 * 741 * This function is for VF use only. 742 * 743 * Return: 0 on success or a negative error code on failure. 744 */ 745 int xe_gt_sriov_vf_connect(struct xe_gt *gt) 746 { 747 int err; 748 749 err = vf_handshake_with_pf(gt); 750 if (unlikely(err)) 751 goto failed; 752 753 return 0; 754 755 failed: 756 xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err)); 757 return err; 758 } 759 760 /** 761 * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery, 762 * or just mark that a GuC is ready for it. 763 * @gt: the &xe_gt struct instance linked to target GuC 764 * 765 * This function shall be called only by VF. 766 */ 767 void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt) 768 { 769 struct xe_device *xe = gt_to_xe(gt); 770 771 xe_gt_assert(gt, IS_SRIOV_VF(xe)); 772 773 set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags); 774 /* 775 * We need to be certain that if all flags were set, at least one 776 * thread will notice that and schedule the recovery. 777 */ 778 smp_mb__after_atomic(); 779 780 xe_gt_sriov_info(gt, "ready for recovery after migration\n"); 781 xe_sriov_vf_start_migration_recovery(xe); 782 } 783 784 static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor) 785 { 786 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 787 788 return major == gt->sriov.vf.pf_version.major && 789 minor <= gt->sriov.vf.pf_version.minor; 790 } 791 792 static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs) 793 { 794 struct vf_runtime_reg *regs = gt->sriov.vf.runtime.regs; 795 unsigned int regs_size = round_up(num_regs, 4); 796 struct xe_device *xe = gt_to_xe(gt); 797 798 xe_gt_assert(gt, IS_SRIOV_VF(xe)); 799 800 if (regs) { 801 if (num_regs <= gt->sriov.vf.runtime.regs_size) { 802 memset(regs, 0, num_regs * sizeof(*regs)); 803 gt->sriov.vf.runtime.num_regs = num_regs; 804 return 0; 805 } 806 807 drmm_kfree(&xe->drm, regs); 808 gt->sriov.vf.runtime.regs = NULL; 809 gt->sriov.vf.runtime.num_regs = 0; 810 gt->sriov.vf.runtime.regs_size = 0; 811 } 812 813 regs = drmm_kcalloc(&xe->drm, regs_size, sizeof(*regs), GFP_KERNEL); 814 if (unlikely(!regs)) 815 return -ENOMEM; 816 817 gt->sriov.vf.runtime.regs = regs; 818 gt->sriov.vf.runtime.num_regs = num_regs; 819 gt->sriov.vf.runtime.regs_size = regs_size; 820 return 0; 821 } 822 823 static int vf_query_runtime_info(struct xe_gt *gt) 824 { 825 u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN]; 826 u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 32]; /* up to 16 regs */ 827 u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2; 828 u32 count, remaining, num, i; 829 u32 start = 0; 830 int ret; 831 832 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 833 xe_gt_assert(gt, limit); 834 835 /* this is part of the 1.0 PF/VF ABI */ 836 if (!vf_is_negotiated(gt, 1, 0)) 837 return -ENOPKG; 838 839 request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 840 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 841 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, 842 GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME) | 843 FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit); 844 845 repeat: 846 request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start); 847 ret = xe_guc_relay_send_to_pf(>->uc.guc.relay, 848 request, ARRAY_SIZE(request), 849 response, ARRAY_SIZE(response)); 850 if (unlikely(ret < 0)) 851 goto failed; 852 853 if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) { 854 ret = -EPROTO; 855 goto failed; 856 } 857 if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) { 858 ret = -EPROTO; 859 goto failed; 860 } 861 862 num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2; 863 count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]); 864 remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]); 865 866 xe_gt_sriov_dbg_verbose(gt, "count=%u num=%u ret=%d start=%u remaining=%u\n", 867 count, num, ret, start, remaining); 868 869 if (unlikely(count != num)) { 870 ret = -EPROTO; 871 goto failed; 872 } 873 874 if (start == 0) { 875 ret = vf_prepare_runtime_info(gt, num + remaining); 876 if (unlikely(ret < 0)) 877 goto failed; 878 } else if (unlikely(start + num > gt->sriov.vf.runtime.num_regs)) { 879 ret = -EPROTO; 880 goto failed; 881 } 882 883 for (i = 0; i < num; ++i) { 884 struct vf_runtime_reg *reg = >->sriov.vf.runtime.regs[start + i]; 885 886 reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i]; 887 reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1]; 888 } 889 890 if (remaining) { 891 start += num; 892 goto repeat; 893 } 894 895 return 0; 896 897 failed: 898 vf_prepare_runtime_info(gt, 0); 899 return ret; 900 } 901 902 static void vf_show_runtime_info(struct xe_gt *gt) 903 { 904 struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs; 905 unsigned int size = gt->sriov.vf.runtime.num_regs; 906 907 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 908 909 for (; size--; vf_regs++) 910 xe_gt_sriov_dbg(gt, "runtime(%#x) = %#x\n", 911 vf_regs->offset, vf_regs->value); 912 } 913 914 /** 915 * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data. 916 * @gt: the &xe_gt 917 * 918 * This function is for VF use only. 919 * 920 * Return: 0 on success or a negative error code on failure. 921 */ 922 int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt) 923 { 924 int err; 925 926 err = vf_query_runtime_info(gt); 927 if (unlikely(err)) 928 goto failed; 929 930 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) 931 vf_show_runtime_info(gt); 932 933 return 0; 934 935 failed: 936 xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n", 937 ERR_PTR(err)); 938 return err; 939 } 940 941 static int vf_runtime_reg_cmp(const void *a, const void *b) 942 { 943 const struct vf_runtime_reg *ra = a; 944 const struct vf_runtime_reg *rb = b; 945 946 return (int)ra->offset - (int)rb->offset; 947 } 948 949 static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr) 950 { 951 struct xe_gt_sriov_vf_runtime *runtime = >->sriov.vf.runtime; 952 struct vf_runtime_reg key = { .offset = addr }; 953 954 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 955 956 return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key), 957 vf_runtime_reg_cmp); 958 } 959 960 /** 961 * xe_gt_sriov_vf_read32 - Get a register value from the runtime data. 962 * @gt: the &xe_gt 963 * @reg: the register to read 964 * 965 * This function is for VF use only. 966 * This function shall be called after VF has connected to PF. 967 * This function is dedicated for registers that VFs can't read directly. 968 * 969 * Return: register value obtained from the PF or 0 if not found. 970 */ 971 u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg) 972 { 973 u32 addr = xe_mmio_adjusted_addr(>->mmio, reg.addr); 974 struct vf_runtime_reg *rr; 975 976 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 977 xe_gt_assert(gt, gt->sriov.vf.pf_version.major); 978 xe_gt_assert(gt, !reg.vf); 979 980 if (reg.addr == GMD_ID.addr) { 981 xe_gt_sriov_dbg_verbose(gt, "gmdid(%#x) = %#x\n", 982 addr, gt->sriov.vf.runtime.gmdid); 983 return gt->sriov.vf.runtime.gmdid; 984 } 985 986 rr = vf_lookup_reg(gt, addr); 987 if (!rr) { 988 xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG), 989 "VF is trying to read an inaccessible register %#x+%#x\n", 990 reg.addr, addr - reg.addr); 991 return 0; 992 } 993 994 xe_gt_sriov_dbg_verbose(gt, "runtime[%#x] = %#x\n", addr, rr->value); 995 return rr->value; 996 } 997 998 /** 999 * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register. 1000 * @gt: the &xe_gt 1001 * @reg: the register to write 1002 * @val: value to write 1003 * 1004 * This function is for VF use only. 1005 * Currently it will trigger a WARN if running on debug build. 1006 */ 1007 void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) 1008 { 1009 u32 addr = xe_mmio_adjusted_addr(>->mmio, reg.addr); 1010 1011 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 1012 xe_gt_assert(gt, !reg.vf); 1013 1014 /* 1015 * In the future, we may want to handle selected writes to inaccessible 1016 * registers in some custom way, but for now let's just log a warning 1017 * about such attempt, as likely we might be doing something wrong. 1018 */ 1019 xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG), 1020 "VF is trying to write %#x to an inaccessible register %#x+%#x\n", 1021 val, reg.addr, addr - reg.addr); 1022 } 1023 1024 /** 1025 * xe_gt_sriov_vf_print_config - Print VF self config. 1026 * @gt: the &xe_gt 1027 * @p: the &drm_printer 1028 * 1029 * This function is for VF use only. 1030 */ 1031 void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p) 1032 { 1033 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 1034 struct xe_device *xe = gt_to_xe(gt); 1035 char buf[10]; 1036 1037 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 1038 1039 drm_printf(p, "GGTT range:\t%#llx-%#llx\n", 1040 config->ggtt_base, 1041 config->ggtt_base + config->ggtt_size - 1); 1042 1043 string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf)); 1044 drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf); 1045 1046 if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) { 1047 string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf)); 1048 drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf); 1049 } 1050 1051 drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs); 1052 drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs); 1053 } 1054 1055 /** 1056 * xe_gt_sriov_vf_print_runtime - Print VF's runtime regs received from PF. 1057 * @gt: the &xe_gt 1058 * @p: the &drm_printer 1059 * 1060 * This function is for VF use only. 1061 */ 1062 void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p) 1063 { 1064 struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs; 1065 unsigned int size = gt->sriov.vf.runtime.num_regs; 1066 1067 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 1068 1069 for (; size--; vf_regs++) 1070 drm_printf(p, "%#x = %#x\n", vf_regs->offset, vf_regs->value); 1071 } 1072 1073 /** 1074 * xe_gt_sriov_vf_print_version - Print VF ABI versions. 1075 * @gt: the &xe_gt 1076 * @p: the &drm_printer 1077 * 1078 * This function is for VF use only. 1079 */ 1080 void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p) 1081 { 1082 struct xe_gt_sriov_vf_guc_version *guc_version = >->sriov.vf.guc_version; 1083 struct xe_gt_sriov_vf_relay_version *pf_version = >->sriov.vf.pf_version; 1084 u32 branch, major, minor; 1085 1086 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 1087 1088 drm_printf(p, "GuC ABI:\n"); 1089 1090 vf_minimum_guc_version(gt, &branch, &major, &minor); 1091 drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor); 1092 1093 vf_wanted_guc_version(gt, &branch, &major, &minor); 1094 drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor); 1095 1096 drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n", 1097 guc_version->branch, guc_version->major, 1098 guc_version->minor, guc_version->patch); 1099 1100 drm_printf(p, "PF ABI:\n"); 1101 1102 drm_printf(p, "\tbase:\t%u.%u\n", 1103 GUC_RELAY_VERSION_BASE_MAJOR, GUC_RELAY_VERSION_BASE_MINOR); 1104 drm_printf(p, "\twanted:\t%u.%u\n", 1105 GUC_RELAY_VERSION_LATEST_MAJOR, GUC_RELAY_VERSION_LATEST_MINOR); 1106 drm_printf(p, "\thandshake:\t%u.%u\n", 1107 pf_version->major, pf_version->minor); 1108 } 1109