1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2023-2024 Intel Corporation 4 */ 5 6 #include <linux/bitfield.h> 7 #include <linux/bsearch.h> 8 9 #include <drm/drm_managed.h> 10 #include <drm/drm_print.h> 11 12 #include "abi/guc_actions_sriov_abi.h" 13 #include "abi/guc_communication_mmio_abi.h" 14 #include "abi/guc_klvs_abi.h" 15 #include "abi/guc_relay_actions_abi.h" 16 #include "regs/xe_gt_regs.h" 17 #include "regs/xe_gtt_defs.h" 18 19 #include "xe_assert.h" 20 #include "xe_device.h" 21 #include "xe_ggtt.h" 22 #include "xe_gt_sriov_printk.h" 23 #include "xe_gt_sriov_vf.h" 24 #include "xe_gt_sriov_vf_types.h" 25 #include "xe_guc.h" 26 #include "xe_guc_hxg_helpers.h" 27 #include "xe_guc_relay.h" 28 #include "xe_mmio.h" 29 #include "xe_sriov.h" 30 #include "xe_sriov_vf.h" 31 #include "xe_uc_fw.h" 32 #include "xe_wopcm.h" 33 34 #define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo))) 35 36 static int guc_action_vf_reset(struct xe_guc *guc) 37 { 38 u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = { 39 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 40 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 41 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET), 42 }; 43 int ret; 44 45 ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request)); 46 47 return ret > 0 ? -EPROTO : ret; 48 } 49 50 #define GUC_RESET_VF_STATE_RETRY_MAX 10 51 static int vf_reset_guc_state(struct xe_gt *gt) 52 { 53 unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX; 54 struct xe_guc *guc = >->uc.guc; 55 int err; 56 57 do { 58 err = guc_action_vf_reset(guc); 59 if (!err || err != -ETIMEDOUT) 60 break; 61 } while (--retry); 62 63 if (unlikely(err)) 64 xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err)); 65 return err; 66 } 67 68 /** 69 * xe_gt_sriov_vf_reset - Reset GuC VF internal state. 70 * @gt: the &xe_gt 71 * 72 * It requires functional `GuC MMIO based communication`_. 73 * 74 * Return: 0 on success or a negative error code on failure. 75 */ 76 int xe_gt_sriov_vf_reset(struct xe_gt *gt) 77 { 78 if (!xe_device_uc_enabled(gt_to_xe(gt))) 79 return -ENODEV; 80 81 return vf_reset_guc_state(gt); 82 } 83 84 static int guc_action_match_version(struct xe_guc *guc, 85 struct xe_uc_fw_version *wanted, 86 struct xe_uc_fw_version *found) 87 { 88 u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = { 89 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 90 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 91 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, 92 GUC_ACTION_VF2GUC_MATCH_VERSION), 93 FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted->branch) | 94 FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted->major) | 95 FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted->minor), 96 }; 97 u32 response[GUC_MAX_MMIO_MSG_LEN]; 98 int ret; 99 100 BUILD_BUG_ON(VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN > GUC_MAX_MMIO_MSG_LEN); 101 102 ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response); 103 if (unlikely(ret < 0)) 104 return ret; 105 106 if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0]))) 107 return -EPROTO; 108 109 memset(found, 0, sizeof(struct xe_uc_fw_version)); 110 found->branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]); 111 found->major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]); 112 found->minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]); 113 found->patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]); 114 115 return 0; 116 } 117 118 static int guc_action_match_version_any(struct xe_guc *guc, 119 struct xe_uc_fw_version *found) 120 { 121 struct xe_uc_fw_version wanted = { 122 .branch = GUC_VERSION_BRANCH_ANY, 123 .major = GUC_VERSION_MAJOR_ANY, 124 .minor = GUC_VERSION_MINOR_ANY, 125 .patch = 0 126 }; 127 128 return guc_action_match_version(guc, &wanted, found); 129 } 130 131 static void vf_minimum_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver) 132 { 133 struct xe_device *xe = gt_to_xe(gt); 134 135 memset(ver, 0, sizeof(struct xe_uc_fw_version)); 136 137 switch (xe->info.platform) { 138 case XE_TIGERLAKE ... XE_PVC: 139 /* 1.1 this is current baseline for Xe driver */ 140 ver->branch = 0; 141 ver->major = 1; 142 ver->minor = 1; 143 break; 144 default: 145 /* 1.2 has support for the GMD_ID KLV */ 146 ver->branch = 0; 147 ver->major = 1; 148 ver->minor = 2; 149 break; 150 } 151 } 152 153 static void vf_wanted_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver) 154 { 155 /* for now it's the same as minimum */ 156 return vf_minimum_guc_version(gt, ver); 157 } 158 159 static int vf_handshake_with_guc(struct xe_gt *gt) 160 { 161 struct xe_uc_fw_version *guc_version = >->sriov.vf.guc_version; 162 struct xe_uc_fw_version wanted = {0}; 163 struct xe_guc *guc = >->uc.guc; 164 bool old = false; 165 int err; 166 167 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 168 169 /* select wanted version - prefer previous (if any) */ 170 if (guc_version->major || guc_version->minor) { 171 wanted = *guc_version; 172 old = true; 173 } else { 174 vf_wanted_guc_version(gt, &wanted); 175 xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY); 176 177 /* First time we handshake, so record the minimum wanted */ 178 gt->sriov.vf.wanted_guc_version = wanted; 179 } 180 181 err = guc_action_match_version(guc, &wanted, guc_version); 182 if (unlikely(err)) 183 goto fail; 184 185 if (old) { 186 /* we don't support interface version change */ 187 if (MAKE_GUC_VER_STRUCT(*guc_version) != MAKE_GUC_VER_STRUCT(wanted)) { 188 xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n", 189 guc_version->branch, guc_version->major, 190 guc_version->minor, guc_version->patch); 191 xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n", 192 wanted.branch, wanted.major, 193 wanted.minor, wanted.patch); 194 err = -EREMCHG; 195 goto fail; 196 } else { 197 /* version is unchanged, no need to re-verify it */ 198 return 0; 199 } 200 } 201 202 /* illegal */ 203 if (guc_version->major > wanted.major) { 204 err = -EPROTO; 205 goto unsupported; 206 } 207 208 /* there's no fallback on major version. */ 209 if (guc_version->major != wanted.major) { 210 err = -ENOPKG; 211 goto unsupported; 212 } 213 214 /* check against minimum version supported by us */ 215 vf_minimum_guc_version(gt, &wanted); 216 xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY); 217 if (MAKE_GUC_VER_STRUCT(*guc_version) < MAKE_GUC_VER_STRUCT(wanted)) { 218 err = -ENOKEY; 219 goto unsupported; 220 } 221 222 xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n", 223 guc_version->branch, guc_version->major, 224 guc_version->minor, guc_version->patch); 225 226 return 0; 227 228 unsupported: 229 xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n", 230 guc_version->branch, guc_version->major, 231 guc_version->minor, guc_version->patch, 232 ERR_PTR(err)); 233 fail: 234 xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n", 235 wanted.major, wanted.minor, ERR_PTR(err)); 236 237 /* try again with *any* just to query which version is supported */ 238 if (!guc_action_match_version_any(guc, &wanted)) 239 xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n", 240 wanted.branch, wanted.major, wanted.minor, wanted.patch); 241 return err; 242 } 243 244 /** 245 * xe_gt_sriov_vf_bootstrap - Query and setup GuC ABI interface version. 246 * @gt: the &xe_gt 247 * 248 * This function is for VF use only. 249 * It requires functional `GuC MMIO based communication`_. 250 * 251 * Return: 0 on success or a negative error code on failure. 252 */ 253 int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt) 254 { 255 int err; 256 257 if (!xe_device_uc_enabled(gt_to_xe(gt))) 258 return -ENODEV; 259 260 err = vf_reset_guc_state(gt); 261 if (unlikely(err)) 262 return err; 263 264 err = vf_handshake_with_guc(gt); 265 if (unlikely(err)) 266 return err; 267 268 return 0; 269 } 270 271 /** 272 * xe_gt_sriov_vf_guc_versions - Minimum required and found GuC ABI versions 273 * @gt: the &xe_gt 274 * @wanted: pointer to the xe_uc_fw_version to be filled with the wanted version 275 * @found: pointer to the xe_uc_fw_version to be filled with the found version 276 * 277 * This function is for VF use only and it can only be used after successful 278 * version handshake with the GuC. 279 */ 280 void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt, 281 struct xe_uc_fw_version *wanted, 282 struct xe_uc_fw_version *found) 283 { 284 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 285 xe_gt_assert(gt, gt->sriov.vf.guc_version.major); 286 287 if (wanted) 288 *wanted = gt->sriov.vf.wanted_guc_version; 289 290 if (found) 291 *found = gt->sriov.vf.guc_version; 292 } 293 294 static int guc_action_vf_notify_resfix_done(struct xe_guc *guc) 295 { 296 u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = { 297 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 298 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 299 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE), 300 }; 301 int ret; 302 303 ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request)); 304 305 return ret > 0 ? -EPROTO : ret; 306 } 307 308 /** 309 * xe_gt_sriov_vf_notify_resfix_done - Notify GuC about resource fixups apply completed. 310 * @gt: the &xe_gt struct instance linked to target GuC 311 * 312 * Returns: 0 if the operation completed successfully, or a negative error 313 * code otherwise. 314 */ 315 int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt) 316 { 317 struct xe_guc *guc = >->uc.guc; 318 int err; 319 320 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 321 322 err = guc_action_vf_notify_resfix_done(guc); 323 if (unlikely(err)) 324 xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n", 325 ERR_PTR(err)); 326 else 327 xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n"); 328 329 return err; 330 } 331 332 static int guc_action_query_single_klv(struct xe_guc *guc, u32 key, 333 u32 *value, u32 value_len) 334 { 335 u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = { 336 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 337 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 338 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, 339 GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV), 340 FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key), 341 }; 342 u32 response[GUC_MAX_MMIO_MSG_LEN]; 343 u32 length; 344 int ret; 345 346 BUILD_BUG_ON(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN > GUC_MAX_MMIO_MSG_LEN); 347 ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response); 348 if (unlikely(ret < 0)) 349 return ret; 350 351 if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0]))) 352 return -EPROTO; 353 354 length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]); 355 if (unlikely(length > value_len)) 356 return -EOVERFLOW; 357 if (unlikely(length < value_len)) 358 return -ENODATA; 359 360 switch (value_len) { 361 default: 362 xe_gt_WARN_ON(guc_to_gt(guc), value_len > 3); 363 fallthrough; 364 case 3: 365 value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]); 366 fallthrough; 367 case 2: 368 value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]); 369 fallthrough; 370 case 1: 371 value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]); 372 fallthrough; 373 case 0: 374 break; 375 } 376 377 return 0; 378 } 379 380 static int guc_action_query_single_klv32(struct xe_guc *guc, u32 key, u32 *value32) 381 { 382 return guc_action_query_single_klv(guc, key, value32, hxg_sizeof(u32)); 383 } 384 385 static int guc_action_query_single_klv64(struct xe_guc *guc, u32 key, u64 *value64) 386 { 387 u32 value[2]; 388 int err; 389 390 err = guc_action_query_single_klv(guc, key, value, hxg_sizeof(value)); 391 if (unlikely(err)) 392 return err; 393 394 *value64 = make_u64_from_u32(value[1], value[0]); 395 return 0; 396 } 397 398 static bool has_gmdid(struct xe_device *xe) 399 { 400 return GRAPHICS_VERx100(xe) >= 1270; 401 } 402 403 /** 404 * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO. 405 * @gt: the &xe_gt 406 * 407 * This function is for VF use only. 408 * 409 * Return: value of GMDID KLV on success or 0 on failure. 410 */ 411 u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt) 412 { 413 const char *type = xe_gt_is_media_type(gt) ? "media" : "graphics"; 414 struct xe_guc *guc = >->uc.guc; 415 u32 value; 416 int err; 417 418 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 419 xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt))); 420 xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2); 421 422 err = guc_action_query_single_klv32(guc, GUC_KLV_GLOBAL_CFG_GMD_ID_KEY, &value); 423 if (unlikely(err)) { 424 xe_gt_sriov_err(gt, "Failed to obtain %s GMDID (%pe)\n", 425 type, ERR_PTR(err)); 426 return 0; 427 } 428 429 xe_gt_sriov_dbg(gt, "%s GMDID = %#x\n", type, value); 430 return value; 431 } 432 433 static int vf_get_ggtt_info(struct xe_gt *gt) 434 { 435 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 436 struct xe_guc *guc = >->uc.guc; 437 u64 start, size; 438 int err; 439 440 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 441 442 err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start); 443 if (unlikely(err)) 444 return err; 445 446 err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size); 447 if (unlikely(err)) 448 return err; 449 450 if (config->ggtt_size && config->ggtt_size != size) { 451 xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n", 452 size / SZ_1K, config->ggtt_size / SZ_1K); 453 return -EREMCHG; 454 } 455 456 xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n", 457 start, start + size - 1, size / SZ_1K); 458 459 config->ggtt_shift = start - (s64)config->ggtt_base; 460 config->ggtt_base = start; 461 config->ggtt_size = size; 462 463 return config->ggtt_size ? 0 : -ENODATA; 464 } 465 466 static int vf_get_lmem_info(struct xe_gt *gt) 467 { 468 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 469 struct xe_guc *guc = >->uc.guc; 470 char size_str[10]; 471 u64 size; 472 int err; 473 474 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 475 476 err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size); 477 if (unlikely(err)) 478 return err; 479 480 if (config->lmem_size && config->lmem_size != size) { 481 xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n", 482 size / SZ_1M, config->lmem_size / SZ_1M); 483 return -EREMCHG; 484 } 485 486 string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str)); 487 xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str); 488 489 config->lmem_size = size; 490 491 return config->lmem_size ? 0 : -ENODATA; 492 } 493 494 static int vf_get_submission_cfg(struct xe_gt *gt) 495 { 496 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 497 struct xe_guc *guc = >->uc.guc; 498 u32 num_ctxs, num_dbs; 499 int err; 500 501 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 502 503 err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs); 504 if (unlikely(err)) 505 return err; 506 507 err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs); 508 if (unlikely(err)) 509 return err; 510 511 if (config->num_ctxs && config->num_ctxs != num_ctxs) { 512 xe_gt_sriov_err(gt, "Unexpected CTXs reassignment: %u != %u\n", 513 num_ctxs, config->num_ctxs); 514 return -EREMCHG; 515 } 516 if (config->num_dbs && config->num_dbs != num_dbs) { 517 xe_gt_sriov_err(gt, "Unexpected DBs reassignment: %u != %u\n", 518 num_dbs, config->num_dbs); 519 return -EREMCHG; 520 } 521 522 xe_gt_sriov_dbg_verbose(gt, "CTXs %u DBs %u\n", num_ctxs, num_dbs); 523 524 config->num_ctxs = num_ctxs; 525 config->num_dbs = num_dbs; 526 527 return config->num_ctxs ? 0 : -ENODATA; 528 } 529 530 static void vf_cache_gmdid(struct xe_gt *gt) 531 { 532 xe_gt_assert(gt, has_gmdid(gt_to_xe(gt))); 533 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 534 535 gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt); 536 } 537 538 /** 539 * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO. 540 * @gt: the &xe_gt 541 * 542 * This function is for VF use only. 543 * 544 * Return: 0 on success or a negative error code on failure. 545 */ 546 int xe_gt_sriov_vf_query_config(struct xe_gt *gt) 547 { 548 struct xe_device *xe = gt_to_xe(gt); 549 int err; 550 551 err = vf_get_ggtt_info(gt); 552 if (unlikely(err)) 553 return err; 554 555 if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) { 556 err = vf_get_lmem_info(gt); 557 if (unlikely(err)) 558 return err; 559 } 560 561 err = vf_get_submission_cfg(gt); 562 if (unlikely(err)) 563 return err; 564 565 if (has_gmdid(xe)) 566 vf_cache_gmdid(gt); 567 568 return 0; 569 } 570 571 /** 572 * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration. 573 * @gt: the &xe_gt 574 * 575 * This function is for VF use only. 576 * 577 * Return: number of GuC context IDs assigned to VF. 578 */ 579 u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt) 580 { 581 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 582 xe_gt_assert(gt, gt->sriov.vf.guc_version.major); 583 xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs); 584 585 return gt->sriov.vf.self_config.num_ctxs; 586 } 587 588 /** 589 * xe_gt_sriov_vf_lmem - VF LMEM configuration. 590 * @gt: the &xe_gt 591 * 592 * This function is for VF use only. 593 * 594 * Return: size of the LMEM assigned to VF. 595 */ 596 u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt) 597 { 598 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 599 xe_gt_assert(gt, gt->sriov.vf.guc_version.major); 600 xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size); 601 602 return gt->sriov.vf.self_config.lmem_size; 603 } 604 605 /** 606 * xe_gt_sriov_vf_ggtt - VF GGTT configuration. 607 * @gt: the &xe_gt 608 * 609 * This function is for VF use only. 610 * 611 * Return: size of the GGTT assigned to VF. 612 */ 613 u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt) 614 { 615 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 616 xe_gt_assert(gt, gt->sriov.vf.guc_version.major); 617 xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size); 618 619 return gt->sriov.vf.self_config.ggtt_size; 620 } 621 622 /** 623 * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset. 624 * @gt: the &xe_gt 625 * 626 * This function is for VF use only. 627 * 628 * Return: base offset of the GGTT assigned to VF. 629 */ 630 u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt) 631 { 632 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 633 xe_gt_assert(gt, gt->sriov.vf.guc_version.major); 634 xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size); 635 636 return gt->sriov.vf.self_config.ggtt_base; 637 } 638 639 /** 640 * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration 641 * @gt: the &xe_gt struct instance 642 * 643 * This function is for VF use only. 644 * 645 * Return: The shift value; could be negative 646 */ 647 s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt) 648 { 649 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 650 651 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 652 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); 653 654 return config->ggtt_shift; 655 } 656 657 static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor) 658 { 659 u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = { 660 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 661 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 662 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE), 663 FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) | 664 FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor), 665 }; 666 u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN]; 667 int ret; 668 669 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 670 671 ret = xe_guc_relay_send_to_pf(>->uc.guc.relay, 672 request, ARRAY_SIZE(request), 673 response, ARRAY_SIZE(response)); 674 if (unlikely(ret < 0)) 675 return ret; 676 677 if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN)) 678 return -EPROTO; 679 680 if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0]))) 681 return -EPROTO; 682 683 *major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]); 684 *minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]); 685 686 return 0; 687 } 688 689 static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor) 690 { 691 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 692 693 gt->sriov.vf.pf_version.major = major; 694 gt->sriov.vf.pf_version.minor = minor; 695 } 696 697 static void vf_disconnect_pf(struct xe_gt *gt) 698 { 699 vf_connect_pf(gt, 0, 0); 700 } 701 702 static int vf_handshake_with_pf(struct xe_gt *gt) 703 { 704 u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR; 705 u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR; 706 u32 major = major_wanted, minor = minor_wanted; 707 int err; 708 709 err = relay_action_handshake(gt, &major, &minor); 710 if (unlikely(err)) 711 goto failed; 712 713 if (!major && !minor) { 714 err = -ENODATA; 715 goto failed; 716 } 717 718 xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor); 719 vf_connect_pf(gt, major, minor); 720 return 0; 721 722 failed: 723 xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n", 724 major, minor, ERR_PTR(err)); 725 vf_disconnect_pf(gt); 726 return err; 727 } 728 729 /** 730 * xe_gt_sriov_vf_connect - Establish connection with the PF driver. 731 * @gt: the &xe_gt 732 * 733 * This function is for VF use only. 734 * 735 * Return: 0 on success or a negative error code on failure. 736 */ 737 int xe_gt_sriov_vf_connect(struct xe_gt *gt) 738 { 739 int err; 740 741 err = vf_handshake_with_pf(gt); 742 if (unlikely(err)) 743 goto failed; 744 745 return 0; 746 747 failed: 748 xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err)); 749 return err; 750 } 751 752 /** 753 * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery, 754 * or just mark that a GuC is ready for it. 755 * @gt: the &xe_gt struct instance linked to target GuC 756 * 757 * This function shall be called only by VF. 758 */ 759 void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt) 760 { 761 struct xe_device *xe = gt_to_xe(gt); 762 763 xe_gt_assert(gt, IS_SRIOV_VF(xe)); 764 765 set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags); 766 /* 767 * We need to be certain that if all flags were set, at least one 768 * thread will notice that and schedule the recovery. 769 */ 770 smp_mb__after_atomic(); 771 772 xe_gt_sriov_info(gt, "ready for recovery after migration\n"); 773 xe_sriov_vf_start_migration_recovery(xe); 774 } 775 776 static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor) 777 { 778 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 779 780 return major == gt->sriov.vf.pf_version.major && 781 minor <= gt->sriov.vf.pf_version.minor; 782 } 783 784 static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs) 785 { 786 struct vf_runtime_reg *regs = gt->sriov.vf.runtime.regs; 787 unsigned int regs_size = round_up(num_regs, 4); 788 struct xe_device *xe = gt_to_xe(gt); 789 790 xe_gt_assert(gt, IS_SRIOV_VF(xe)); 791 792 if (regs) { 793 if (num_regs <= gt->sriov.vf.runtime.regs_size) { 794 memset(regs, 0, num_regs * sizeof(*regs)); 795 gt->sriov.vf.runtime.num_regs = num_regs; 796 return 0; 797 } 798 799 drmm_kfree(&xe->drm, regs); 800 gt->sriov.vf.runtime.regs = NULL; 801 gt->sriov.vf.runtime.num_regs = 0; 802 gt->sriov.vf.runtime.regs_size = 0; 803 } 804 805 regs = drmm_kcalloc(&xe->drm, regs_size, sizeof(*regs), GFP_KERNEL); 806 if (unlikely(!regs)) 807 return -ENOMEM; 808 809 gt->sriov.vf.runtime.regs = regs; 810 gt->sriov.vf.runtime.num_regs = num_regs; 811 gt->sriov.vf.runtime.regs_size = regs_size; 812 return 0; 813 } 814 815 static int vf_query_runtime_info(struct xe_gt *gt) 816 { 817 u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN]; 818 u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 32]; /* up to 16 regs */ 819 u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2; 820 u32 count, remaining, num, i; 821 u32 start = 0; 822 int ret; 823 824 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 825 xe_gt_assert(gt, limit); 826 827 /* this is part of the 1.0 PF/VF ABI */ 828 if (!vf_is_negotiated(gt, 1, 0)) 829 return -ENOPKG; 830 831 request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | 832 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | 833 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, 834 GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME) | 835 FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit); 836 837 repeat: 838 request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start); 839 ret = xe_guc_relay_send_to_pf(>->uc.guc.relay, 840 request, ARRAY_SIZE(request), 841 response, ARRAY_SIZE(response)); 842 if (unlikely(ret < 0)) 843 goto failed; 844 845 if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) { 846 ret = -EPROTO; 847 goto failed; 848 } 849 if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) { 850 ret = -EPROTO; 851 goto failed; 852 } 853 854 num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2; 855 count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]); 856 remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]); 857 858 xe_gt_sriov_dbg_verbose(gt, "count=%u num=%u ret=%d start=%u remaining=%u\n", 859 count, num, ret, start, remaining); 860 861 if (unlikely(count != num)) { 862 ret = -EPROTO; 863 goto failed; 864 } 865 866 if (start == 0) { 867 ret = vf_prepare_runtime_info(gt, num + remaining); 868 if (unlikely(ret < 0)) 869 goto failed; 870 } else if (unlikely(start + num > gt->sriov.vf.runtime.num_regs)) { 871 ret = -EPROTO; 872 goto failed; 873 } 874 875 for (i = 0; i < num; ++i) { 876 struct vf_runtime_reg *reg = >->sriov.vf.runtime.regs[start + i]; 877 878 reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i]; 879 reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1]; 880 } 881 882 if (remaining) { 883 start += num; 884 goto repeat; 885 } 886 887 return 0; 888 889 failed: 890 vf_prepare_runtime_info(gt, 0); 891 return ret; 892 } 893 894 static void vf_show_runtime_info(struct xe_gt *gt) 895 { 896 struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs; 897 unsigned int size = gt->sriov.vf.runtime.num_regs; 898 899 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 900 901 for (; size--; vf_regs++) 902 xe_gt_sriov_dbg(gt, "runtime(%#x) = %#x\n", 903 vf_regs->offset, vf_regs->value); 904 } 905 906 /** 907 * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data. 908 * @gt: the &xe_gt 909 * 910 * This function is for VF use only. 911 * 912 * Return: 0 on success or a negative error code on failure. 913 */ 914 int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt) 915 { 916 int err; 917 918 err = vf_query_runtime_info(gt); 919 if (unlikely(err)) 920 goto failed; 921 922 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) 923 vf_show_runtime_info(gt); 924 925 return 0; 926 927 failed: 928 xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n", 929 ERR_PTR(err)); 930 return err; 931 } 932 933 static int vf_runtime_reg_cmp(const void *a, const void *b) 934 { 935 const struct vf_runtime_reg *ra = a; 936 const struct vf_runtime_reg *rb = b; 937 938 return (int)ra->offset - (int)rb->offset; 939 } 940 941 static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr) 942 { 943 struct xe_gt_sriov_vf_runtime *runtime = >->sriov.vf.runtime; 944 struct vf_runtime_reg key = { .offset = addr }; 945 946 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 947 948 return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key), 949 vf_runtime_reg_cmp); 950 } 951 952 /** 953 * xe_gt_sriov_vf_read32 - Get a register value from the runtime data. 954 * @gt: the &xe_gt 955 * @reg: the register to read 956 * 957 * This function is for VF use only. 958 * This function shall be called after VF has connected to PF. 959 * This function is dedicated for registers that VFs can't read directly. 960 * 961 * Return: register value obtained from the PF or 0 if not found. 962 */ 963 u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg) 964 { 965 u32 addr = xe_mmio_adjusted_addr(>->mmio, reg.addr); 966 struct vf_runtime_reg *rr; 967 968 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 969 xe_gt_assert(gt, gt->sriov.vf.pf_version.major); 970 xe_gt_assert(gt, !reg.vf); 971 972 if (reg.addr == GMD_ID.addr) { 973 xe_gt_sriov_dbg_verbose(gt, "gmdid(%#x) = %#x\n", 974 addr, gt->sriov.vf.runtime.gmdid); 975 return gt->sriov.vf.runtime.gmdid; 976 } 977 978 rr = vf_lookup_reg(gt, addr); 979 if (!rr) { 980 xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG), 981 "VF is trying to read an inaccessible register %#x+%#x\n", 982 reg.addr, addr - reg.addr); 983 return 0; 984 } 985 986 xe_gt_sriov_dbg_verbose(gt, "runtime[%#x] = %#x\n", addr, rr->value); 987 return rr->value; 988 } 989 990 /** 991 * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register. 992 * @gt: the &xe_gt 993 * @reg: the register to write 994 * @val: value to write 995 * 996 * This function is for VF use only. 997 * Currently it will trigger a WARN if running on debug build. 998 */ 999 void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) 1000 { 1001 u32 addr = xe_mmio_adjusted_addr(>->mmio, reg.addr); 1002 1003 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 1004 xe_gt_assert(gt, !reg.vf); 1005 1006 /* 1007 * In the future, we may want to handle selected writes to inaccessible 1008 * registers in some custom way, but for now let's just log a warning 1009 * about such attempt, as likely we might be doing something wrong. 1010 */ 1011 xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG), 1012 "VF is trying to write %#x to an inaccessible register %#x+%#x\n", 1013 val, reg.addr, addr - reg.addr); 1014 } 1015 1016 /** 1017 * xe_gt_sriov_vf_print_config - Print VF self config. 1018 * @gt: the &xe_gt 1019 * @p: the &drm_printer 1020 * 1021 * This function is for VF use only. 1022 */ 1023 void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p) 1024 { 1025 struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; 1026 struct xe_device *xe = gt_to_xe(gt); 1027 char buf[10]; 1028 1029 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 1030 1031 drm_printf(p, "GGTT range:\t%#llx-%#llx\n", 1032 config->ggtt_base, 1033 config->ggtt_base + config->ggtt_size - 1); 1034 1035 string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf)); 1036 drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf); 1037 1038 drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift); 1039 1040 if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) { 1041 string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf)); 1042 drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf); 1043 } 1044 1045 drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs); 1046 drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs); 1047 } 1048 1049 /** 1050 * xe_gt_sriov_vf_print_runtime - Print VF's runtime regs received from PF. 1051 * @gt: the &xe_gt 1052 * @p: the &drm_printer 1053 * 1054 * This function is for VF use only. 1055 */ 1056 void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p) 1057 { 1058 struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs; 1059 unsigned int size = gt->sriov.vf.runtime.num_regs; 1060 1061 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 1062 1063 for (; size--; vf_regs++) 1064 drm_printf(p, "%#x = %#x\n", vf_regs->offset, vf_regs->value); 1065 } 1066 1067 /** 1068 * xe_gt_sriov_vf_print_version - Print VF ABI versions. 1069 * @gt: the &xe_gt 1070 * @p: the &drm_printer 1071 * 1072 * This function is for VF use only. 1073 */ 1074 void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p) 1075 { 1076 struct xe_uc_fw_version *guc_version = >->sriov.vf.guc_version; 1077 struct xe_uc_fw_version *wanted = >->sriov.vf.wanted_guc_version; 1078 struct xe_gt_sriov_vf_relay_version *pf_version = >->sriov.vf.pf_version; 1079 struct xe_uc_fw_version ver; 1080 1081 xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); 1082 1083 drm_printf(p, "GuC ABI:\n"); 1084 1085 vf_minimum_guc_version(gt, &ver); 1086 drm_printf(p, "\tbase:\t%u.%u.%u.*\n", ver.branch, ver.major, ver.minor); 1087 1088 drm_printf(p, "\twanted:\t%u.%u.%u.*\n", 1089 wanted->branch, wanted->major, wanted->minor); 1090 1091 drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n", 1092 guc_version->branch, guc_version->major, 1093 guc_version->minor, guc_version->patch); 1094 1095 drm_printf(p, "PF ABI:\n"); 1096 1097 drm_printf(p, "\tbase:\t%u.%u\n", 1098 GUC_RELAY_VERSION_BASE_MAJOR, GUC_RELAY_VERSION_BASE_MINOR); 1099 drm_printf(p, "\twanted:\t%u.%u\n", 1100 GUC_RELAY_VERSION_LATEST_MAJOR, GUC_RELAY_VERSION_LATEST_MINOR); 1101 drm_printf(p, "\thandshake:\t%u.%u\n", 1102 pf_version->major, pf_version->minor); 1103 } 1104