// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

/*
 * Return: number of KLVs that were successfully parsed and saved,
 * negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 * negative error code on failure.
 */
static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
{
	const u32 bytes = num_dwords * sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(bytes, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);

	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);

	xe_bo_unpin_map_no_vm(bo);

	return ret;
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 * negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	int ret;

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}
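
/*
 * Wire format of the KLV streams built by the helpers below: each KLV is one
 * header dword, FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, len),
 * followed by 'len' value dwords. A 32-bit value thus occupies 2 dwords and
 * a 64-bit value (sent as lower/upper halves) occupies 3.
 */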

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 preempt_timeout)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, preempt_timeout);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
{
	u32 n = 0;

	if (drm_mm_node_allocated(&config->ggtt_region)) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
		cfg[n++] = lower_32_bits(config->ggtt_region.start);
		cfg[n++] = upper_32_bits(config->ggtt_region.start);

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region.size);
		cfg[n++] = upper_32_bits(config->ggtt_region.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config);

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
	cfg[n++] = config->begin_ctx;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
	cfg[n++] = config->begin_db;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

	return n;
}
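
/*
 * Note: a full config encoded by encode_config() is small - the GGTT KLVs
 * take 6 dwords, the context and doorbell ranges 4 dwords each, the LMEM
 * size 3 and the two scheduling KLVs 2 each - so the SZ_4K scratch buffer
 * used below leaves plenty of headroom.
 */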

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	num_dwords = encode_config(cfg, config);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);

	kfree(cfg);
	return err;
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
{
	struct xe_ggtt *ggtt = tile->mem.ggtt;

	if (drm_mm_node_allocated(node)) {
		/*
		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as the PTEs will be implicitly re-assigned to
		 * the PF by the xe_ggtt_clear() called from the
		 * xe_ggtt_remove_node() below.
		 */
		xe_ggtt_remove_node(ggtt, node, false);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), &config->ggtt_region);
}
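
/*
 * GGTT (re)provisioning is revoke-then-assign: any existing assignment is
 * first zeroed on the GuC side and its node released, then a fresh node is
 * inserted, its PTEs are assigned to the VF and the new range is pushed to
 * the GuCs of both the primary and the media GT.
 */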

static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct drm_mm_node *node = &config->ggtt_region;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (drm_mm_node_allocated(node)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_ggtt(tile, node);
	}
	xe_gt_assert(gt, !drm_mm_node_allocated(node));

	if (!size)
		return 0;

	err = xe_ggtt_insert_special_node(ggtt, node, size, alignment);
	if (unlikely(err))
		return err;

	xe_ggtt_assign(ggtt, node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->start, node->start + node->size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->start, node->size);
	if (unlikely(err))
		return err;

	return 0;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct drm_mm_node *node = &config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return drm_mm_node_allocated(node) ? node->size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}
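
/*
 * The *_set_*_done() helpers below turn a provisioning attempt into uniform
 * log output: on failure they report both the rejected request and the value
 * still in effect, which is re-read since it may have changed along the way.
 */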

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If @vfid represents the PF, then the function will change the PF's spare
 * GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}
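
/*
 * When sizing the largest free GGTT region below, the PF's spare GGTT is
 * preferentially accounted against holes other than the largest one; only
 * the spare that cannot be covered elsewhere is subtracted from the largest
 * hole before it is reported as available for VFs.
 */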

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
	u64 hole_start, hole_end, hole_size;
	u64 max_hole = 0;

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
					hole_start, hole_size / SZ_1K);
		spare -= min3(spare, hole_size, max_hole);
		max_hole = max(max_hole, hole_size);
	}

	mutex_unlock(&ggtt->lock);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at the single largest GGTT
	 * region, as that will always be the best fit for the 1 VF case,
	 * and will most likely also nicely cover other cases where VFs are
	 * provisioned on a fresh and idle PF driver, without any stale GGTT
	 * allocations spread in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}
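
/*
 * Same revoke-then-assign flow as pf_provision_vf_ggtt(), applied to a range
 * of GuC context IDs taken from the GuC ID manager.
 */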

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If @vfid represents the PF, then the number of the PF's spare GuC context
 * IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}
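
/*
 * Fair quota estimation is a probe: starting from an even split of the
 * non-spare IDs, trial-reserve fair * num_vfs IDs and release them again
 * immediately, stepping fair down until a reservation succeeds. The real
 * per-VF reservations are made later by the bulk provisioning path.
 */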

static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}
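
/*
 * GuC doorbell IDs are managed like the context IDs above, except that the
 * pool is the fixed GUC_NUM_DOORBELLS and ranges come from the doorbell
 * manager (xe_guc_db_mgr) instead of the ID manager.
 */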

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If @vfid represents the PF, then the number of the PF's spare GuC doorbell
 * IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}
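
/*
 * Same trial-reservation probe as pf_estimate_fair_ctxs(), over the fixed
 * GUC_NUM_DOORBELLS pool.
 */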

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}
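
/*
 * The LMTT must cover the VF's LMEM from all tiles: each tile's tree is
 * rebuilt for the combined size and then populated, at increasing offsets,
 * from every primary GT's backing object. On any failure all pages are
 * dropped again, so a VF never runs with a partially populated tree.
 */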

static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}
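
/*
 * LMEM follows the same revoke-then-assign pattern: push a zero LMEM config,
 * drop the LMTT pages and release the old backing object before pinning a
 * new one and rebuilding the LMTT for the new size.
 */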

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	err = pf_update_vf_lmtt(xe, vfid);
	if (unlikely(err))
		goto release;

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#else
#define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!IS_DGFX(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}
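
/*
 * Illustrative only (a sketch, not code from this driver): a PF driver
 * enabling VFs would typically run, per GT, something like
 *
 *	err = xe_gt_sriov_pf_config_set_fair(gt, 1, num_vfs);
 *
 * to split the remaining resources evenly across VF1..VFn before the VFs
 * are allowed to boot.
 */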

/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}

static const char *exec_quantum_unit(u32 exec_quantum)
{
	return exec_quantum ? "ms" : "(infinity)";
}

static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
				     u32 exec_quantum)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_exec_quantum(gt, vfid, exec_quantum);
	if (unlikely(err))
		return err;

	config->exec_quantum = exec_quantum;
	return 0;
}

static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->exec_quantum;
}

/**
 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
					   u32 exec_quantum)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, exec_quantum,
				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
				      "execution quantum", exec_quantum_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) execution quantum in milliseconds.
 */
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	u32 exec_quantum;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	exec_quantum = pf_get_exec_quantum(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return exec_quantum;
}

static const char *preempt_timeout_unit(u32 preempt_timeout)
{
	return preempt_timeout ? "us" : "(infinity)";
}

static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					u32 preempt_timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, preempt_timeout);
	if (unlikely(err))
		return err;

	config->preempt_timeout = preempt_timeout;

	return 0;
}

static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->preempt_timeout;
}

/**
 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					      u32 preempt_timeout)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
				      "preemption timeout", preempt_timeout_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) preemption timeout in microseconds.
 */
u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	u32 preempt_timeout;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return preempt_timeout;
}

static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	config->exec_quantum = 0;
	config->preempt_timeout = 0;
}

static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	if (!xe_gt_is_media_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		pf_release_vf_config_lmem(gt, config);
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
	pf_reset_config_sched(gt, config);
}

/**
 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @force: force configuration release
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
{
	int err;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err || force)
		pf_release_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
				   vfid, ERR_PTR(err),
				   force ? " but all resources were released anyway!" : "");
	}

	return force ? 0 : err;
}

/**
 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @refresh: explicit refresh
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
{
	int err = 0;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (refresh)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
	}

	return err;
}

/**
 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT configuration data for all VFs.
 * VFs without provisioned GGTT are ignored.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!drm_mm_node_allocated(&config->ggtt_region))
			continue;

		string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
			   n, config->ggtt_region.start,
			   config->ggtt_region.start + config->ggtt_region.size - 1, buf);
	}

	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC context ID allocations across all VFs.
 * VFs without GuC context IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_ctxs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_ctx,
			   config->begin_ctx + config->num_ctxs - 1,
			   config->num_ctxs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC doorbell ID allocations across all VFs.
 * VFs without GuC doorbell IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_dbs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_db,
			   config->begin_db + config->num_dbs - 1,
			   config->num_dbs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available for provisioning.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
	u64 hole_start, hole_end, hole_size;
	u64 spare, avail, total = 0;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	spare = pf_get_spare_ggtt(gt);

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		total += hole_size;

		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
			   hole_start, hole_end - 1, buf);
	}

	mutex_unlock(&ggtt->lock);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);

	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);

	avail = total > spare ? total - spare : 0;

	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);

	return 0;
}