// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
{
	const u32 bytes = num_dwords * sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(bytes, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);

	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);

	xe_bo_unpin_map_no_vm(bo);

	return ret;
}
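/*
 * Illustration only (not part of the GuC ABI headers): the buffer passed
 * to GUC_ACTION_PF2GUC_UPDATE_VF_CFG above is a packed stream of KLVs,
 * where each item is a key/length dword followed by its value dwords.
 * A single 64-bit item, as emitted by pf_push_vf_cfg_u64() below, spans
 * three dwords:
 *
 *	dw0: FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2)
 *	dw1: lower_32_bits(value)
 *	dw2: upper_32_bits(value)
 */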
/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	int ret;

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
{
	u32 n = 0;

	if (drm_mm_node_allocated(&config->ggtt_region)) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
		cfg[n++] = lower_32_bits(config->ggtt_region.start);
		cfg[n++] = upper_32_bits(config->ggtt_region.start);

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region.size);
		cfg[n++] = upper_32_bits(config->ggtt_region.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config);

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
	cfg[n++] = config->begin_ctx;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
	cfg[n++] = config->begin_db;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

	return n;
}

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	num_dwords = encode_config(cfg, config);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);

	kfree(cfg);
	return err;
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}
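/*
 * Illustration only: for a VF with a provisioned GGTT region, no LMEM
 * object and default scheduling parameters, encode_config() above emits
 * a KLV stream of the shape (keys abbreviated):
 *
 *	GGTT_START lo hi, GGTT_SIZE lo hi,
 *	BEGIN_CONTEXT_ID v, NUM_CONTEXTS v,
 *	BEGIN_DOORBELL_ID v, NUM_DOORBELLS v,
 *	EXEC_QUANTUM v, PREEMPT_TIMEOUT v
 *
 * which pf_push_full_vf_config() sends as one PF2GUC_UPDATE_VF_CFG action.
 */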
static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
	       pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
{
	struct xe_ggtt *ggtt = tile->mem.ggtt;

	if (drm_mm_node_allocated(node)) {
		/*
		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as PTE will be implicitly re-assigned to PF by
		 * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
		 */
		xe_ggtt_remove_node(ggtt, node, false);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), &config->ggtt_region);
}

static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct drm_mm_node *node = &config->ggtt_region;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (drm_mm_node_allocated(node)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_ggtt(tile, node);
	}
	xe_gt_assert(gt, !drm_mm_node_allocated(node));

	if (!size)
		return 0;

	err = xe_ggtt_insert_special_node(ggtt, node, size, alignment);
	if (unlikely(err))
		return err;

	xe_ggtt_assign(ggtt, node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->start, node->start + node->size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->start, node->size);
	if (unlikely(err))
		return err;

	return 0;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct drm_mm_node *node = &config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return drm_mm_node_allocated(node) ? node->size : 0;
}
/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents PF, then function will change PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}
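/*
 * Illustration only (hypothetical caller, error handling elided): a PF
 * control path could provision VF1 with 1 GiB of GGTT and then shrink
 * the PF's own spare to 256 MiB using xe_gt_sriov_pf_config_set_ggtt():
 *
 *	err = xe_gt_sriov_pf_config_set_ggtt(gt, 1, SZ_1G);
 *	err = xe_gt_sriov_pf_config_set_ggtt(gt, 0, SZ_256M);
 *
 * vfid == 0 addresses the PF itself and only updates the spare size.
 */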
/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest continuous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
	u64 hole_start, hole_end, hole_size;
	u64 max_hole = 0;

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
					hole_start, hole_size / SZ_1K);
		spare -= min3(spare, hole_size, max_hole);
		max_hole = max(max_hole, hole_size);
	}

	mutex_unlock(&ggtt->lock);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at single largest GGTT region
	 * as that will be always the best fit for 1 VF case, and most likely
	 * will also nicely cover other cases where VFs are provisioned on the
	 * fresh and idle PF driver, without any stale GGTT allocations spread
	 * in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}
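/*
 * Worked example for pf_estimate_fair_ggtt() above (illustrative numbers):
 * with a single 8 GiB hole, a 64 MiB spare and num_vfs == 3,
 * pf_get_max_ggtt() reports 8 GiB - 64 MiB, so each VF gets
 * ALIGN_DOWN((8 GiB - 64 MiB) / 3, alignment), roughly 2.6 GiB.
 */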
/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
	       hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}
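/*
 * Illustration only: re-provisioning follows the same pattern for GGTT,
 * context IDs and doorbells (see pf_provision_vf_ctxs() above):
 *
 *	1) push an empty config so the GuC stops using the old range,
 *	2) release the old range back to the manager,
 *	3) reserve a new range,
 *	4) push the new range to the GuC (rolling back on failure).
 */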
/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC contexts IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF then number of PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC contexts IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC contexts IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}
/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC contexts IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}
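/*
 * Worked example for pf_estimate_fair_ctxs() above (illustrative numbers):
 * with idm->total == 1024, spare == 256 and num_vfs == 3, the estimate
 * starts at (1024 - 256) / 3 == 256 and probes xe_guc_id_mgr_reserve()
 * with decreasing values until a trial reservation of fair * num_vfs IDs
 * succeeds; fragmentation may make the result smaller than the estimate.
 */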
static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}
/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbells IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF then number of PF's spare GuC doorbells IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbells IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbells IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}
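/*
 * Worked example for pf_estimate_fair_dbs() above (illustrative numbers,
 * assuming GUC_NUM_DOORBELLS == 256): with spare == 1 and num_vfs == 7,
 * the first probe is (256 - 1) / 7 == 36 doorbells per VF; 7 * 36 == 252
 * IDs are trial-reserved and immediately released again on success.
 */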
/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}
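/*
 * Illustration only: pf_update_vf_lmtt() below maps the VF's combined
 * LMEM across all tiles. For a VF with LMEM objects on two tiles, each
 * tile's LMTT covers the sum of both quotas, populated back to back:
 *
 *	total = lmem(tile0) + lmem(tile1);
 *	xe_lmtt_prepare_pages(lmtt, vfid, total);
 *	xe_lmtt_populate_pages(lmtt, vfid, bo0, 0);
 *	xe_lmtt_populate_pages(lmtt, vfid, bo1, bo0->size);
 */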
static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	err = pf_update_vf_lmtt(xe, vfid);
	if (unlikely(err))
		goto release;

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}
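/*
 * Illustration only (hypothetical caller, error handling elided):
 *
 *	err = xe_gt_sriov_pf_config_set_lmem(gt, 1, SZ_4G);
 *
 * The requested size is rounded up to the 2 MiB LMEM alignment, backed
 * by a pinned VRAM object and mirrored into the LMTT before the new
 * quota is pushed to the GuC.
 */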
/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#else
#define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!IS_DGFX(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}
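/*
 * Illustration only (hypothetical PF enabling flow): once num_vfs is
 * known, a single call per GT to the composite helper defined below
 * fairly splits GGTT, LMEM, context IDs and doorbell IDs:
 *
 *	for_each_gt(gt, xe, gtid)
 *		err = xe_gt_sriov_pf_config_set_fair(gt, 1, num_vfs);
 */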
/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}

static const char *exec_quantum_unit(u32 exec_quantum)
{
	return exec_quantum ? "ms" : "(infinity)";
}

static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
				     u32 exec_quantum)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
	if (unlikely(err))
		return err;

	config->exec_quantum = exec_quantum;
	return 0;
}

static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->exec_quantum;
}

/**
 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
					   u32 exec_quantum)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, exec_quantum,
				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
				      "execution quantum", exec_quantum_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) execution quantum in milliseconds.
 */
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	u32 exec_quantum;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	exec_quantum = pf_get_exec_quantum(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return exec_quantum;
}

static const char *preempt_timeout_unit(u32 preempt_timeout)
{
	return preempt_timeout ? "us" : "(infinity)";
}

static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					u32 preempt_timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
	if (unlikely(err))
		return err;

	config->preempt_timeout = preempt_timeout;

	return 0;
}

static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->preempt_timeout;
}

/**
 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					      u32 preempt_timeout)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
				      "preemption timeout", preempt_timeout_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) preemption timeout in microseconds.
 */
u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	u32 preempt_timeout;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return preempt_timeout;
}
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	config->exec_quantum = 0;
	config->preempt_timeout = 0;
}

static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
				  enum xe_guc_klv_threshold_index index, u32 value)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
	if (unlikely(err))
		return err;

	config->thresholds[index] = value;

	return 0;
}

static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
			    enum xe_guc_klv_threshold_index index)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->thresholds[index];
}

static const char *threshold_unit(u32 threshold)
{
	return threshold ? "" : "(disabled)";
}

/**
 * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 * @value: requested value (0 means disabled)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);
	const char *name = xe_guc_klv_key_to_string(key);
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_threshold(gt, vfid, index, value);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, value,
				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
				      name, threshold_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 *
 * This function can only be called on PF.
 *
 * Return: value of VF's (or PF's) threshold.
 */
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index)
{
	u32 value;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	value = pf_get_threshold(gt, vfid, index);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return value;
}

static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	if (!xe_gt_is_media_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		pf_release_vf_config_lmem(gt, config);
		pf_update_vf_lmtt(gt_to_xe(gt), vfid);
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
	pf_reset_config_sched(gt, config);
}

/**
 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @force: force configuration release
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
{
	int err;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err || force)
		pf_release_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
				   vfid, ERR_PTR(err),
				   force ? " but all resources were released anyway!" : "");
	}

	return force ? 0 : err;
}

/**
 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @refresh: explicit refresh
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
{
	int err = 0;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (refresh)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
	}

	return err;
}

/**
 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT configuration data for all VFs.
 * VFs without provisioned GGTT are ignored.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!drm_mm_node_allocated(&config->ggtt_region))
			continue;

		string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
			   n, config->ggtt_region.start,
			   config->ggtt_region.start + config->ggtt_region.size - 1, buf);
	}

	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC context ID allocations across all VFs.
 * VFs without GuC context IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_ctxs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_ctx,
			   config->begin_ctx + config->num_ctxs - 1,
			   config->num_ctxs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC doorbell IDs allocations across all VFs.
 * VFs without GuC doorbell IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_dbs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_db,
			   config->begin_db + config->num_dbs - 1,
			   config->num_dbs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}
/**
 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available for the provisioning.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
	u64 hole_start, hole_end, hole_size;
	u64 spare, avail, total = 0;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	spare = pf_get_spare_ggtt(gt);

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		total += hole_size;

		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
			   hole_start, hole_end - 1, buf);
	}

	mutex_unlock(&ggtt->lock);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);

	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);

	avail = total > spare ? total - spare : 0;

	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);

	return 0;
}
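/*
 * Illustration only (made-up values): the available-GGTT printer above
 * produces output of the shape:
 *
 *	range:	0x1000000-0x100ffffff	(4.00 GiB)
 *	total:	4294967296	(4.00 GiB)
 *	spare:	67108864	(64.0 MiB)
 *	avail:	4227858432	(3.94 GiB)
 */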