1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2023-2024 Intel Corporation 4 */ 5 6 #include <linux/string_choices.h> 7 #include <linux/wordpart.h> 8 9 #include "abi/guc_actions_sriov_abi.h" 10 #include "abi/guc_klvs_abi.h" 11 12 #include "regs/xe_guc_regs.h" 13 14 #include "xe_bo.h" 15 #include "xe_device.h" 16 #include "xe_ggtt.h" 17 #include "xe_gt.h" 18 #include "xe_gt_sriov_pf_config.h" 19 #include "xe_gt_sriov_pf_helpers.h" 20 #include "xe_gt_sriov_pf_policy.h" 21 #include "xe_gt_sriov_printk.h" 22 #include "xe_guc.h" 23 #include "xe_guc_buf.h" 24 #include "xe_guc_ct.h" 25 #include "xe_guc_db_mgr.h" 26 #include "xe_guc_fwif.h" 27 #include "xe_guc_id_mgr.h" 28 #include "xe_guc_klv_helpers.h" 29 #include "xe_guc_klv_thresholds_set.h" 30 #include "xe_guc_submit.h" 31 #include "xe_lmtt.h" 32 #include "xe_map.h" 33 #include "xe_migrate.h" 34 #include "xe_sriov.h" 35 #include "xe_ttm_vram_mgr.h" 36 #include "xe_wopcm.h" 37 38 #define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo))) 39 40 /* 41 * Return: number of KLVs that were successfully parsed and saved, 42 * negative error code on failure. 43 */ 44 static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid, 45 u64 addr, u32 size) 46 { 47 u32 request[] = { 48 GUC_ACTION_PF2GUC_UPDATE_VF_CFG, 49 vfid, 50 lower_32_bits(addr), 51 upper_32_bits(addr), 52 size, 53 }; 54 55 return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request)); 56 } 57 58 /* 59 * Return: 0 on success, negative error code on failure. 60 */ 61 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid) 62 { 63 struct xe_guc *guc = >->uc.guc; 64 int ret; 65 66 ret = guc_action_update_vf_cfg(guc, vfid, 0, 0); 67 68 return ret <= 0 ? ret : -EPROTO; 69 } 70 71 /* 72 * Return: number of KLVs that were successfully parsed and saved, 73 * negative error code on failure. 74 */ 75 static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords) 76 { 77 struct xe_guc *guc = >->uc.guc; 78 79 return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords); 80 } 81 82 /* 83 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed, 84 * negative error code on failure. 85 */ 86 static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs, 87 struct xe_guc_buf buf, u32 num_dwords) 88 { 89 int ret; 90 91 ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords); 92 93 if (ret != num_klvs) { 94 int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO; 95 void *klvs = xe_guc_buf_cpu_ptr(buf); 96 struct drm_printer p = xe_gt_info_printer(gt); 97 char name[8]; 98 99 xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n", 100 xe_sriov_function_name(vfid, name, sizeof(name)), 101 num_klvs, str_plural(num_klvs), ERR_PTR(err)); 102 xe_guc_klv_print(klvs, num_dwords, &p); 103 return err; 104 } 105 106 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { 107 struct drm_printer p = xe_gt_dbg_printer(gt); 108 void *klvs = xe_guc_buf_cpu_ptr(buf); 109 char name[8]; 110 111 xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n", 112 xe_sriov_function_name(vfid, name, sizeof(name)), 113 num_klvs, str_plural(num_klvs)); 114 xe_guc_klv_print(klvs, num_dwords, &p); 115 } 116 117 return 0; 118 } 119 120 /* 121 * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data, 122 * negative error code on failure. 
123 */ 124 static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs, 125 const u32 *klvs, u32 num_dwords) 126 { 127 CLASS(xe_guc_buf_from_data, buf)(>->uc.guc.buf, klvs, num_dwords * sizeof(u32)); 128 129 xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords)); 130 131 if (!xe_guc_buf_is_valid(buf)) 132 return -ENOBUFS; 133 134 return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords); 135 } 136 137 static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value) 138 { 139 u32 klv[] = { 140 FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1), 141 value, 142 }; 143 144 return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv)); 145 } 146 147 static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value) 148 { 149 u32 klv[] = { 150 FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2), 151 lower_32_bits(value), 152 upper_32_bits(value), 153 }; 154 155 return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv)); 156 } 157 158 static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size) 159 { 160 u32 klvs[] = { 161 PREP_GUC_KLV_TAG(VF_CFG_GGTT_START), 162 lower_32_bits(start), 163 upper_32_bits(start), 164 PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE), 165 lower_32_bits(size), 166 upper_32_bits(size), 167 }; 168 169 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs)); 170 } 171 172 static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num) 173 { 174 u32 klvs[] = { 175 PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID), 176 begin, 177 PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS), 178 num, 179 }; 180 181 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs)); 182 } 183 184 static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num) 185 { 186 u32 klvs[] = { 187 PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID), 188 begin, 189 PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS), 190 num, 191 }; 192 193 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs)); 194 } 195 196 static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum) 197 { 198 /* GuC will silently clamp values exceeding max */ 199 *exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE); 200 201 return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum); 202 } 203 204 static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout) 205 { 206 /* GuC will silently clamp values exceeding max */ 207 *preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE); 208 209 return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout); 210 } 211 212 static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority) 213 { 214 return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority); 215 } 216 217 static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) 218 { 219 return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size); 220 } 221 222 static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid, 223 enum xe_guc_klv_threshold_index index, u32 value) 224 { 225 u32 key = xe_guc_klv_threshold_index_to_key(index); 226 227 xe_gt_assert(gt, key); 228 return pf_push_vf_cfg_u32(gt, vfid, key, value); 229 } 230 231 static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid) 232 { 233 
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 234 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); 235 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 236 237 return >->sriov.pf.vfs[vfid].config; 238 } 239 240 /* Return: number of configuration dwords written */ 241 static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details) 242 { 243 u32 n = 0; 244 245 if (details) { 246 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START); 247 cfg[n++] = lower_32_bits(start); 248 cfg[n++] = upper_32_bits(start); 249 } 250 251 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE); 252 cfg[n++] = lower_32_bits(size); 253 cfg[n++] = upper_32_bits(size); 254 255 return n; 256 } 257 258 /* Return: number of configuration dwords written */ 259 static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) 260 { 261 struct xe_ggtt_node *node = config->ggtt_region; 262 263 if (!xe_ggtt_node_allocated(node)) 264 return 0; 265 266 return encode_ggtt(cfg, node->base.start, node->base.size, details); 267 } 268 269 /* Return: number of configuration dwords written */ 270 static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) 271 { 272 u32 n = 0; 273 274 n += encode_config_ggtt(cfg, config, details); 275 276 if (details && config->num_ctxs) { 277 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID); 278 cfg[n++] = config->begin_ctx; 279 } 280 281 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS); 282 cfg[n++] = config->num_ctxs; 283 284 if (details && config->num_dbs) { 285 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID); 286 cfg[n++] = config->begin_db; 287 } 288 289 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS); 290 cfg[n++] = config->num_dbs; 291 292 if (config->lmem_obj) { 293 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE); 294 cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj)); 295 cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj)); 296 } 297 298 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM); 299 cfg[n++] = config->exec_quantum; 300 301 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT); 302 cfg[n++] = config->preempt_timeout; 303 304 #define encode_threshold_config(TAG, ...) 
({ \ 305 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG); \ 306 cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)]; \ 307 }); 308 309 MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config); 310 #undef encode_threshold_config 311 312 return n; 313 } 314 315 static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) 316 { 317 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 318 u32 max_cfg_dwords = xe_guc_buf_cache_dwords(>->uc.guc.buf); 319 CLASS(xe_guc_buf, buf)(>->uc.guc.buf, max_cfg_dwords); 320 u32 num_dwords; 321 int num_klvs; 322 u32 *cfg; 323 int err; 324 325 if (!xe_guc_buf_is_valid(buf)) 326 return -ENOBUFS; 327 328 cfg = xe_guc_buf_cpu_ptr(buf); 329 num_dwords = encode_config(cfg, config, true); 330 xe_gt_assert(gt, num_dwords <= max_cfg_dwords); 331 332 if (xe_gt_is_media_type(gt)) { 333 struct xe_gt *primary = gt->tile->primary_gt; 334 struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid); 335 336 /* media-GT will never include a GGTT config */ 337 xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true)); 338 339 /* the GGTT config must be taken from the primary-GT instead */ 340 num_dwords += encode_config_ggtt(cfg + num_dwords, other, true); 341 } 342 xe_gt_assert(gt, num_dwords <= max_cfg_dwords); 343 344 if (vfid == PFID) { 345 u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt)); 346 u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start; 347 348 /* plain PF config data will never include a real GGTT region */ 349 xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true)); 350 351 /* fake PF GGTT config covers full GGTT range except reserved WOPCM */ 352 num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true); 353 } 354 355 num_klvs = xe_guc_klv_count(cfg, num_dwords); 356 err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords); 357 358 return err; 359 } 360 361 static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset) 362 { 363 int err = 0; 364 365 xe_gt_assert(gt, vfid); 366 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 367 368 if (reset) 369 err = pf_send_vf_cfg_reset(gt, vfid); 370 if (!err) 371 err = pf_push_full_vf_config(gt, vfid); 372 373 return err; 374 } 375 376 static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid) 377 { 378 return pf_push_vf_cfg(gt, vfid, true); 379 } 380 381 static u64 pf_get_ggtt_alignment(struct xe_gt *gt) 382 { 383 struct xe_device *xe = gt_to_xe(gt); 384 385 return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K; 386 } 387 388 static u64 pf_get_min_spare_ggtt(struct xe_gt *gt) 389 { 390 /* XXX: preliminary */ 391 return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 
392 pf_get_ggtt_alignment(gt) : SZ_64M; 393 } 394 395 static u64 pf_get_spare_ggtt(struct xe_gt *gt) 396 { 397 u64 spare; 398 399 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 400 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 401 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 402 403 spare = gt->sriov.pf.spare.ggtt_size; 404 spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt)); 405 406 return spare; 407 } 408 409 static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size) 410 { 411 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 412 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 413 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 414 415 if (size && size < pf_get_min_spare_ggtt(gt)) 416 return -EINVAL; 417 418 size = round_up(size, pf_get_ggtt_alignment(gt)); 419 gt->sriov.pf.spare.ggtt_size = size; 420 421 return 0; 422 } 423 424 static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size) 425 { 426 int err, err2 = 0; 427 428 err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size); 429 430 if (tile->media_gt && !err) 431 err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size); 432 433 return err ?: err2; 434 } 435 436 static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node) 437 { 438 if (xe_ggtt_node_allocated(node)) { 439 /* 440 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign() 441 * is redundant, as PTE will be implicitly re-assigned to PF by 442 * the xe_ggtt_clear() called by below xe_ggtt_remove_node(). 443 */ 444 xe_ggtt_node_remove(node, false); 445 } else { 446 xe_ggtt_node_fini(node); 447 } 448 } 449 450 static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config) 451 { 452 pf_release_ggtt(gt_to_tile(gt), config->ggtt_region); 453 config->ggtt_region = NULL; 454 } 455 456 static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) 457 { 458 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 459 struct xe_ggtt_node *node; 460 struct xe_tile *tile = gt_to_tile(gt); 461 struct xe_ggtt *ggtt = tile->mem.ggtt; 462 u64 alignment = pf_get_ggtt_alignment(gt); 463 int err; 464 465 xe_gt_assert(gt, vfid); 466 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 467 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 468 469 size = round_up(size, alignment); 470 471 if (xe_ggtt_node_allocated(config->ggtt_region)) { 472 err = pf_distribute_config_ggtt(tile, vfid, 0, 0); 473 if (unlikely(err)) 474 return err; 475 476 pf_release_vf_config_ggtt(gt, config); 477 478 err = pf_refresh_vf_cfg(gt, vfid); 479 if (unlikely(err)) 480 return err; 481 } 482 xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region)); 483 484 if (!size) 485 return 0; 486 487 node = xe_ggtt_node_init(ggtt); 488 if (IS_ERR(node)) 489 return PTR_ERR(node); 490 491 err = xe_ggtt_node_insert(node, size, alignment); 492 if (unlikely(err)) 493 goto err; 494 495 xe_ggtt_assign(node, vfid); 496 xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n", 497 vfid, node->base.start, node->base.start + node->base.size - 1); 498 499 err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size); 500 if (unlikely(err)) 501 goto err; 502 503 config->ggtt_region = node; 504 return 0; 505 err: 506 pf_release_ggtt(tile, node); 507 return err; 508 } 509 510 static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid) 511 { 512 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 513 struct xe_ggtt_node *node = config->ggtt_region; 514 515 xe_gt_assert(gt, 
xe_gt_is_main_type(gt)); 516 return xe_ggtt_node_allocated(node) ? node->base.size : 0; 517 } 518 519 /** 520 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF. 521 * @gt: the &xe_gt 522 * @vfid: the VF identifier 523 * 524 * This function can only be called on PF. 525 * 526 * Return: size of the VF's assigned (or PF's spare) GGTT address space. 527 */ 528 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid) 529 { 530 u64 size; 531 532 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 533 if (vfid) 534 size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid); 535 else 536 size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt); 537 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 538 539 return size; 540 } 541 542 static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value, 543 u64 actual, const char *what, int err) 544 { 545 char size[10]; 546 char name[8]; 547 548 xe_sriov_function_name(vfid, name, sizeof(name)); 549 550 if (unlikely(err)) { 551 string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size)); 552 xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n", 553 name, value, size, what, ERR_PTR(err)); 554 string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size)); 555 xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n", 556 name, actual, size, what); 557 return err; 558 } 559 560 /* the actual value may have changed during provisioning */ 561 string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size)); 562 xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n", 563 name, actual, size, what); 564 return 0; 565 } 566 567 /** 568 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space. 569 * @gt: the &xe_gt (can't be media) 570 * @vfid: the VF identifier 571 * @size: requested GGTT size 572 * 573 * If &vfid represents PF, then function will change PF's spare GGTT config. 574 * 575 * This function can only be called on PF. 576 * 577 * Return: 0 on success or a negative error code on failure. 578 */ 579 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) 580 { 581 int err; 582 583 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 584 585 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 586 if (vfid) 587 err = pf_provision_vf_ggtt(gt, vfid, size); 588 else 589 err = pf_set_spare_ggtt(gt, size); 590 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 591 592 return pf_config_set_u64_done(gt, vfid, size, 593 xe_gt_sriov_pf_config_get_ggtt(gt, vfid), 594 vfid ? 
"GGTT" : "spare GGTT", err); 595 } 596 597 static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs, 598 u64 value, u64 (*get)(struct xe_gt*, unsigned int), 599 const char *what, unsigned int last, int err) 600 { 601 char size[10]; 602 603 xe_gt_assert(gt, first); 604 xe_gt_assert(gt, num_vfs); 605 xe_gt_assert(gt, first <= last); 606 607 if (num_vfs == 1) 608 return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err); 609 610 if (unlikely(err)) { 611 xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n", 612 first, first + num_vfs - 1, what); 613 if (last > first) 614 pf_config_bulk_set_u64_done(gt, first, last - first, value, 615 get, what, last, 0); 616 return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err); 617 } 618 619 /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */ 620 value = get(gt, first); 621 string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size)); 622 xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n", 623 first, first + num_vfs - 1, value, size, what); 624 return 0; 625 } 626 627 /** 628 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT. 629 * @gt: the &xe_gt (can't be media) 630 * @vfid: starting VF identifier (can't be 0) 631 * @num_vfs: number of VFs to provision 632 * @size: requested GGTT size 633 * 634 * This function can only be called on PF. 635 * 636 * Return: 0 on success or a negative error code on failure. 637 */ 638 int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid, 639 unsigned int num_vfs, u64 size) 640 { 641 unsigned int n; 642 int err = 0; 643 644 xe_gt_assert(gt, vfid); 645 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 646 647 if (!num_vfs) 648 return 0; 649 650 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 651 for (n = vfid; n < vfid + num_vfs; n++) { 652 err = pf_provision_vf_ggtt(gt, n, size); 653 if (err) 654 break; 655 } 656 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 657 658 return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size, 659 xe_gt_sriov_pf_config_get_ggtt, 660 "GGTT", n, err); 661 } 662 663 /* Return: size of the largest continuous GGTT region */ 664 static u64 pf_get_max_ggtt(struct xe_gt *gt) 665 { 666 struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt; 667 u64 alignment = pf_get_ggtt_alignment(gt); 668 u64 spare = pf_get_spare_ggtt(gt); 669 u64 max_hole; 670 671 max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare); 672 673 xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n", 674 max_hole / SZ_1K, spare / SZ_1K); 675 return max_hole > spare ? max_hole - spare : 0; 676 } 677 678 static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs) 679 { 680 u64 available = pf_get_max_ggtt(gt); 681 u64 alignment = pf_get_ggtt_alignment(gt); 682 u64 fair; 683 684 /* 685 * To simplify the logic we only look at single largest GGTT region 686 * as that will be always the best fit for 1 VF case, and most likely 687 * will also nicely cover other cases where VFs are provisioned on the 688 * fresh and idle PF driver, without any stale GGTT allocations spread 689 * in the middle of the full GGTT range. 690 */ 691 692 fair = div_u64(available, num_vfs); 693 fair = ALIGN_DOWN(fair, alignment); 694 xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n", 695 available / SZ_1K, num_vfs, fair / SZ_1K); 696 return fair; 697 } 698 699 /** 700 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT. 
701 * @gt: the &xe_gt (can't be media) 702 * @vfid: starting VF identifier (can't be 0) 703 * @num_vfs: number of VFs to provision 704 * 705 * This function can only be called on PF. 706 * 707 * Return: 0 on success or a negative error code on failure. 708 */ 709 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid, 710 unsigned int num_vfs) 711 { 712 u64 fair; 713 714 xe_gt_assert(gt, vfid); 715 xe_gt_assert(gt, num_vfs); 716 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 717 718 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 719 fair = pf_estimate_fair_ggtt(gt, num_vfs); 720 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 721 722 if (!fair) 723 return -ENOSPC; 724 725 return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair); 726 } 727 728 static u32 pf_get_min_spare_ctxs(struct xe_gt *gt) 729 { 730 /* XXX: preliminary */ 731 return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 732 hweight64(gt->info.engine_mask) : SZ_256; 733 } 734 735 static u32 pf_get_spare_ctxs(struct xe_gt *gt) 736 { 737 u32 spare; 738 739 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 740 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 741 742 spare = gt->sriov.pf.spare.num_ctxs; 743 spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt)); 744 745 return spare; 746 } 747 748 static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare) 749 { 750 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 751 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 752 753 if (spare > GUC_ID_MAX) 754 return -EINVAL; 755 756 if (spare && spare < pf_get_min_spare_ctxs(gt)) 757 return -EINVAL; 758 759 gt->sriov.pf.spare.num_ctxs = spare; 760 761 return 0; 762 } 763 764 /* Return: start ID or negative error code on failure */ 765 static int pf_reserve_ctxs(struct xe_gt *gt, u32 num) 766 { 767 struct xe_guc_id_mgr *idm = >->uc.guc.submission_state.idm; 768 unsigned int spare = pf_get_spare_ctxs(gt); 769 770 return xe_guc_id_mgr_reserve(idm, num, spare); 771 } 772 773 static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num) 774 { 775 struct xe_guc_id_mgr *idm = >->uc.guc.submission_state.idm; 776 777 if (num) 778 xe_guc_id_mgr_release(idm, start, num); 779 } 780 781 static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config) 782 { 783 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 784 785 pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs); 786 config->begin_ctx = 0; 787 config->num_ctxs = 0; 788 } 789 790 static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs) 791 { 792 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 793 int ret; 794 795 xe_gt_assert(gt, vfid); 796 797 if (num_ctxs > GUC_ID_MAX) 798 return -EINVAL; 799 800 if (config->num_ctxs) { 801 ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0); 802 if (unlikely(ret)) 803 return ret; 804 805 pf_release_config_ctxs(gt, config); 806 807 ret = pf_refresh_vf_cfg(gt, vfid); 808 if (unlikely(ret)) 809 return ret; 810 } 811 812 if (!num_ctxs) 813 return 0; 814 815 ret = pf_reserve_ctxs(gt, num_ctxs); 816 if (unlikely(ret < 0)) 817 return ret; 818 819 config->begin_ctx = ret; 820 config->num_ctxs = num_ctxs; 821 822 ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs); 823 if (unlikely(ret)) { 824 pf_release_config_ctxs(gt, config); 825 return ret; 826 } 827 828 xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n", 829 vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1); 830 return 0; 831 } 832 833 static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned 
int vfid) 834 { 835 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 836 837 return config->num_ctxs; 838 } 839 840 /** 841 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC contexts IDs quota. 842 * @gt: the &xe_gt 843 * @vfid: the VF identifier 844 * 845 * This function can only be called on PF. 846 * If &vfid represents a PF then number of PF's spare GuC context IDs is returned. 847 * 848 * Return: VF's quota (or PF's spare). 849 */ 850 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid) 851 { 852 u32 num_ctxs; 853 854 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 855 if (vfid) 856 num_ctxs = pf_get_vf_config_ctxs(gt, vfid); 857 else 858 num_ctxs = pf_get_spare_ctxs(gt); 859 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 860 861 return num_ctxs; 862 } 863 864 static const char *no_unit(u32 unused) 865 { 866 return ""; 867 } 868 869 static const char *spare_unit(u32 unused) 870 { 871 return " spare"; 872 } 873 874 static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual, 875 const char *what, const char *(*unit)(u32), int err) 876 { 877 char name[8]; 878 879 xe_sriov_function_name(vfid, name, sizeof(name)); 880 881 if (unlikely(err)) { 882 xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n", 883 name, value, unit(value), what, ERR_PTR(err)); 884 xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n", 885 name, actual, unit(actual), what); 886 return err; 887 } 888 889 /* the actual value may have changed during provisioning */ 890 xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n", 891 name, actual, unit(actual), what); 892 return 0; 893 } 894 895 /** 896 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC contexts IDs quota for the VF. 897 * @gt: the &xe_gt 898 * @vfid: the VF identifier 899 * @num_ctxs: requested number of GuC contexts IDs (0 to release) 900 * 901 * This function can only be called on PF. 902 * 903 * Return: 0 on success or a negative error code on failure. 904 */ 905 int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs) 906 { 907 int err; 908 909 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 910 if (vfid) 911 err = pf_provision_vf_ctxs(gt, vfid, num_ctxs); 912 else 913 err = pf_set_spare_ctxs(gt, num_ctxs); 914 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 915 916 return pf_config_set_u32_done(gt, vfid, num_ctxs, 917 xe_gt_sriov_pf_config_get_ctxs(gt, vfid), 918 "GuC context IDs", vfid ? 
no_unit : spare_unit, err); 919 } 920 921 static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs, 922 u32 value, u32 (*get)(struct xe_gt*, unsigned int), 923 const char *what, const char *(*unit)(u32), 924 unsigned int last, int err) 925 { 926 xe_gt_assert(gt, first); 927 xe_gt_assert(gt, num_vfs); 928 xe_gt_assert(gt, first <= last); 929 930 if (num_vfs == 1) 931 return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err); 932 933 if (unlikely(err)) { 934 xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n", 935 first, first + num_vfs - 1, what); 936 if (last > first) 937 pf_config_bulk_set_u32_done(gt, first, last - first, value, 938 get, what, unit, last, 0); 939 return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err); 940 } 941 942 /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */ 943 value = get(gt, first); 944 xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n", 945 first, first + num_vfs - 1, value, unit(value), what); 946 return 0; 947 } 948 949 /** 950 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs. 951 * @gt: the &xe_gt 952 * @vfid: starting VF identifier 953 * @num_vfs: number of VFs to provision 954 * @num_ctxs: requested number of GuC contexts IDs (0 to release) 955 * 956 * This function can only be called on PF. 957 * 958 * Return: 0 on success or a negative error code on failure. 959 */ 960 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid, 961 unsigned int num_vfs, u32 num_ctxs) 962 { 963 unsigned int n; 964 int err = 0; 965 966 xe_gt_assert(gt, vfid); 967 968 if (!num_vfs) 969 return 0; 970 971 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 972 for (n = vfid; n < vfid + num_vfs; n++) { 973 err = pf_provision_vf_ctxs(gt, n, num_ctxs); 974 if (err) 975 break; 976 } 977 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 978 979 return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs, 980 xe_gt_sriov_pf_config_get_ctxs, 981 "GuC context IDs", no_unit, n, err); 982 } 983 984 static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs) 985 { 986 struct xe_guc_id_mgr *idm = >->uc.guc.submission_state.idm; 987 u32 spare = pf_get_spare_ctxs(gt); 988 u32 fair = (idm->total - spare) / num_vfs; 989 int ret; 990 991 for (; fair; --fair) { 992 ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare); 993 if (ret < 0) 994 continue; 995 xe_guc_id_mgr_release(idm, ret, fair * num_vfs); 996 break; 997 } 998 999 xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair); 1000 return fair; 1001 } 1002 1003 /** 1004 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs. 1005 * @gt: the &xe_gt 1006 * @vfid: starting VF identifier (can't be 0) 1007 * @num_vfs: number of VFs to provision (can't be 0) 1008 * 1009 * This function can only be called on PF. 1010 * 1011 * Return: 0 on success or a negative error code on failure. 
1012 */ 1013 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid, 1014 unsigned int num_vfs) 1015 { 1016 u32 fair; 1017 1018 xe_gt_assert(gt, vfid); 1019 xe_gt_assert(gt, num_vfs); 1020 1021 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1022 fair = pf_estimate_fair_ctxs(gt, num_vfs); 1023 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1024 1025 if (!fair) 1026 return -ENOSPC; 1027 1028 return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair); 1029 } 1030 1031 static u32 pf_get_min_spare_dbs(struct xe_gt *gt) 1032 { 1033 /* XXX: preliminary, we don't use doorbells yet! */ 1034 return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0; 1035 } 1036 1037 static u32 pf_get_spare_dbs(struct xe_gt *gt) 1038 { 1039 u32 spare; 1040 1041 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 1042 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 1043 1044 spare = gt->sriov.pf.spare.num_dbs; 1045 spare = max_t(u32, spare, pf_get_min_spare_dbs(gt)); 1046 1047 return spare; 1048 } 1049 1050 static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare) 1051 { 1052 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 1053 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 1054 1055 if (spare > GUC_NUM_DOORBELLS) 1056 return -EINVAL; 1057 1058 if (spare && spare < pf_get_min_spare_dbs(gt)) 1059 return -EINVAL; 1060 1061 gt->sriov.pf.spare.num_dbs = spare; 1062 return 0; 1063 } 1064 1065 /* Return: start ID or negative error code on failure */ 1066 static int pf_reserve_dbs(struct xe_gt *gt, u32 num) 1067 { 1068 struct xe_guc_db_mgr *dbm = >->uc.guc.dbm; 1069 unsigned int spare = pf_get_spare_dbs(gt); 1070 1071 return xe_guc_db_mgr_reserve_range(dbm, num, spare); 1072 } 1073 1074 static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num) 1075 { 1076 struct xe_guc_db_mgr *dbm = >->uc.guc.dbm; 1077 1078 if (num) 1079 xe_guc_db_mgr_release_range(dbm, start, num); 1080 } 1081 1082 static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config) 1083 { 1084 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 1085 1086 pf_release_dbs(gt, config->begin_db, config->num_dbs); 1087 config->begin_db = 0; 1088 config->num_dbs = 0; 1089 } 1090 1091 static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs) 1092 { 1093 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1094 int ret; 1095 1096 xe_gt_assert(gt, vfid); 1097 1098 if (num_dbs > GUC_NUM_DOORBELLS) 1099 return -EINVAL; 1100 1101 if (config->num_dbs) { 1102 ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0); 1103 if (unlikely(ret)) 1104 return ret; 1105 1106 pf_release_config_dbs(gt, config); 1107 1108 ret = pf_refresh_vf_cfg(gt, vfid); 1109 if (unlikely(ret)) 1110 return ret; 1111 } 1112 1113 if (!num_dbs) 1114 return 0; 1115 1116 ret = pf_reserve_dbs(gt, num_dbs); 1117 if (unlikely(ret < 0)) 1118 return ret; 1119 1120 config->begin_db = ret; 1121 config->num_dbs = num_dbs; 1122 1123 ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs); 1124 if (unlikely(ret)) { 1125 pf_release_config_dbs(gt, config); 1126 return ret; 1127 } 1128 1129 xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n", 1130 vfid, config->begin_db, config->begin_db + config->num_dbs - 1); 1131 return 0; 1132 } 1133 1134 static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid) 1135 { 1136 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1137 1138 return config->num_dbs; 1139 } 1140 1141 /** 1142 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbells IDs quota. 
1143 * @gt: the &xe_gt 1144 * @vfid: the VF identifier 1145 * 1146 * This function can only be called on PF. 1147 * If &vfid represents a PF then number of PF's spare GuC doorbells IDs is returned. 1148 * 1149 * Return: VF's quota (or PF's spare). 1150 */ 1151 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid) 1152 { 1153 u32 num_dbs; 1154 1155 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 1156 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); 1157 1158 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1159 if (vfid) 1160 num_dbs = pf_get_vf_config_dbs(gt, vfid); 1161 else 1162 num_dbs = pf_get_spare_dbs(gt); 1163 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1164 1165 return num_dbs; 1166 } 1167 1168 /** 1169 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbells IDs quota for the VF. 1170 * @gt: the &xe_gt 1171 * @vfid: the VF identifier 1172 * @num_dbs: requested number of GuC doorbells IDs (0 to release) 1173 * 1174 * This function can only be called on PF. 1175 * 1176 * Return: 0 on success or a negative error code on failure. 1177 */ 1178 int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs) 1179 { 1180 int err; 1181 1182 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 1183 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); 1184 1185 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1186 if (vfid) 1187 err = pf_provision_vf_dbs(gt, vfid, num_dbs); 1188 else 1189 err = pf_set_spare_dbs(gt, num_dbs); 1190 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1191 1192 return pf_config_set_u32_done(gt, vfid, num_dbs, 1193 xe_gt_sriov_pf_config_get_dbs(gt, vfid), 1194 "GuC doorbell IDs", vfid ? no_unit : spare_unit, err); 1195 } 1196 1197 /** 1198 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC context IDs. 1199 * @gt: the &xe_gt 1200 * @vfid: starting VF identifier (can't be 0) 1201 * @num_vfs: number of VFs to provision 1202 * @num_dbs: requested number of GuC doorbell IDs (0 to release) 1203 * 1204 * This function can only be called on PF. 1205 * 1206 * Return: 0 on success or a negative error code on failure. 1207 */ 1208 int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid, 1209 unsigned int num_vfs, u32 num_dbs) 1210 { 1211 unsigned int n; 1212 int err = 0; 1213 1214 xe_gt_assert(gt, vfid); 1215 1216 if (!num_vfs) 1217 return 0; 1218 1219 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1220 for (n = vfid; n < vfid + num_vfs; n++) { 1221 err = pf_provision_vf_dbs(gt, n, num_dbs); 1222 if (err) 1223 break; 1224 } 1225 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1226 1227 return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs, 1228 xe_gt_sriov_pf_config_get_dbs, 1229 "GuC doorbell IDs", no_unit, n, err); 1230 } 1231 1232 static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs) 1233 { 1234 struct xe_guc_db_mgr *dbm = >->uc.guc.dbm; 1235 u32 spare = pf_get_spare_dbs(gt); 1236 u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs; 1237 int ret; 1238 1239 for (; fair; --fair) { 1240 ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare); 1241 if (ret < 0) 1242 continue; 1243 xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs); 1244 break; 1245 } 1246 1247 xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair); 1248 return fair; 1249 } 1250 1251 /** 1252 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs. 
1253 * @gt: the &xe_gt 1254 * @vfid: starting VF identifier (can't be 0) 1255 * @num_vfs: number of VFs to provision (can't be 0) 1256 * 1257 * This function can only be called on PF. 1258 * 1259 * Return: 0 on success or a negative error code on failure. 1260 */ 1261 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid, 1262 unsigned int num_vfs) 1263 { 1264 u32 fair; 1265 1266 xe_gt_assert(gt, vfid); 1267 xe_gt_assert(gt, num_vfs); 1268 1269 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1270 fair = pf_estimate_fair_dbs(gt, num_vfs); 1271 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1272 1273 if (!fair) 1274 return -ENOSPC; 1275 1276 return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair); 1277 } 1278 1279 static u64 pf_get_lmem_alignment(struct xe_gt *gt) 1280 { 1281 /* this might be platform dependent */ 1282 return SZ_2M; 1283 } 1284 1285 static u64 pf_get_min_spare_lmem(struct xe_gt *gt) 1286 { 1287 /* this might be platform dependent */ 1288 return SZ_128M; /* XXX: preliminary */ 1289 } 1290 1291 static u64 pf_get_spare_lmem(struct xe_gt *gt) 1292 { 1293 u64 spare; 1294 1295 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 1296 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 1297 1298 spare = gt->sriov.pf.spare.lmem_size; 1299 spare = max_t(u64, spare, pf_get_min_spare_lmem(gt)); 1300 1301 return spare; 1302 } 1303 1304 static int pf_set_spare_lmem(struct xe_gt *gt, u64 size) 1305 { 1306 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); 1307 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 1308 1309 if (size && size < pf_get_min_spare_lmem(gt)) 1310 return -EINVAL; 1311 1312 gt->sriov.pf.spare.lmem_size = size; 1313 return 0; 1314 } 1315 1316 static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid) 1317 { 1318 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1319 struct xe_bo *bo; 1320 1321 bo = config->lmem_obj; 1322 return bo ? 
xe_bo_size(bo) : 0; 1323 } 1324 1325 static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) 1326 { 1327 struct xe_device *xe = gt_to_xe(gt); 1328 struct xe_tile *tile; 1329 unsigned int tid; 1330 int err; 1331 1332 for_each_tile(tile, xe, tid) { 1333 if (tile->primary_gt == gt) { 1334 err = pf_push_vf_cfg_lmem(gt, vfid, size); 1335 } else { 1336 u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid); 1337 1338 if (!lmem) 1339 continue; 1340 err = pf_push_vf_cfg_lmem(gt, vfid, lmem); 1341 } 1342 if (unlikely(err)) 1343 return err; 1344 } 1345 return 0; 1346 } 1347 1348 static void pf_force_lmtt_invalidate(struct xe_device *xe) 1349 { 1350 struct xe_lmtt *lmtt; 1351 struct xe_tile *tile; 1352 unsigned int tid; 1353 1354 xe_assert(xe, xe_device_has_lmtt(xe)); 1355 xe_assert(xe, IS_SRIOV_PF(xe)); 1356 1357 for_each_tile(tile, xe, tid) { 1358 lmtt = &tile->sriov.pf.lmtt; 1359 xe_lmtt_invalidate_hw(lmtt); 1360 } 1361 } 1362 1363 static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid) 1364 { 1365 struct xe_lmtt *lmtt; 1366 struct xe_tile *tile; 1367 unsigned int tid; 1368 1369 xe_assert(xe, xe_device_has_lmtt(xe)); 1370 xe_assert(xe, IS_SRIOV_PF(xe)); 1371 1372 for_each_tile(tile, xe, tid) { 1373 lmtt = &tile->sriov.pf.lmtt; 1374 xe_lmtt_drop_pages(lmtt, vfid); 1375 } 1376 } 1377 1378 static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid) 1379 { 1380 struct xe_gt_sriov_config *config; 1381 struct xe_tile *tile; 1382 struct xe_lmtt *lmtt; 1383 struct xe_bo *bo; 1384 struct xe_gt *gt; 1385 u64 total, offset; 1386 unsigned int gtid; 1387 unsigned int tid; 1388 int err; 1389 1390 xe_assert(xe, xe_device_has_lmtt(xe)); 1391 xe_assert(xe, IS_SRIOV_PF(xe)); 1392 1393 total = 0; 1394 for_each_tile(tile, xe, tid) 1395 total += pf_get_vf_config_lmem(tile->primary_gt, vfid); 1396 1397 for_each_tile(tile, xe, tid) { 1398 lmtt = &tile->sriov.pf.lmtt; 1399 1400 xe_lmtt_drop_pages(lmtt, vfid); 1401 if (!total) 1402 continue; 1403 1404 err = xe_lmtt_prepare_pages(lmtt, vfid, total); 1405 if (err) 1406 goto fail; 1407 1408 offset = 0; 1409 for_each_gt(gt, xe, gtid) { 1410 if (xe_gt_is_media_type(gt)) 1411 continue; 1412 1413 config = pf_pick_vf_config(gt, vfid); 1414 bo = config->lmem_obj; 1415 if (!bo) 1416 continue; 1417 1418 err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset); 1419 if (err) 1420 goto fail; 1421 offset += xe_bo_size(bo); 1422 } 1423 } 1424 1425 pf_force_lmtt_invalidate(xe); 1426 return 0; 1427 1428 fail: 1429 for_each_tile(tile, xe, tid) { 1430 lmtt = &tile->sriov.pf.lmtt; 1431 xe_lmtt_drop_pages(lmtt, vfid); 1432 } 1433 return err; 1434 } 1435 1436 static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config) 1437 { 1438 xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt))); 1439 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 1440 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 1441 1442 if (config->lmem_obj) { 1443 xe_bo_unpin_map_no_vm(config->lmem_obj); 1444 config->lmem_obj = NULL; 1445 } 1446 } 1447 1448 static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) 1449 { 1450 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1451 struct xe_device *xe = gt_to_xe(gt); 1452 struct xe_tile *tile = gt_to_tile(gt); 1453 struct xe_bo *bo; 1454 int err; 1455 1456 xe_gt_assert(gt, vfid); 1457 xe_gt_assert(gt, IS_DGFX(xe)); 1458 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 1459 1460 size = round_up(size, pf_get_lmem_alignment(gt)); 1461 1462 if (config->lmem_obj) { 1463 err = 
pf_distribute_config_lmem(gt, vfid, 0); 1464 if (unlikely(err)) 1465 return err; 1466 1467 if (xe_device_has_lmtt(xe)) 1468 pf_reset_vf_lmtt(xe, vfid); 1469 pf_release_vf_config_lmem(gt, config); 1470 } 1471 xe_gt_assert(gt, !config->lmem_obj); 1472 1473 if (!size) 1474 return 0; 1475 1476 xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M); 1477 bo = xe_bo_create_locked(xe, tile, NULL, 1478 ALIGN(size, PAGE_SIZE), 1479 ttm_bo_type_kernel, 1480 XE_BO_FLAG_VRAM_IF_DGFX(tile) | 1481 XE_BO_FLAG_NEEDS_2M | 1482 XE_BO_FLAG_PINNED | 1483 XE_BO_FLAG_PINNED_LATE_RESTORE); 1484 if (IS_ERR(bo)) 1485 return PTR_ERR(bo); 1486 1487 err = xe_bo_pin(bo); 1488 xe_bo_unlock(bo); 1489 if (unlikely(err)) { 1490 xe_bo_put(bo); 1491 return err; 1492 } 1493 1494 config->lmem_obj = bo; 1495 1496 if (xe_device_has_lmtt(xe)) { 1497 err = pf_update_vf_lmtt(xe, vfid); 1498 if (unlikely(err)) 1499 goto release; 1500 } 1501 1502 err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo)); 1503 if (unlikely(err)) 1504 goto reset_lmtt; 1505 1506 xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n", 1507 vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M); 1508 return 0; 1509 1510 reset_lmtt: 1511 if (xe_device_has_lmtt(xe)) 1512 pf_reset_vf_lmtt(xe, vfid); 1513 release: 1514 pf_release_vf_config_lmem(gt, config); 1515 return err; 1516 } 1517 1518 /** 1519 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota. 1520 * @gt: the &xe_gt 1521 * @vfid: the VF identifier 1522 * 1523 * This function can only be called on PF. 1524 * 1525 * Return: VF's (or PF's spare) LMEM quota. 1526 */ 1527 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid) 1528 { 1529 u64 size; 1530 1531 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1532 if (vfid) 1533 size = pf_get_vf_config_lmem(gt, vfid); 1534 else 1535 size = pf_get_spare_lmem(gt); 1536 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1537 1538 return size; 1539 } 1540 1541 /** 1542 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM. 1543 * @gt: the &xe_gt (can't be media) 1544 * @vfid: the VF identifier 1545 * @size: requested LMEM size 1546 * 1547 * This function can only be called on PF. 1548 */ 1549 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) 1550 { 1551 int err; 1552 1553 xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt))); 1554 1555 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1556 if (vfid) 1557 err = pf_provision_vf_lmem(gt, vfid, size); 1558 else 1559 err = pf_set_spare_lmem(gt, size); 1560 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1561 1562 return pf_config_set_u64_done(gt, vfid, size, 1563 xe_gt_sriov_pf_config_get_lmem(gt, vfid), 1564 vfid ? "LMEM" : "spare LMEM", err); 1565 } 1566 1567 /** 1568 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM. 1569 * @gt: the &xe_gt (can't be media) 1570 * @vfid: starting VF identifier (can't be 0) 1571 * @num_vfs: number of VFs to provision 1572 * @size: requested LMEM size 1573 * 1574 * This function can only be called on PF. 1575 * 1576 * Return: 0 on success or a negative error code on failure. 
1577 */ 1578 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, 1579 unsigned int num_vfs, u64 size) 1580 { 1581 unsigned int n; 1582 int err = 0; 1583 1584 xe_gt_assert(gt, vfid); 1585 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 1586 1587 if (!num_vfs) 1588 return 0; 1589 1590 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1591 for (n = vfid; n < vfid + num_vfs; n++) { 1592 err = pf_provision_vf_lmem(gt, n, size); 1593 if (err) 1594 break; 1595 } 1596 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1597 1598 return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size, 1599 xe_gt_sriov_pf_config_get_lmem, 1600 "LMEM", n, err); 1601 } 1602 1603 static u64 pf_query_free_lmem(struct xe_gt *gt) 1604 { 1605 struct xe_tile *tile = gt->tile; 1606 1607 return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager); 1608 } 1609 1610 static u64 pf_query_max_lmem(struct xe_gt *gt) 1611 { 1612 u64 alignment = pf_get_lmem_alignment(gt); 1613 u64 spare = pf_get_spare_lmem(gt); 1614 u64 free = pf_query_free_lmem(gt); 1615 u64 avail; 1616 1617 /* XXX: need to account for 2MB blocks only */ 1618 avail = free > spare ? free - spare : 0; 1619 avail = round_down(avail, alignment); 1620 1621 return avail; 1622 } 1623 1624 #ifdef CONFIG_DRM_XE_DEBUG_SRIOV 1625 #define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */ 1626 #endif 1627 1628 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs) 1629 { 1630 u64 available = pf_query_max_lmem(gt); 1631 u64 alignment = pf_get_lmem_alignment(gt); 1632 u64 fair; 1633 1634 fair = div_u64(available, num_vfs); 1635 fair = ALIGN_DOWN(fair, alignment); 1636 #ifdef MAX_FAIR_LMEM 1637 fair = min_t(u64, MAX_FAIR_LMEM, fair); 1638 #endif 1639 xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n", 1640 available / SZ_1M, num_vfs, fair / SZ_1M); 1641 return fair; 1642 } 1643 1644 /** 1645 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM. 1646 * @gt: the &xe_gt (can't be media) 1647 * @vfid: starting VF identifier (can't be 0) 1648 * @num_vfs: number of VFs to provision (can't be 0) 1649 * 1650 * This function can only be called on PF. 1651 * 1652 * Return: 0 on success or a negative error code on failure. 1653 */ 1654 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, 1655 unsigned int num_vfs) 1656 { 1657 u64 fair; 1658 1659 xe_gt_assert(gt, vfid); 1660 xe_gt_assert(gt, num_vfs); 1661 xe_gt_assert(gt, xe_gt_is_main_type(gt)); 1662 1663 if (!xe_device_has_lmtt(gt_to_xe(gt))) 1664 return 0; 1665 1666 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1667 fair = pf_estimate_fair_lmem(gt, num_vfs); 1668 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1669 1670 if (!fair) 1671 return -ENOSPC; 1672 1673 return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair); 1674 } 1675 1676 /** 1677 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources. 1678 * @gt: the &xe_gt 1679 * @vfid: starting VF identifier (can't be 0) 1680 * @num_vfs: number of VFs to provision (can't be 0) 1681 * 1682 * This function can only be called on PF. 1683 * 1684 * Return: 0 on success or a negative error code on failure. 
1685 */ 1686 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, 1687 unsigned int num_vfs) 1688 { 1689 int result = 0; 1690 int err; 1691 1692 xe_gt_assert(gt, vfid); 1693 xe_gt_assert(gt, num_vfs); 1694 1695 if (xe_gt_is_main_type(gt)) { 1696 err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs); 1697 result = result ?: err; 1698 err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs); 1699 result = result ?: err; 1700 } 1701 err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs); 1702 result = result ?: err; 1703 err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs); 1704 result = result ?: err; 1705 1706 return result; 1707 } 1708 1709 static const char *exec_quantum_unit(u32 exec_quantum) 1710 { 1711 return exec_quantum ? "ms" : "(infinity)"; 1712 } 1713 1714 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid, 1715 u32 exec_quantum) 1716 { 1717 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1718 int err; 1719 1720 err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum); 1721 if (unlikely(err)) 1722 return err; 1723 1724 config->exec_quantum = exec_quantum; 1725 return 0; 1726 } 1727 1728 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid) 1729 { 1730 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1731 1732 return config->exec_quantum; 1733 } 1734 1735 /** 1736 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF. 1737 * @gt: the &xe_gt 1738 * @vfid: the VF identifier 1739 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity) 1740 * 1741 * This function can only be called on PF. 1742 * 1743 * Return: 0 on success or a negative error code on failure. 1744 */ 1745 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, 1746 u32 exec_quantum) 1747 { 1748 int err; 1749 1750 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1751 err = pf_provision_exec_quantum(gt, vfid, exec_quantum); 1752 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1753 1754 return pf_config_set_u32_done(gt, vfid, exec_quantum, 1755 xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid), 1756 "execution quantum", exec_quantum_unit, err); 1757 } 1758 1759 /** 1760 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum. 1761 * @gt: the &xe_gt 1762 * @vfid: the VF identifier 1763 * 1764 * This function can only be called on PF. 1765 * 1766 * Return: VF's (or PF's) execution quantum in milliseconds. 1767 */ 1768 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid) 1769 { 1770 u32 exec_quantum; 1771 1772 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1773 exec_quantum = pf_get_exec_quantum(gt, vfid); 1774 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1775 1776 return exec_quantum; 1777 } 1778 1779 static const char *preempt_timeout_unit(u32 preempt_timeout) 1780 { 1781 return preempt_timeout ? 
"us" : "(infinity)"; 1782 } 1783 1784 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid, 1785 u32 preempt_timeout) 1786 { 1787 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1788 int err; 1789 1790 err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout); 1791 if (unlikely(err)) 1792 return err; 1793 1794 config->preempt_timeout = preempt_timeout; 1795 1796 return 0; 1797 } 1798 1799 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid) 1800 { 1801 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1802 1803 return config->preempt_timeout; 1804 } 1805 1806 /** 1807 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF. 1808 * @gt: the &xe_gt 1809 * @vfid: the VF identifier 1810 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity) 1811 * 1812 * This function can only be called on PF. 1813 * 1814 * Return: 0 on success or a negative error code on failure. 1815 */ 1816 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid, 1817 u32 preempt_timeout) 1818 { 1819 int err; 1820 1821 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1822 err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout); 1823 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1824 1825 return pf_config_set_u32_done(gt, vfid, preempt_timeout, 1826 xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid), 1827 "preemption timeout", preempt_timeout_unit, err); 1828 } 1829 1830 /** 1831 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout. 1832 * @gt: the &xe_gt 1833 * @vfid: the VF identifier 1834 * 1835 * This function can only be called on PF. 1836 * 1837 * Return: VF's (or PF's) preemption timeout in microseconds. 1838 */ 1839 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid) 1840 { 1841 u32 preempt_timeout; 1842 1843 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1844 preempt_timeout = pf_get_preempt_timeout(gt, vfid); 1845 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1846 1847 return preempt_timeout; 1848 } 1849 1850 static const char *sched_priority_unit(u32 priority) 1851 { 1852 return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" : 1853 priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" : 1854 priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" : 1855 "(?)"; 1856 } 1857 1858 static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority) 1859 { 1860 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1861 int err; 1862 1863 err = pf_push_vf_cfg_sched_priority(gt, vfid, priority); 1864 if (unlikely(err)) 1865 return err; 1866 1867 config->sched_priority = priority; 1868 return 0; 1869 } 1870 1871 static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid) 1872 { 1873 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1874 1875 return config->sched_priority; 1876 } 1877 1878 /** 1879 * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority. 1880 * @gt: the &xe_gt 1881 * @vfid: the VF identifier 1882 * @priority: requested scheduling priority 1883 * 1884 * This function can only be called on PF. 1885 * 1886 * Return: 0 on success or a negative error code on failure. 
1887 */ 1888 int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority) 1889 { 1890 int err; 1891 1892 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1893 err = pf_provision_sched_priority(gt, vfid, priority); 1894 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1895 1896 return pf_config_set_u32_done(gt, vfid, priority, 1897 xe_gt_sriov_pf_config_get_sched_priority(gt, vfid), 1898 "scheduling priority", sched_priority_unit, err); 1899 } 1900 1901 /** 1902 * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority. 1903 * @gt: the &xe_gt 1904 * @vfid: the VF identifier 1905 * 1906 * This function can only be called on PF. 1907 * 1908 * Return: VF's (or PF's) scheduling priority. 1909 */ 1910 u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid) 1911 { 1912 u32 priority; 1913 1914 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1915 priority = pf_get_sched_priority(gt, vfid); 1916 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1917 1918 return priority; 1919 } 1920 1921 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config) 1922 { 1923 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); 1924 1925 config->exec_quantum = 0; 1926 config->preempt_timeout = 0; 1927 } 1928 1929 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid, 1930 enum xe_guc_klv_threshold_index index, u32 value) 1931 { 1932 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1933 int err; 1934 1935 err = pf_push_vf_cfg_threshold(gt, vfid, index, value); 1936 if (unlikely(err)) 1937 return err; 1938 1939 config->thresholds[index] = value; 1940 1941 return 0; 1942 } 1943 1944 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid, 1945 enum xe_guc_klv_threshold_index index) 1946 { 1947 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); 1948 1949 return config->thresholds[index]; 1950 } 1951 1952 static const char *threshold_unit(u32 threshold) 1953 { 1954 return threshold ? "" : "(disabled)"; 1955 } 1956 1957 /** 1958 * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF. 1959 * @gt: the &xe_gt 1960 * @vfid: the VF identifier 1961 * @index: the threshold index 1962 * @value: requested value (0 means disabled) 1963 * 1964 * This function can only be called on PF. 1965 * 1966 * Return: 0 on success or a negative error code on failure. 1967 */ 1968 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid, 1969 enum xe_guc_klv_threshold_index index, u32 value) 1970 { 1971 u32 key = xe_guc_klv_threshold_index_to_key(index); 1972 const char *name = xe_guc_klv_key_to_string(key); 1973 int err; 1974 1975 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); 1976 err = pf_provision_threshold(gt, vfid, index, value); 1977 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); 1978 1979 return pf_config_set_u32_done(gt, vfid, value, 1980 xe_gt_sriov_pf_config_get_threshold(gt, vfid, index), 1981 name, threshold_unit, err); 1982 } 1983 1984 /** 1985 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold. 1986 * @gt: the &xe_gt 1987 * @vfid: the VF identifier 1988 * @index: the threshold index 1989 * 1990 * This function can only be called on PF. 1991 * 1992 * Return: value of VF's (or PF's) threshold. 
/**
 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 *
 * This function can only be called on PF.
 *
 * Return: value of VF's (or PF's) threshold.
 */
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index)
{
	u32 value;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	value = pf_get_threshold(gt, vfid, index);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return value;
}

static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

#define reset_threshold_config(TAG, ...) ({				\
	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
#undef reset_threshold_config
}

static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);

	if (xe_gt_is_main_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		if (IS_DGFX(xe)) {
			pf_release_vf_config_lmem(gt, config);
			if (xe_device_has_lmtt(xe))
				pf_update_vf_lmtt(xe, vfid);
		}
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
	pf_reset_config_sched(gt, config);
	pf_reset_config_thresholds(gt, config);
}

/**
 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @force: force configuration release
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
{
	int err;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err || force)
		pf_release_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
				   vfid, ERR_PTR(err),
				   force ? " but all resources were released anyway!" : "");
	}

	return force ? 0 : err;
}

static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
{
	if (xe_ggtt_node_allocated(ggtt_region))
		xe_ggtt_assign(ggtt_region, vfid);
}
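/*
 * Clarifying sketch (not compiled): pf_reset_config_thresholds() above relies
 * on the MAKE_XE_GUC_KLV_THRESHOLDS_SET() X-macro, which invokes the supplied
 * macro once per threshold tag defined in xe_guc_klv_thresholds_set.h.  With
 * reset_threshold_config() as the callback, the expansion is conceptually
 * equivalent to the loop below (the actual tag list lives in the header and
 * is deliberately not spelled out here).
 */
#if 0
static void example_reset_thresholds_unrolled(struct xe_gt *gt,
					      struct xe_gt_sriov_config *config)
{
	unsigned int i;

	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	/* one slot per threshold tag, indexed by MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG) */
	for (i = 0; i < ARRAY_SIZE(config->thresholds); i++)
		config->thresholds[i] = 0;
}
#endif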
static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
{
	struct xe_migrate *m = tile->migrate;
	struct dma_fence *fence;
	int err;

	if (!bo)
		return 0;

	xe_bo_lock(bo, false);
	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
	} else if (!fence) {
		err = -ENOMEM;
	} else {
		long ret = dma_fence_wait_timeout(fence, false, timeout);

		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
		dma_fence_put(fence);
		if (!err)
			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
						jiffies_to_msecs(timeout - ret));
	}
	xe_bo_unlock(bo);

	return err;
}

static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	int err = 0;

	/*
	 * Only the GGTT and LMEM require clearing by the PF.
	 * GuC doorbell IDs and context IDs do not need any clearing.
	 */
	if (xe_gt_is_main_type(gt)) {
		pf_sanitize_ggtt(config->ggtt_region, vfid);
		if (IS_DGFX(xe))
			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
	}

	return err;
}

/**
 * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @timeout: maximum timeout to wait for completion in jiffies
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
{
	int err;

	xe_gt_assert(gt, vfid != PFID);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_sanitize_vf_resources(gt, vfid, timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err))
		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
				   vfid, ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @refresh: explicit refresh
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
{
	int err = 0;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_push_vf_cfg(gt, vfid, refresh);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
	}

	return err;
}
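/*
 * Clarifying sketch (not compiled): pf_sanitize_lmem() above folds the
 * dma_fence_wait_timeout() result into an errno.  That call returns the
 * remaining jiffies (> 0) on success, 0 when the wait timed out, or a
 * negative error code; the hypothetical helper below spells out the same
 * mapping implemented by the nested conditional expression.
 */
#if 0
static int example_fence_wait_to_errno(struct dma_fence *fence, long timeout)
{
	long ret = dma_fence_wait_timeout(fence, false, timeout);

	if (ret > 0)
		return 0;		/* signalled, some jiffies left */
	if (ret == 0)
		return -ETIMEDOUT;	/* wait expired before the clear completed */
	return ret;			/* propagate the error as-is */
}
#endif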
static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool is_primary = xe_gt_is_main_type(gt);
	bool valid_ggtt, valid_ctxs, valid_dbs;
	bool valid_any, valid_all;

	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	valid_dbs = pf_get_vf_config_dbs(gt, vfid);

	/* note that GuC doorbells are optional */
	valid_any = valid_ctxs || valid_dbs;
	valid_all = valid_ctxs;

	/* and GGTT/LMEM is configured on primary GT only */
	valid_all = valid_all && valid_ggtt;
	valid_any = valid_any || (valid_ggtt && is_primary);

	if (xe_device_has_lmtt(xe)) {
		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);

		valid_any = valid_any || (valid_lmem && is_primary);
		valid_all = valid_all && valid_lmem;
	}

	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
}

/**
 * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 *
 * This function can only be called on PF.
 *
 * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
 */
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
{
	bool empty;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return empty;
}

/**
 * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @buf: the buffer to save the config to (or NULL to query the buffer size)
 * @size: the size of the buffer (or 0 to query the buffer size)
 *
 * This function can only be called on PF.
 *
 * Return: minimum size of the buffer or the number of bytes saved,
 *         or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
{
	struct xe_gt_sriov_config *config;
	ssize_t ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !(!buf ^ !size));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	ret = pf_validate_vf_config(gt, vfid);
	if (!size) {
		ret = ret ? 0 : SZ_4K;
	} else if (!ret) {
		if (size < SZ_4K) {
			ret = -ENOBUFS;
		} else {
			config = pf_pick_vf_config(gt, vfid);
			ret = encode_config(buf, config, false) * sizeof(u32);
		}
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return ret;
}
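/*
 * Usage sketch (not compiled): the expected save/restore round trip for a VF
 * config blob, for example around VF migration.  The query-then-save calling
 * convention comes from the function above; the surrounding helper and its
 * error handling are assumptions.
 */
#if 0
static int example_save_and_restore_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	ssize_t size, saved;
	void *blob;
	int err;

	/* pass (NULL, 0) to query the required buffer size */
	size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
	if (size <= 0)
		return size ?: -ENODATA;

	blob = kzalloc(size, GFP_KERNEL);
	if (!blob)
		return -ENOMEM;

	saved = xe_gt_sriov_pf_config_save(gt, vfid, blob, size);
	if (saved < 0) {
		kfree(blob);
		return saved;
	}

	/* ... later, re-provision the VF from the saved KLV blob ... */
	err = xe_gt_sriov_pf_config_restore(gt, vfid, blob, saved);
	kfree(blob);
	return err;
}
#endif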
static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
				    u32 key, u32 len, const u32 *value)
{
	switch (key) {
	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
			return -EBADMSG;
		return pf_provision_vf_ctxs(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
			return -EBADMSG;
		return pf_provision_vf_dbs(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
			return -EBADMSG;
		return pf_provision_exec_quantum(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
			return -EBADMSG;
		return pf_provision_preempt_timeout(gt, vfid, value[0]);

	/* auto-generate case statements */
#define define_threshold_key_to_provision_case(TAG, ...)				\
	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
			return -EBADMSG;						\
		return pf_provision_threshold(gt, vfid,				\
					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
					      value[0]);

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
#undef define_threshold_key_to_provision_case
	}

	if (xe_gt_is_media_type(gt))
		return -EKEYREJECTED;

	switch (key) {
	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
			return -EBADMSG;
		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));

	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
		if (!IS_DGFX(gt_to_xe(gt)))
			return -EKEYREJECTED;
		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
			return -EBADMSG;
		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
	}

	return -EKEYREJECTED;
}

static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
				const u32 *klvs, size_t num_dwords)
{
	int err;

	while (num_dwords >= GUC_KLV_LEN_MIN) {
		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);

		klvs += GUC_KLV_LEN_MIN;
		num_dwords -= GUC_KLV_LEN_MIN;

		if (num_dwords < len)
			err = -EBADMSG;
		else
			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);

		if (err) {
			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
			return err;
		}

		klvs += len;
		num_dwords -= len;
	}

	return pf_validate_vf_config(gt, vfid);
}

/**
 * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @buf: the buffer with config data
 * @size: the size of the config data
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
				  const void *buf, size_t size)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	if (!size)
		return -ENODATA;

	if (size % sizeof(u32))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_dbg_printer(gt);

		drm_printf(&p, "restoring VF%u config:\n", vfid);
		xe_guc_klv_print(buf, size / sizeof(u32), &p);
	}

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err) {
		pf_release_vf_config(gt, vfid);
		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return err;
}
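/*
 * Layout sketch (not compiled): the blob consumed by
 * xe_gt_sriov_pf_config_restore() above is a plain stream of GuC KLVs, each
 * being one 32-bit header dword (key + length) followed by 'length' value
 * dwords, exactly as walked by pf_restore_vf_config().  The example builds a
 * minimal two-entry blob; the helper name and the chosen values are
 * arbitrary and only meant to show the framing.
 */
#if 0
static size_t example_build_restore_blob(u32 *blob)
{
	u32 *p = blob;

	/* 32 GuC contexts: 1 header dword + 1 value dword */
	*p++ = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY) |
	       FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN);
	*p++ = 32;

	/* 2 MiB of GGTT: 1 header dword + 2 value dwords (64-bit value) */
	*p++ = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_GGTT_SIZE_KEY) |
	       FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_GGTT_SIZE_LEN);
	*p++ = lower_32_bits(SZ_2M);
	*p++ = upper_32_bits(SZ_2M);

	return (p - blob) * sizeof(u32);	/* byte size to pass to _restore() */
}
#endif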
static void pf_prepare_self_config(struct xe_gt *gt)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);

	/*
	 * We want the PF to be allowed to use all context IDs, all doorbell
	 * IDs and the whole usable GGTT area. While we can store the ctxs/dbs
	 * numbers directly in the config structure, we can't do the same with
	 * the GGTT configuration, so let it be prepared on demand while
	 * pushing the KLVs.
	 */
	config->num_ctxs = GUC_ID_MAX;
	config->num_dbs = GUC_NUM_DOORBELLS;
}

static int pf_push_self_config(struct xe_gt *gt)
{
	int err;

	err = pf_push_full_vf_config(gt, PFID);
	if (err) {
		xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
				ERR_PTR(err));
		return err;
	}

	xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
	return 0;
}

static void fini_config(void *arg)
{
	struct xe_gt *gt = arg;
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = 1; n <= total_vfs; n++)
		pf_release_vf_config(gt, n);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
}

/**
 * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	pf_prepare_self_config(gt);
	err = pf_push_self_config(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (err)
		return err;

	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
}

/**
 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
 * @gt: the &xe_gt
 *
 * Any prior configurations pushed to GuC are lost when the GT is reset.
 * Push again all non-empty VF configurations to the GuC.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	unsigned int fail = 0, skip = 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	pf_push_self_config(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		if (xe_gt_sriov_pf_config_is_empty(gt, n))
			skip++;
		else if (xe_gt_sriov_pf_config_push(gt, n, false))
			fail++;
	}

	if (fail)
		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
				   fail, total_vfs - skip, str_plural(total_vfs));

	if (fail != total_vfs)
		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
}
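/*
 * Flow sketch (not compiled): a hypothetical post-reset path on the PF.  The
 * real caller lives elsewhere in the driver; this only illustrates the order
 * implied above: the PF's own ("self") configuration is re-pushed first so
 * the PF keeps access to all contexts, doorbells and GGTT, then every VF
 * that still holds a non-empty configuration is re-provisioned in the GuC.
 */
#if 0
static void example_pf_after_gt_reset(struct xe_gt *gt)
{
	if (!IS_SRIOV_PF(gt_to_xe(gt)))
		return;

	/* re-pushes the PF self config, then all non-empty VF configs */
	xe_gt_sriov_pf_config_restart(gt);
}
#endif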
/**
 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT configuration data for all VFs.
 * VFs without provisioned GGTT are ignored.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!xe_ggtt_node_allocated(config->ggtt_region))
			continue;

		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
				buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
			   n, config->ggtt_region->base.start,
			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
			   buf);
	}

	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC context ID allocations across all VFs.
 * VFs without GuC context IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_ctxs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_ctx,
			   config->begin_ctx + config->num_ctxs - 1,
			   config->num_ctxs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC doorbell ID allocations across all VFs.
 * VFs without GuC doorbell IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_dbs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_db,
			   config->begin_db + config->num_dbs - 1,
			   config->num_dbs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}
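/*
 * Output sketch: the printers above (and below) are intended as sinks for the
 * PF's provisioning views; each row describes one VF, e.g. (values are made
 * up):
 *
 *	VF1:	0x1000000-0x1ffffff	(16.0 MiB)
 *	VF2:	0x2000000-0x2ffffff	(16.0 MiB)
 *
 * A hypothetical caller only needs a struct drm_printer (not compiled):
 */
#if 0
static void example_dump_provisioning(struct xe_gt *gt)
{
	struct drm_printer p = xe_gt_info_printer(gt);

	xe_gt_sriov_pf_config_print_ggtt(gt, &p);
	xe_gt_sriov_pf_config_print_ctxs(gt, &p);
	xe_gt_sriov_pf_config_print_dbs(gt, &p);
}
#endif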
/**
 * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print LMEM allocations across all VFs.
 * VFs without LMEM allocation are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->lmem_obj)
			continue;

		string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
				buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
			   n, xe_bo_size(config->lmem_obj), buf);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available for the provisioning.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare, avail, total;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	spare = pf_get_spare_ggtt(gt);
	total = xe_ggtt_print_holes(ggtt, alignment, p);

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);

	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);

	avail = total > spare ? total - spare : 0;

	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);

	return 0;
}