// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

/*
 * Return: number of KLVs that were successfully parsed and saved,
 * negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 * negative error code on failure.
 */
static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
	struct xe_guc *guc = &gt->uc.guc;

	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 * negative error code on failure.
 */
static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       struct xe_guc_buf buf, u32 num_dwords)
{
	int ret;

	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_dbg_printer(gt);
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		char name[8];

		xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
				xe_sriov_function_name(vfid, name, sizeof(name)),
				num_klvs, str_plural(num_klvs));
		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

/*
 * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
 * negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
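	/*
	 * CLASS() declares a scope-bound GuC buffer: it is filled from the
	 * caller's KLV data here and released automatically when this
	 * function returns.
	 */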
	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
}

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

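/*
 * Select the configuration slot for the given function: vfid == PFID picks
 * the PF's own entry, any other value a VF. Callers must hold the PF master
 * mutex, as the lockdep assertion below enforces.
 */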
static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
{
	u32 n = 0;

	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
		cfg[n++] = lower_32_bits(start);
		cfg[n++] = upper_32_bits(start);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
	cfg[n++] = lower_32_bits(size);
	cfg[n++] = upper_32_bits(size);

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	struct xe_ggtt_node *node = config->ggtt_region;

	if (!xe_ggtt_node_allocated(node))
		return 0;

	return encode_ggtt(cfg, node->base.start, node->base.size, details);
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details && config->num_ctxs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details && config->num_dbs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
		cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

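	/*
	 * Append one KLV per GuC threshold; the helper macro below is
	 * expanded once for every entry of MAKE_XE_GUC_KLV_THRESHOLDS_SET().
	 */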
#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	cfg = xe_guc_buf_cpu_ptr(buf);
	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (vfid == PFID) {
		u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
		u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;

		/* plain PF config data will never include a real GGTT region */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* fake PF GGTT config covers full GGTT range except reserved WOPCM */
		num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
	}

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);

	return err;
}

static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
{
	int err = 0;

	xe_gt_assert(gt, vfid);
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (reset)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);

	return err;
}

static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
{
	return pf_push_vf_cfg(gt, vfid, true);
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as PTE will be implicitly re-assigned to PF by
		 * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);

		err = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(err))
			return err;
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents PF, then function will change PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

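/*
 * On a partial bulk failure the helper below first recurses over the VFs that
 * were already provisioned (first..last-1) to log their final values, then
 * reports the error against the VF that failed.
 */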
static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest continuous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at single largest GGTT region
	 * as that will be always the best fit for 1 VF case, and most likely
	 * will also nicely cover other cases where VFs are provisioned on the
	 * fresh and idle PF driver, without any stale GGTT allocations spread
	 * in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC contexts IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF then number of PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC contexts IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC contexts IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC contexts IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

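/*
 * Estimate a fair per-VF context ID quota by probing the ID manager with
 * decreasing counts until a trial reservation of fair * num_vfs IDs succeeds;
 * the trial reservation is released again immediately.
 */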
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbells IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF then number of PF's spare GuC doorbells IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbells IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbells IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

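/*
 * Fair doorbell estimation mirrors the context ID logic above, but probes the
 * doorbell manager, which has GUC_NUM_DOORBELLS IDs in total.
 */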
static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? xe_bo_size(bo) : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_invalidate_hw(lmtt);
	}
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += xe_bo_size(bo);
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

/* Return: %true if there was an LMEM provisioned, %false otherwise */
static bool pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
		return true;
	}
	return false;
}

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

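	/*
	 * Any existing LMEM provisioning is torn down first: the size is
	 * pushed to GuC as zero, the LMTT mappings are dropped and the old
	 * backing object is released, before a new one is allocated below.
	 */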
	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		if (xe_device_has_lmtt(xe))
			pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_range_novm(xe, tile,
					 ALIGN(size, PAGE_SIZE), 0, ~0ull,
					 ttm_bo_type_kernel,
					 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					 XE_BO_FLAG_NEEDS_2M |
					 XE_BO_FLAG_PINNED |
					 XE_BO_FLAG_PINNED_LATE_RESTORE |
					 XE_BO_FLAG_FORCE_USER_VRAM);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	if (xe_device_has_lmtt(xe)) {
		err = pf_update_vf_lmtt(xe, vfid);
		if (unlikely(err))
			goto release;
	}

	err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
	return 0;

reset_lmtt:
	if (xe_device_has_lmtt(xe))
		pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	if (!xe_device_has_lmtt(gt_to_xe(gt)))
		return -EPERM;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	if (!xe_device_has_lmtt(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

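/*
 * Composite fair provisioning: GGTT and LMEM are only provisioned on the
 * primary GT, while GuC context IDs and doorbells are provisioned on every
 * GT. The first error is preserved, but the remaining steps still run.
 */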
/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (xe_gt_is_main_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}

static const char *exec_quantum_unit(u32 exec_quantum)
{
	return exec_quantum ? "ms" : "(infinity)";
}

static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
				     u32 exec_quantum)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
	if (unlikely(err))
		return err;

	config->exec_quantum = exec_quantum;
	return 0;
}

static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->exec_quantum;
}

/**
 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
					   u32 exec_quantum)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, exec_quantum,
				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
				      "execution quantum", exec_quantum_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) execution quantum in milliseconds.
 */
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	u32 exec_quantum;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	exec_quantum = pf_get_exec_quantum(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return exec_quantum;
}

static const char *preempt_timeout_unit(u32 preempt_timeout)
{
	return preempt_timeout ? "us" : "(infinity)";
}

static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					u32 preempt_timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
	if (unlikely(err))
		return err;

	config->preempt_timeout = preempt_timeout;

	return 0;
}

static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->preempt_timeout;
}

/**
 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					      u32 preempt_timeout)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
				      "preemption timeout", preempt_timeout_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) preemption timeout in microseconds.
 */
u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	u32 preempt_timeout;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return preempt_timeout;
}

static const char *sched_priority_unit(u32 priority)
{
	return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
		priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
		priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
		"(?)";
}

static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
	if (unlikely(err))
		return err;

	config->sched_priority = priority;
	return 0;
}

static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->sched_priority;
}

/**
 * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @priority: requested scheduling priority
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_sched_priority(gt, vfid, priority);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, priority,
				      xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
				      "scheduling priority", sched_priority_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) scheduling priority.
 */
u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
{
	u32 priority;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	priority = pf_get_sched_priority(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return priority;
}

static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	config->exec_quantum = 0;
	config->preempt_timeout = 0;
}

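/*
 * Thresholds are pushed to GuC as individual KLVs (see
 * xe_guc_klv_thresholds_set.h) and mirrored in config->thresholds[] so they
 * can be queried later without another GuC round trip.
 */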
static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
				  enum xe_guc_klv_threshold_index index, u32 value)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
	if (unlikely(err))
		return err;

	config->thresholds[index] = value;

	return 0;
}

static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
			    enum xe_guc_klv_threshold_index index)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->thresholds[index];
}

static const char *threshold_unit(u32 threshold)
{
	return threshold ? "" : "(disabled)";
}

/**
 * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 * @value: requested value (0 means disabled)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);
	const char *name = xe_guc_klv_key_to_string(key);
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_threshold(gt, vfid, index, value);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, value,
				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
				      name, threshold_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 *
 * This function can only be called on PF.
 *
 * Return: value of VF's (or PF's) threshold.
 */
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index)
{
	u32 value;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	value = pf_get_threshold(gt, vfid, index);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return value;
}

static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

#define reset_threshold_config(TAG, ...) ({				\
	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
#undef reset_threshold_config
}

static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	bool released;

	if (xe_gt_is_main_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		if (IS_DGFX(xe)) {
			released = pf_release_vf_config_lmem(gt, config);
			if (released && xe_device_has_lmtt(xe))
				pf_update_vf_lmtt(xe, vfid);
		}
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
	pf_reset_config_sched(gt, config);
	pf_reset_config_thresholds(gt, config);
}

/**
 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @force: force configuration release
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
{
	int err;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err || force)
		pf_release_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
				   vfid, ERR_PTR(err),
				   force ? " but all resources were released anyway!" : "");
	}

	return force ? 0 : err;
}
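
/*
 * The sanitize helpers below scrub only the resources that may still hold
 * VF data: the VF's GGTT range is re-assigned via xe_ggtt_assign() and, on
 * discrete platforms, the VF's LMEM object is cleared with
 * xe_migrate_clear(). The likely intent is to avoid leaking stale VF data
 * to a future owner of the same resources; GuC context and doorbell IDs
 * carry no VF data and are left untouched (see pf_sanitize_vf_resources()).
 */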
static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
{
	if (xe_ggtt_node_allocated(ggtt_region))
		xe_ggtt_assign(ggtt_region, vfid);
}

static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
{
	struct xe_migrate *m = tile->migrate;
	struct dma_fence *fence;
	int err;

	if (!bo)
		return 0;

	xe_bo_lock(bo, false);
	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
	} else if (!fence) {
		err = -ENOMEM;
	} else {
		long ret = dma_fence_wait_timeout(fence, false, timeout);

		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
		dma_fence_put(fence);
		if (!err)
			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
						jiffies_to_msecs(timeout - ret));
	}
	xe_bo_unlock(bo);

	return err;
}

static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	int err = 0;

	/*
	 * Only GGTT and LMEM require clearing by the PF.
	 * GuC doorbell IDs and context IDs do not need any clearing.
	 */
	if (xe_gt_is_main_type(gt)) {
		pf_sanitize_ggtt(config->ggtt_region, vfid);
		if (IS_DGFX(xe))
			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
	}

	return err;
}

/**
 * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @timeout: maximum timeout to wait for completion in jiffies
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
{
	int err;

	xe_gt_assert(gt, vfid != PFID);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_sanitize_vf_resources(gt, vfid, timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err))
		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
				   vfid, ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @refresh: explicit refresh
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
{
	int err = 0;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_push_vf_cfg(gt, vfid, refresh);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
	}

	return err;
}
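
/*
 * pf_validate_vf_config() distinguishes three states of a VF config:
 * fully provisioned (0), partially provisioned (-ENOKEY) and completely
 * unprovisioned (-ENODATA). GGTT and LMEM are checked on the primary GT
 * only, while GuC doorbells are optional and never gate the fully
 * provisioned state. xe_gt_sriov_pf_config_is_empty() checks specifically
 * for -ENODATA, while xe_gt_sriov_pf_config_save() only saves a fully
 * provisioned config.
 */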
static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool is_primary = xe_gt_is_main_type(gt);
	bool valid_ggtt, valid_ctxs, valid_dbs;
	bool valid_any, valid_all;

	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	valid_dbs = pf_get_vf_config_dbs(gt, vfid);

	/* note that GuC doorbells are optional */
	valid_any = valid_ctxs || valid_dbs;
	valid_all = valid_ctxs;

	/* and GGTT/LMEM is configured on primary GT only */
	valid_all = valid_all && valid_ggtt;
	valid_any = valid_any || (valid_ggtt && is_primary);

	if (xe_device_has_lmtt(xe)) {
		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);

		valid_any = valid_any || (valid_lmem && is_primary);
		valid_all = valid_all && valid_lmem;
	}

	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
}

/**
 * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 *
 * This function can only be called on PF.
 *
 * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
 */
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
{
	bool empty;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return empty;
}

/**
 * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @buf: the buffer to save a config to (or NULL to query the buf size)
 * @size: the size of the buffer (or 0 to query the buf size)
 *
 * This function can only be called on PF.
 *
 * Return: minimum size of the buffer or the number of bytes saved,
 * or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
{
	struct xe_gt_sriov_config *config;
	ssize_t ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !(!buf ^ !size));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	ret = pf_validate_vf_config(gt, vfid);
	if (!size) {
		ret = ret ? 0 : SZ_4K;
	} else if (!ret) {
		if (size < SZ_4K) {
			ret = -ENOBUFS;
		} else {
			config = pf_pick_vf_config(gt, vfid);
			ret = encode_config(buf, config, false) * sizeof(u32);
		}
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return ret;
}
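
/*
 * The blob produced by xe_gt_sriov_pf_config_save() and consumed by
 * xe_gt_sriov_pf_config_restore() is a plain stream of GuC KLVs: each
 * entry is one header dword (GUC_KLV_0_KEY | GUC_KLV_0_LEN) followed by
 * GUC_KLV_0_LEN value dwords. For illustration only, a blob carrying a
 * context allocation and an LMEM size could look like:
 *
 *   [0] FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY) |
 *       FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
 *   [1] number of contexts
 *   [2] FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_LMEM_SIZE_KEY) |
 *       FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
 *   [3] lower 32 bits of the LMEM size
 *   [4] upper 32 bits of the LMEM size
 *
 * Unknown or malformed keys are rejected by pf_restore_vf_config_klv().
 */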
static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
				    u32 key, u32 len, const u32 *value)
{
	switch (key) {
	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
			return -EBADMSG;
		return pf_provision_vf_ctxs(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
			return -EBADMSG;
		return pf_provision_vf_dbs(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
			return -EBADMSG;
		return pf_provision_exec_quantum(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
			return -EBADMSG;
		return pf_provision_preempt_timeout(gt, vfid, value[0]);

	/* auto-generate case statements */
#define define_threshold_key_to_provision_case(TAG, ...)			\
	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):				\
		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);	\
		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))		\
			return -EBADMSG;					\
		return pf_provision_threshold(gt, vfid,			\
					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG), \
					      value[0]);

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
#undef define_threshold_key_to_provision_case
	}

	if (xe_gt_is_media_type(gt))
		return -EKEYREJECTED;

	switch (key) {
	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
			return -EBADMSG;
		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));

	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
		if (!IS_DGFX(gt_to_xe(gt)))
			return -EKEYREJECTED;
		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
			return -EBADMSG;
		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
	}

	return -EKEYREJECTED;
}

static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
				const u32 *klvs, size_t num_dwords)
{
	int err;

	while (num_dwords >= GUC_KLV_LEN_MIN) {
		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);

		klvs += GUC_KLV_LEN_MIN;
		num_dwords -= GUC_KLV_LEN_MIN;

		if (num_dwords < len)
			err = -EBADMSG;
		else
			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);

		if (err) {
			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
			return err;
		}

		klvs += len;
		num_dwords -= len;
	}

	return pf_validate_vf_config(gt, vfid);
}

/**
 * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @buf: the buffer with config data
 * @size: the size of the config data
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
				  const void *buf, size_t size)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	if (!size)
		return -ENODATA;

	if (size % sizeof(u32))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_dbg_printer(gt);

		drm_printf(&p, "restoring VF%u config:\n", vfid);
		xe_guc_klv_print(buf, size / sizeof(u32), &p);
	}

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err) {
		pf_release_vf_config(gt, vfid);
		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return err;
}

static void pf_prepare_self_config(struct xe_gt *gt)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);

	/*
	 * We want the PF to be allowed to use all context IDs, all doorbell
	 * IDs and the whole usable GGTT area. While we can store the ctxs/dbs
	 * numbers directly in the config structure, we can't do the same with
	 * the GGTT configuration, so let it be prepared on demand while
	 * pushing the KLVs.
	 */
	config->num_ctxs = GUC_ID_MAX;
	config->num_dbs = GUC_NUM_DOORBELLS;
}

static int pf_push_self_config(struct xe_gt *gt)
{
	int err;

	err = pf_push_full_vf_config(gt, PFID);
	if (err) {
		xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
				ERR_PTR(err));
		return err;
	}

	xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
	return 0;
}

static void fini_config(void *arg)
{
	struct xe_gt *gt = arg;
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = 1; n <= total_vfs; n++)
		pf_release_vf_config(gt, n);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
}

/**
 * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	pf_prepare_self_config(gt);
	err = pf_push_self_config(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (err)
		return err;

	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
}

/**
 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
 * @gt: the &xe_gt
 *
 * Any prior configurations pushed to GuC are lost when the GT is reset.
 * Push again all non-empty VF configurations to the GuC.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	unsigned int fail = 0, skip = 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	pf_push_self_config(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		if (xe_gt_sriov_pf_config_is_empty(gt, n))
			skip++;
		else if (xe_gt_sriov_pf_config_push(gt, n, false))
			fail++;
	}

	if (fail)
		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
				   fail, total_vfs - skip, str_plural(total_vfs));

	if (fail != total_vfs)
		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
}
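
/*
 * The xe_gt_sriov_pf_config_print_*() helpers below dump the current
 * provisioning through a &drm_printer (e.g. for debugfs-style listings),
 * one line per provisioned VF in a "VF<n>:\t<range or size>\t(<decoded>)"
 * layout; an illustrative (made up) GGTT line could look like:
 *
 *   VF1:	0x100000-0x4fffff	(4.00 MiB)
 */
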
/**
 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT configuration data for all VFs.
 * VFs without provisioned GGTT are ignored.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!xe_ggtt_node_allocated(config->ggtt_region))
			continue;

		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
				buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
			   n, config->ggtt_region->base.start,
			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
			   buf);
	}

	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC context ID allocations across all VFs.
 * VFs without GuC context IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_ctxs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_ctx,
			   config->begin_ctx + config->num_ctxs - 1,
			   config->num_ctxs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC doorbell ID allocations across all VFs.
 * VFs without GuC doorbell IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_dbs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_db,
			   config->begin_db + config->num_dbs - 1,
			   config->num_dbs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print LMEM allocations across all VFs.
 * VFs without LMEM allocation are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->lmem_obj)
			continue;

		string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
				buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
			   n, xe_bo_size(config->lmem_obj), buf);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available for provisioning.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare, avail, total;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	spare = pf_get_spare_ggtt(gt);
	total = xe_ggtt_print_holes(ggtt, alignment, p);

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);

	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);

	avail = total > spare ? total - spare : 0;

	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);

	return 0;
}