// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

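/*
 * Note: sending this action with a zero buffer address and zero size asks
 * GuC to drop the VF's entire configuration (see the PF2GUC_UPDATE_VF_CFG
 * ABI in abi/guc_actions_sriov_abi.h); pf_send_vf_cfg_reset() below relies
 * on exactly that.
 */
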
/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
{
	const u32 bytes = num_dwords * sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(bytes, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);

	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);

	xe_bo_unpin_map_no_vm(bo);

	return ret;
}

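/*
 * The config is sent to GuC as a stream of 32-bit KLV (key-length-value)
 * records; a 64-bit value takes two value dwords, e.g.:
 *
 *	[ key = VF_CFG_GGTT_SIZE, len = 2 ]
 *	[ lower_32_bits(size) ]
 *	[ upper_32_bits(size) ]
 *
 * GuC replies with the number of KLVs it accepted, which must match the
 * number pushed, otherwise the update is treated as a failure below.
 */
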
/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	int ret;

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

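/*
 * Convenience wrappers for pushing a single KLV; 64-bit values are split
 * into two dwords as required by the KLV format, and all validation and
 * logging stays in pf_push_vf_cfg_klvs() above.
 */
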
static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

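/*
 * The two helpers below take their value by pointer and write back the
 * clamped value, so that the PF's cached config always matches what GuC
 * will actually apply.
 */
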
static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

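/*
 * The encode_config*() helpers below serialize a cached VF config back into
 * a KLV stream; the @details flag additionally includes the start/begin
 * KLVs (GGTT start, first context ID, first doorbell ID) that are needed
 * when replaying the full config to GuC.
 */
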
/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		if (details) {
			cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
			cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
			cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
		}

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}

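/*
 * A 4K scratch buffer (1024 dwords) is assumed to comfortably cover the
 * worst-case encoded config; the asserts below catch any future overflow.
 */
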
static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);

	kfree(cfg);
	return err;
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

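/*
 * The "spare" values below describe resources held back for the PF itself;
 * they are never handed out to VFs and act as a floor when estimating fair
 * VF allocations.
 */
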
static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * An explicit GGTT PTE assignment back to the PF using
		 * xe_ggtt_assign() is redundant, as the PTEs will be
		 * implicitly re-assigned to the PF by the xe_ggtt_clear()
		 * called from xe_ggtt_node_remove() below.
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

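/*
 * Re-provisioning first pushes an empty GGTT config to GuC and releases any
 * previously assigned node, then inserts a fresh node, re-assigns its PTEs
 * to the VF and distributes the new range to both the primary and media GT.
 */
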
static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

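/*
 * A successful u64 provisioning ends with a human-readable log entry,
 * e.g. (hypothetical values):
 *
 *	VF1 provisioned with 1073741824 (1.00 GiB) GGTT
 */
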
static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents the PF, then the function will change the PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

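/*
 * On a bulk provisioning failure at VF 'last', the recursive call below
 * first reports the VFs that were provisioned successfully (first..last-1),
 * then the failing VF is reported separately with the actual error.
 */
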
static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at the single largest GGTT region,
	 * as that will always be the best fit for the 1 VF case, and will most
	 * likely also nicely cover other cases where VFs are provisioned on a
	 * fresh and idle PF driver, without any stale GGTT allocations spread
	 * in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

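/*
 * GuC context IDs come from a single pool managed by xe_guc_id_mgr; each VF
 * is provisioned with a contiguous range of IDs carved out of that pool.
 */
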
static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents the PF, then the number of the PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

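/*
 * The fair estimate below probes downwards: it tries to reserve
 * fair * num_vfs IDs, and on failure retries with a smaller 'fair' until a
 * reservation succeeds; the successful reservation is released immediately,
 * as it was only a probe.
 */
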
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents the PF, then the number of the PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

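/*
 * LMEM (VF local memory) is provisioned on the primary GT of each tile as a
 * pinned buffer object, and then mirrored into the tile's LMTT so that the
 * VF can only access its own VRAM range.
 */
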
static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

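/*
 * Each tile's LMTT maps the VF's aggregate LMEM from all tiles into one
 * contiguous VF-visible range, so whenever any tile's quota changes, every
 * tile's LMTT is dropped and rebuilt from scratch below.
 */
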
static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_NEEDS_2M |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	err = pf_update_vf_lmtt(xe, vfid);
	if (unlikely(err))
		goto release;

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#else
#define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!IS_DGFX(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

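/*
 * A typical caller flow (hypothetical sketch, e.g. when VFs are being
 * enabled) would invoke the combined helper below once per GT:
 *
 *	for_each_gt(gt, xe, gtid)
 *		err = xe_gt_sriov_pf_config_set_fair(gt, 1, num_vfs);
 */
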
/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}

static const char *exec_quantum_unit(u32 exec_quantum)
{
	return exec_quantum ? "ms" : "(infinity)";
}

static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
				     u32 exec_quantum)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
	if (unlikely(err))
		return err;

	config->exec_quantum = exec_quantum;
	return 0;
}

static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->exec_quantum;
}

/**
 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
					   u32 exec_quantum)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, exec_quantum,
				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
				      "execution quantum", exec_quantum_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) execution quantum in milliseconds.
 */
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	u32 exec_quantum;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	exec_quantum = pf_get_exec_quantum(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return exec_quantum;
}

static const char *preempt_timeout_unit(u32 preempt_timeout)
{
	return preempt_timeout ? "us" : "(infinity)";
}

static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					u32 preempt_timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
	if (unlikely(err))
		return err;

	config->preempt_timeout = preempt_timeout;

	return 0;
}

static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->preempt_timeout;
}

/**
 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					      u32 preempt_timeout)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
				      "preemption timeout", preempt_timeout_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) preemption timeout in microseconds.
 */
u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	u32 preempt_timeout;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return preempt_timeout;
}

static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	config->exec_quantum = 0;
	config->preempt_timeout = 0;
}

static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
				  enum xe_guc_klv_threshold_index index, u32 value)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
	if (unlikely(err))
		return err;

	config->thresholds[index] = value;

	return 0;
}

static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
			    enum xe_guc_klv_threshold_index index)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->thresholds[index];
}

static const char *threshold_unit(u32 threshold)
{
	return threshold ? "" : "(disabled)";
}

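/*
 * Thresholds arm GuC-side monitoring of VF activity; see
 * xe_guc_klv_thresholds_set.h for the full set of supported thresholds.
 */
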
1806 /**
1807  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1808  * @gt: the &xe_gt
1809  * @vfid: the VF identifier
1810  * @index: the threshold index
1811  * @value: requested value (0 means disabled)
1812  *
1813  * This function can only be called on PF.
1814  *
1815  * Return: 0 on success or a negative error code on failure.
1816  */
1817 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1818 					enum xe_guc_klv_threshold_index index, u32 value)
1819 {
1820 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1821 	const char *name = xe_guc_klv_key_to_string(key);
1822 	int err;
1823 
1824 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1825 	err = pf_provision_threshold(gt, vfid, index, value);
1826 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1827 
1828 	return pf_config_set_u32_done(gt, vfid, value,
1829 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1830 				      name, threshold_unit, err);
1831 }
1832 
1833 /**
1834  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1835  * @gt: the &xe_gt
1836  * @vfid: the VF identifier
1837  * @index: the threshold index
1838  *
1839  * This function can only be called on PF.
1840  *
1841  * Return: value of VF's (or PF's) threshold.
1842  */
1843 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1844 					enum xe_guc_klv_threshold_index index)
1845 {
1846 	u32 value;
1847 
1848 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1849 	value = pf_get_threshold(gt, vfid, index);
1850 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1851 
1852 	return value;
1853 }
1854 
1855 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1856 {
1857 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1858 
1859 #define reset_threshold_config(TAG, ...) ({				\
1860 	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
1861 });
1862 
1863 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
1864 #undef reset_threshold_config
1865 }
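
/*
 * For reference, a sketch of how the X-macro above expands: for each
 * threshold TAG defined by MAKE_XE_GUC_KLV_THRESHOLDS_SET (the set of
 * tags comes from the thresholds-set header), one assignment is emitted:
 *
 *	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;
 *
 * so the whole thresholds array is zeroed without hand-listing every index.
 */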
1866 
1867 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
1868 {
1869 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1870 	struct xe_device *xe = gt_to_xe(gt);
1871 
1872 	if (!xe_gt_is_media_type(gt)) {
1873 		pf_release_vf_config_ggtt(gt, config);
1874 		if (IS_DGFX(xe)) {
1875 			pf_release_vf_config_lmem(gt, config);
1876 			pf_update_vf_lmtt(xe, vfid);
1877 		}
1878 	}
1879 	pf_release_config_ctxs(gt, config);
1880 	pf_release_config_dbs(gt, config);
1881 	pf_reset_config_sched(gt, config);
1882 	pf_reset_config_thresholds(gt, config);
1883 }
1884 
1885 /**
1886  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
1887  * @gt: the &xe_gt
1888  * @vfid: the VF identifier (can't be PF)
1889  * @force: force configuration release
1890  *
1891  * This function can only be called on PF.
1892  *
1893  * Return: 0 on success or a negative error code on failure.
1894  */
1895 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
1896 {
1897 	int err;
1898 
1899 	xe_gt_assert(gt, vfid);
1900 
1901 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1902 	err = pf_send_vf_cfg_reset(gt, vfid);
1903 	if (!err || force)
1904 		pf_release_vf_config(gt, vfid);
1905 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1906 
1907 	if (unlikely(err)) {
1908 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
1909 				   vfid, ERR_PTR(err),
1910 				   force ? " but all resources were released anyway!" : "");
1911 	}
1912 
1913 	return force ? 0 : err;
1914 }
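
/*
 * Illustrative usage (hypothetical caller): when tearing down VFs, the PF
 * would typically force the release so that local resources are reclaimed
 * even if the GuC reset request failed:
 *
 *	err = xe_gt_sriov_pf_config_release(gt, vfid, true);
 *	(with force == true the return value is 0 and resources are freed)
 */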
1915 
1916 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
1917 {
1918 	if (xe_ggtt_node_allocated(ggtt_region))
1919 		xe_ggtt_assign(ggtt_region, vfid);
1920 }
1921 
1922 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
1923 {
1924 	struct xe_migrate *m = tile->migrate;
1925 	struct dma_fence *fence;
1926 	int err;
1927 
1928 	if (!bo)
1929 		return 0;
1930 
1931 	xe_bo_lock(bo, false);
1932 	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
1933 	if (IS_ERR(fence)) {
1934 		err = PTR_ERR(fence);
1935 	} else if (!fence) {
1936 		err = -ENOMEM;
1937 	} else {
1938 		long ret = dma_fence_wait_timeout(fence, false, timeout);
1939 
1940 		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
1941 		dma_fence_put(fence);
1942 		if (!err)
1943 			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %ums\n",
1944 						jiffies_to_msecs(timeout - ret));
1945 	}
1946 	xe_bo_unlock(bo);
1947 
1948 	return err;
1949 }
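
/*
 * Note on the wait in pf_sanitize_lmem() above: dma_fence_wait_timeout()
 * returns the remaining jiffies (> 0) on success, 0 on timeout, or a
 * negative error code, which the ternary folds as:
 *
 *	ret > 0  -> err = 0          (cleared; elapsed = timeout - ret)
 *	ret == 0 -> err = -ETIMEDOUT
 *	ret < 0  -> err = ret
 */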
1950 
1951 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
1952 {
1953 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1954 	struct xe_tile *tile = gt_to_tile(gt);
1955 	struct xe_device *xe = gt_to_xe(gt);
1956 	int err = 0;
1957 
1958 	/*
1959 	 * Only GGTT and LMEM require clearing by the PF.
1960 	 * GuC doorbell IDs and context IDs do not need any clearing.
1961 	 */
1962 	if (!xe_gt_is_media_type(gt)) {
1963 		pf_sanitize_ggtt(config->ggtt_region, vfid);
1964 		if (IS_DGFX(xe))
1965 			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
1966 	}
1967 
1968 	return err;
1969 }
1970 
1971 /**
1972  * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
1973  * @gt: the &xe_gt
1974  * @vfid: the VF identifier (can't be PF)
1975  * @timeout: maximum timeout to wait for completion in jiffies
1976  *
1977  * This function can only be called on PF.
1978  *
1979  * Return: 0 on success or a negative error code on failure.
1980  */
1981 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
1982 {
1983 	int err;
1984 
1985 	xe_gt_assert(gt, vfid != PFID);
1986 
1987 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1988 	err = pf_sanitize_vf_resources(gt, vfid, timeout);
1989 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1990 
1991 	if (unlikely(err))
1992 		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
1993 				   vfid, ERR_PTR(err));
1994 	return err;
1995 }
1996 
1997 /**
1998  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
1999  * @gt: the &xe_gt
2000  * @vfid: the VF identifier (can't be PF)
2001  * @refresh: explicit refresh
2002  *
2003  * This function can only be called on PF.
2004  *
2005  * Return: 0 on success or a negative error code on failure.
2006  */
2007 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2008 {
2009 	int err = 0;
2010 
2011 	xe_gt_assert(gt, vfid);
2012 
2013 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2014 	if (refresh)
2015 		err = pf_send_vf_cfg_reset(gt, vfid);
2016 	if (!err)
2017 		err = pf_push_full_vf_config(gt, vfid);
2018 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2019 
2020 	if (unlikely(err)) {
2021 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2022 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2023 	}
2024 
2025 	return err;
2026 }
2027 
2028 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2029 {
2030 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2031 	struct xe_device *xe = gt_to_xe(gt);
2032 	bool is_primary = !xe_gt_is_media_type(gt);
2033 	bool valid_ggtt, valid_ctxs, valid_dbs;
2034 	bool valid_any, valid_all;
2035 
2036 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2037 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2038 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2039 
2040 	/* note that GuC doorbells are optional */
2041 	valid_any = valid_ctxs || valid_dbs;
2042 	valid_all = valid_ctxs;
2043 
2044 	/* and GGTT/LMEM is configured on primary GT only */
2045 	valid_all = valid_all && valid_ggtt;
2046 	valid_any = valid_any || (valid_ggtt && is_primary);
2047 
2048 	if (IS_DGFX(xe)) {
2049 		bool valid_lmem = !!pf_pick_vf_config(primary_gt, vfid)->lmem_obj;
2050 
2051 		valid_any = valid_any || (valid_lmem && is_primary);
2052 		valid_all = valid_all && valid_lmem;
2053 	}
2054 
2055 	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
2056 }
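
/*
 * Outcome summary for pf_validate_vf_config(), derived from the logic
 * above:
 *
 *	all mandatory resources provisioned      ->  0        (complete)
 *	some, but not all, resources provisioned -> -ENOKEY   (partial)
 *	no resources provisioned at all          -> -ENODATA  (empty)
 */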
2057 
2058 /**
2059  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2060  * @gt: the &xe_gt
2061  * @vfid: the VF identifier (can't be PF)
2062  *
2063  * This function can only be called on PF.
2064  *
2065  * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2066  */
2067 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2068 {
2069 	bool empty;
2070 
2071 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2072 	xe_gt_assert(gt, vfid);
2073 
2074 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2075 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2076 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2077 
2078 	return empty;
2079 }
2080 
2081 /**
2082  * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2083  * @gt: the &xe_gt
2084  * @vfid: the VF identifier (can't be PF)
2085  * @buf: the buffer to save a config to (or NULL to query the buf size)
2086  * @size: the size of the buffer (or 0 to query the buf size)
2087  *
2088  * This function can only be called on PF.
2089  *
2090  * Return: minimum size of the buffer or the number of bytes saved,
2091  *         or a negative error code on failure.
2092  */
2093 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2094 {
2095 	struct xe_gt_sriov_config *config;
2096 	ssize_t ret;
2097 
2098 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2099 	xe_gt_assert(gt, vfid);
2100 	xe_gt_assert(gt, !(!buf ^ !size));
2101 
2102 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2103 	ret = pf_validate_vf_config(gt, vfid);
2104 	if (!size) {
2105 		ret = ret ? 0 : SZ_4K;
2106 	} else if (!ret) {
2107 		if (size < SZ_4K) {
2108 			ret = -ENOBUFS;
2109 		} else {
2110 			config = pf_pick_vf_config(gt, vfid);
2111 			ret = encode_config(buf, config, false) * sizeof(u32);
2112 		}
2113 	}
2114 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2115 
2116 	return ret;
2117 }
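
/*
 * Illustrative two-step usage (hypothetical caller), matching the
 * query-then-fill convention enforced by the asserts above:
 *
 *	ssize_t size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
 *	if (size > 0) {
 *		void *buf = kzalloc(size, GFP_KERNEL);
 *
 *		if (buf)
 *			size = xe_gt_sriov_pf_config_save(gt, vfid, buf, size);
 *	}
 */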
2118 
2119 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2120 				    u32 key, u32 len, const u32 *value)
2121 {
2122 	switch (key) {
2123 	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2124 		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2125 			return -EBADMSG;
2126 		return pf_provision_vf_ctxs(gt, vfid, value[0]);
2127 
2128 	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2129 		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2130 			return -EBADMSG;
2131 		return pf_provision_vf_dbs(gt, vfid, value[0]);
2132 
2133 	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2134 		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2135 			return -EBADMSG;
2136 		return pf_provision_exec_quantum(gt, vfid, value[0]);
2137 
2138 	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2139 		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2140 			return -EBADMSG;
2141 		return pf_provision_preempt_timeout(gt, vfid, value[0]);
2142 
2143 	/* auto-generate case statements */
2144 #define define_threshold_key_to_provision_case(TAG, ...)				\
2145 	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
2146 		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
2147 		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
2148 			return -EBADMSG;						\
2149 		return pf_provision_threshold(gt, vfid,					\
2150 					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
2151 					      value[0]);
2152 
2153 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2154 #undef define_threshold_key_to_provision_case
2155 	}
2156 
2157 	if (xe_gt_is_media_type(gt))
2158 		return -EKEYREJECTED;
2159 
2160 	switch (key) {
2161 	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2162 		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2163 			return -EBADMSG;
2164 		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2165 
2166 	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2167 		if (!IS_DGFX(gt_to_xe(gt)))
2168 			return -EKEYREJECTED;
2169 		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2170 			return -EBADMSG;
2171 		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2172 	}
2173 
2174 	return -EKEYREJECTED;
2175 }
2176 
2177 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2178 				const u32 *klvs, size_t num_dwords)
2179 {
2180 	int err;
2181 
2182 	while (num_dwords >= GUC_KLV_LEN_MIN) {
2183 		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2184 		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2185 
2186 		klvs += GUC_KLV_LEN_MIN;
2187 		num_dwords -= GUC_KLV_LEN_MIN;
2188 
2189 		if (num_dwords < len)
2190 			err = -EBADMSG;
2191 		else
2192 			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2193 
2194 		if (err) {
2195 			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2196 			return err;
2197 		}
2198 
2199 		klvs += len;
2200 		num_dwords -= len;
2201 	}
2202 
2203 	return pf_validate_vf_config(gt, vfid);
2204 }
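
/*
 * For reference, the KLV stream parsed above is a sequence of entries,
 * each a 1-dword header (key and length packed per the GUC_KLV_0_KEY and
 * GUC_KLV_0_LEN fields, length counted in dwords) followed by the value
 * payload. A hedged sketch of a single "num contexts = 32" entry:
 *
 *	u32 klvs[] = {
 *		FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY) |
 *		FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN),
 *		32,	(this is value[0])
 *	};
 */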
2205 
2206 /**
2207  * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2208  * @gt: the &xe_gt
2209  * @vfid: the VF identifier (can't be PF)
2210  * @buf: the buffer with config data
2211  * @size: the size of the config data
2212  *
2213  * This function can only be called on PF.
2214  *
2215  * Return: 0 on success or a negative error code on failure.
2216  */
2217 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2218 				  const void *buf, size_t size)
2219 {
2220 	int err;
2221 
2222 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2223 	xe_gt_assert(gt, vfid);
2224 
2225 	if (!size)
2226 		return -ENODATA;
2227 
2228 	if (size % sizeof(u32))
2229 		return -EINVAL;
2230 
2231 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2232 		struct drm_printer p = xe_gt_info_printer(gt);
2233 
2234 		drm_printf(&p, "restoring VF%u config:\n", vfid);
2235 		xe_guc_klv_print(buf, size / sizeof(u32), &p);
2236 	}
2237 
2238 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2239 	err = pf_send_vf_cfg_reset(gt, vfid);
2240 	if (!err) {
2241 		pf_release_vf_config(gt, vfid);
2242 		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2243 	}
2244 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2245 
2246 	return err;
2247 }
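
/*
 * Illustrative round-trip (hypothetical caller): a blob produced by
 * xe_gt_sriov_pf_config_save() can be replayed to re-provision a VF:
 *
 *	ssize_t n = xe_gt_sriov_pf_config_save(gt, vfid, buf, size);
 *	if (n > 0)
 *		err = xe_gt_sriov_pf_config_restore(gt, vfid, buf, n);
 */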
2248 
2249 /**
2250  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2251  * @gt: the &xe_gt
2252  *
2253  * Any prior configurations pushed to GuC are lost when the GT is reset.
2254  * Push again all non-empty VF configurations to the GuC.
2255  *
2256  * This function can only be called on PF.
2257  */
2258 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2259 {
2260 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2261 	unsigned int fail = 0, skip = 0;
2262 
2263 	for (n = 1; n <= total_vfs; n++) {
2264 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
2265 			skip++;
2266 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
2267 			fail++;
2268 	}
2269 
2270 	if (fail)
2271 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2272 				   fail, total_vfs - skip, str_plural(total_vfs - skip));
2273 
2274 	if (fail != total_vfs)
2275 		xe_gt_sriov_dbg(gt, "pushed %u, skipped %u of %u VF%s configurations\n",
2276 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2277 }
2278 
2279 /**
2280  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2281  * @gt: the &xe_gt
2282  * @p: the &drm_printer
2283  *
2284  * Print GGTT configuration data for all VFs.
2285  * VFs without provisioned GGTT are ignored.
2286  *
2287  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2288  */
2289 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2290 {
2291 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2292 	const struct xe_gt_sriov_config *config;
2293 	char buf[10];
2294 
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

2295 	for (n = 1; n <= total_vfs; n++) {
2296 		config = &gt->sriov.pf.vfs[n].config;
2297 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2298 			continue;
2299 
2300 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2301 				buf, sizeof(buf));
2302 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2303 			   n, config->ggtt_region->base.start,
2304 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2305 			   buf);
2306 	}
2307 
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2308 	return 0;
2309 }
2310 
2311 /**
2312  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2313  * @gt: the &xe_gt
2314  * @p: the &drm_printer
2315  *
2316  * Print GuC context ID allocations across all VFs.
2317  * VFs without GuC context IDs are skipped.
2318  *
2319  * This function can only be called on PF.
2320  * Return: 0 on success or a negative error code on failure.
2321  */
2322 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2323 {
2324 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2325 	const struct xe_gt_sriov_config *config;
2326 
2327 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2328 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2329 
2330 	for (n = 1; n <= total_vfs; n++) {
2331 		config = &gt->sriov.pf.vfs[n].config;
2332 		if (!config->num_ctxs)
2333 			continue;
2334 
2335 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2336 			   n,
2337 			   config->begin_ctx,
2338 			   config->begin_ctx + config->num_ctxs - 1,
2339 			   config->num_ctxs);
2340 	}
2341 
2342 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2343 	return 0;
2344 }
2345 
2346 /**
2347  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2348  * @gt: the &xe_gt
2349  * @p: the &drm_printer
2350  *
2351  * Print GuC doorbell IDs allocations across all VFs.
2352  * VFs without GuC doorbell IDs are skipped.
2353  *
2354  * This function can only be called on PF.
2355  * Return: 0 on success or a negative error code on failure.
2356  */
2357 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2358 {
2359 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2360 	const struct xe_gt_sriov_config *config;
2361 
2362 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2363 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2364 
2365 	for (n = 1; n <= total_vfs; n++) {
2366 		config = &gt->sriov.pf.vfs[n].config;
2367 		if (!config->num_dbs)
2368 			continue;
2369 
2370 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2371 			   n,
2372 			   config->begin_db,
2373 			   config->begin_db + config->num_dbs - 1,
2374 			   config->num_dbs);
2375 	}
2376 
2377 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2378 	return 0;
2379 }
2380 
2381 /**
2382  * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2383  * @gt: the &xe_gt
2384  * @p: the &drm_printer
2385  *
2386  * Print LMEM allocations across all VFs.
2387  * VFs without LMEM allocation are skipped.
2388  *
2389  * This function can only be called on PF.
2390  * Return: 0 on success or a negative error code on failure.
2391  */
2392 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2393 {
2394 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2395 	const struct xe_gt_sriov_config *config;
2396 	char buf[10];
2397 
2398 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2399 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2400 
2401 	for (n = 1; n <= total_vfs; n++) {
2402 		config = &gt->sriov.pf.vfs[n].config;
2403 		if (!config->lmem_obj)
2404 			continue;
2405 
2406 		string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
2407 				buf, sizeof(buf));
2408 		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2409 			   n, config->lmem_obj->size, buf);
2410 	}
2411 
2412 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2413 	return 0;
2414 }
2415 
2416 /**
2417  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2418  * @gt: the &xe_gt
2419  * @p: the &drm_printer
2420  *
2421  * Print GGTT ranges that are available for provisioning.
2422  *
2423  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2424  */
2425 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2426 {
2427 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2428 	u64 alignment = pf_get_ggtt_alignment(gt);
2429 	u64 spare, avail, total;
2430 	char buf[10];
2431 
2432 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2433 
2434 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2435 
2436 	spare = pf_get_spare_ggtt(gt);
2437 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2438 
2439 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2440 
2441 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2442 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2443 
2444 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2445 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2446 
2447 	avail = total > spare ? total - spare : 0;
2448 
2449 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2450 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2451 
2452 	return 0;
2453 }
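
/*
 * Example of the output produced above (values are illustrative only):
 *
 *	total:	4227858432	(3.94 GiB)
 *	spare:	268435456	(256 MiB)
 *	avail:	3959422976	(3.69 GiB)
 */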
2454