// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
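/*
 * Illustrative note: make_u64_from_u32() is the inverse of the
 * lower_32_bits()/upper_32_bits() split used when encoding 64-bit KLV
 * values below, e.g. make_u64_from_u32(0x1, 0x2) yields 0x100000002.
 */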

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}
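/*
 * Note: a reset sends no KLVs, so the expected GuC reply is 0 parsed KLVs;
 * negative values are passed through as errors, and any positive count is
 * remapped to -EPROTO above, as it would indicate a malformed reply.
 */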

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
{
	const u32 bytes = num_dwords * sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(bytes, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);

	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);

	xe_bo_unpin_map_no_vm(bo);

	return ret;
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	int ret;

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}
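/*
 * A minimal sketch of the KLV stream built by the helpers above, assuming
 * the key/length split of the GUC_KLV_0_* fields from abi/guc_klvs_abi.h,
 * for one 32-bit item (K1) followed by one 64-bit item (K2):
 *
 *	[0] key=K1 len=1   [1] value
 *	[2] key=K2 len=2   [3] lo(value)   [4] hi(value)
 *
 * xe_guc_klv_count() would report 2 KLVs for these 5 dwords.
 */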

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		if (details) {
			cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
			cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
			cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
		}

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}
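/*
 * For reference, a full "details" encoding produced by encode_config()
 * is a flat KLV stream in this order (GGTT start/size only when a GGTT
 * region is allocated, LMEM size only when an LMEM object exists):
 *
 *	GGTT_START, GGTT_SIZE, BEGIN_CONTEXT_ID, NUM_CONTEXTS,
 *	BEGIN_DOORBELL_ID, NUM_DOORBELLS, [LMEM_SIZE,] EXEC_QUANTUM,
 *	PREEMPT_TIMEOUT, <one KLV per defined threshold>
 */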

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);

	kfree(cfg);
	return err;
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * An explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as the PTEs will be implicitly re-assigned to the
		 * PF by the xe_ggtt_clear() called from xe_ggtt_node_remove() below.
		 */
		xe_ggtt_node_remove(node, false);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	xe_ggtt_node_fini(node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}
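/*
 * Hypothetical output of pf_config_set_u64_done() (the exact size string
 * rendering is up to string_get_size()):
 *
 *	VF1 provisioned with 268435456 (256 MiB) GGTT
 *	Failed to provision VF2 with 1073741824 (1.00 GiB) GGTT (-ENOSPC)
 *	VF2 provisioning remains at 0 (0 B) GGTT
 */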

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents the PF, then the function will change the PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}
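/*
 * A minimal usage sketch (hypothetical caller, assuming a PF-mode device
 * and a primary GT held in "gt"; not taken from this file):
 *
 *	err = xe_gt_sriov_pf_config_set_ggtt(gt, 1, SZ_256M);
 *	if (!err)
 *		size = xe_gt_sriov_pf_config_get_ggtt(gt, 1);
 */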

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at the single largest GGTT region,
	 * as that will always be the best fit for the 1 VF case, and most likely
	 * will also nicely cover other cases where VFs are provisioned on a
	 * fresh and idle PF driver, without any stale GGTT allocations spread
	 * in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}
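/*
 * Worked example (hypothetical numbers): with a 1008M largest hole, 64K
 * alignment and 4 VFs, pf_estimate_fair_ggtt() yields div_u64(1008M, 4)
 * = 252M, which is already 64K-aligned, so each VF would be offered 252M.
 */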

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents the PF, then the number of the PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}
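/*
 * Worked example (hypothetical numbers): with idm->total = 65536 and
 * spare = 256, 7 VFs give a first candidate of (65536 - 256) / 7 = 9325;
 * the loop then probes 9325, 9324, ... until a trial reservation of
 * fair * num_vfs IDs succeeds, and releases it again immediately.
 */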

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents the PF, then the number of the PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}
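/*
 * Worked example (assuming GUC_NUM_DOORBELLS = 256): with no spare
 * doorbells, 7 VFs start from (256 - 0) / 7 = 36 and probe downwards
 * until 36 * 7 = 252 (or fewer) doorbells can actually be reserved.
 */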

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}
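/*
 * Sketch of the layout maintained by pf_update_vf_lmtt(): every tile's
 * LMTT covers the VF's total LMEM across all tiles, with each LMEM
 * object mapped back-to-back at an accumulated offset, e.g. for two
 * tiles with 1G objects each (hypothetical sizes):
 *
 *	LMTT[vfid]: [tile0 bo: 0..1G) [tile1 bo: 1G..2G)
 */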

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_NEEDS_2M |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	err = pf_update_vf_lmtt(xe, vfid);
	if (unlikely(err))
		goto release;

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#else
#define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!IS_DGFX(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}
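/*
 * A minimal usage sketch (hypothetical caller, e.g. fairly provisioning
 * 4 VFs on every GT of a device before enabling them):
 *
 *	for_each_gt(gt, xe, id) {
 *		err = xe_gt_sriov_pf_config_set_fair(gt, 1, 4);
 *		if (err)
 *			break;
 *	}
 */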
1626 
1627 static const char *exec_quantum_unit(u32 exec_quantum)
1628 {
1629 	return exec_quantum ? "ms" : "(infinity)";
1630 }
1631 
1632 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1633 				     u32 exec_quantum)
1634 {
1635 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1636 	int err;
1637 
1638 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1639 	if (unlikely(err))
1640 		return err;
1641 
1642 	config->exec_quantum = exec_quantum;
1643 	return 0;
1644 }
1645 
1646 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1647 {
1648 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1649 
1650 	return config->exec_quantum;
1651 }
1652 
1653 /**
1654  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1655  * @gt: the &xe_gt
1656  * @vfid: the VF identifier
1657  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1658  *
1659  * This function can only be called on PF.
1660  *
1661  * Return: 0 on success or a negative error code on failure.
1662  */
1663 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1664 					   u32 exec_quantum)
1665 {
1666 	int err;
1667 
1668 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1669 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1670 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1671 
1672 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1673 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1674 				      "execution quantum", exec_quantum_unit, err);
1675 }
1676 
1677 /**
1678  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1679  * @gt: the &xe_gt
1680  * @vfid: the VF identifier
1681  *
1682  * This function can only be called on PF.
1683  *
1684  * Return: VF's (or PF's) execution quantum in milliseconds.
1685  */
1686 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1687 {
1688 	u32 exec_quantum;
1689 
1690 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1691 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1692 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1693 
1694 	return exec_quantum;
1695 }
1696 
1697 static const char *preempt_timeout_unit(u32 preempt_timeout)
1698 {
1699 	return preempt_timeout ? "us" : "(infinity)";
1700 }
1701 
1702 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1703 					u32 preempt_timeout)
1704 {
1705 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1706 	int err;
1707 
1708 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1709 	if (unlikely(err))
1710 		return err;
1711 
1712 	config->preempt_timeout = preempt_timeout;
1713 
1714 	return 0;
1715 }
1716 
1717 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1718 {
1719 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1720 
1721 	return config->preempt_timeout;
1722 }
1723 
1724 /**
1725  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1726  * @gt: the &xe_gt
1727  * @vfid: the VF identifier
1728  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1729  *
1730  * This function can only be called on PF.
1731  *
1732  * Return: 0 on success or a negative error code on failure.
1733  */
1734 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1735 					      u32 preempt_timeout)
1736 {
1737 	int err;
1738 
1739 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1740 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1741 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1742 
1743 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1744 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1745 				      "preemption timeout", preempt_timeout_unit, err);
1746 }
1747 
1748 /**
1749  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1750  * @gt: the &xe_gt
1751  * @vfid: the VF identifier
1752  *
1753  * This function can only be called on PF.
1754  *
1755  * Return: VF's (or PF's) preemption timeout in microseconds.
1756  */
1757 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1758 {
1759 	u32 preempt_timeout;
1760 
1761 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1762 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1763 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1764 
1765 	return preempt_timeout;
1766 }
1767 
1768 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1769 {
1770 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1771 
1772 	config->exec_quantum = 0;
1773 	config->preempt_timeout = 0;
1774 }
1775 
1776 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1777 				  enum xe_guc_klv_threshold_index index, u32 value)
1778 {
1779 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1780 	int err;
1781 
1782 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1783 	if (unlikely(err))
1784 		return err;
1785 
1786 	config->thresholds[index] = value;
1787 
1788 	return 0;
1789 }
1790 
1791 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1792 			    enum xe_guc_klv_threshold_index index)
1793 {
1794 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1795 
1796 	return config->thresholds[index];
1797 }
1798 
1799 static const char *threshold_unit(u32 threshold)
1800 {
1801 	return threshold ? "" : "(disabled)";
1802 }
1803 
1804 /**
1805  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1806  * @gt: the &xe_gt
1807  * @vfid: the VF identifier
1808  * @index: the threshold index
1809  * @value: requested value (0 means disabled)
1810  *
1811  * This function can only be called on PF.
1812  *
1813  * Return: 0 on success or a negative error code on failure.
1814  */
1815 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1816 					enum xe_guc_klv_threshold_index index, u32 value)
1817 {
1818 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1819 	const char *name = xe_guc_klv_key_to_string(key);
1820 	int err;
1821 
1822 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1823 	err = pf_provision_threshold(gt, vfid, index, value);
1824 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1825 
1826 	return pf_config_set_u32_done(gt, vfid, value,
1827 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1828 				      name, threshold_unit, err);
1829 }
1830 
1831 /**
1832  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1833  * @gt: the &xe_gt
1834  * @vfid: the VF identifier
1835  * @index: the threshold index
1836  *
1837  * This function can only be called on PF.
1838  *
1839  * Return: value of VF's (or PF's) threshold.
1840  */
1841 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1842 					enum xe_guc_klv_threshold_index index)
1843 {
1844 	u32 value;
1845 
1846 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1847 	value = pf_get_threshold(gt, vfid, index);
1848 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1849 
1850 	return value;
1851 }
1852 
1853 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1854 {
1855 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1856 
1857 #define reset_threshold_config(TAG, ...) ({				\
1858 	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
1859 });
1860 
1861 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
1862 #undef reset_threshold_config
1863 }
1864 
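/*
 * Release all resources assigned to the VF: GGTT (and on DGFX also LMEM,
 * followed by an LMTT update) on the primary GT only, plus GuC context IDs,
 * doorbells, scheduling parameters and thresholds on every GT.
 */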
1865 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
1866 {
1867 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1868 	struct xe_device *xe = gt_to_xe(gt);
1869 
1870 	if (!xe_gt_is_media_type(gt)) {
1871 		pf_release_vf_config_ggtt(gt, config);
1872 		if (IS_DGFX(xe)) {
1873 			pf_release_vf_config_lmem(gt, config);
1874 			pf_update_vf_lmtt(xe, vfid);
1875 		}
1876 	}
1877 	pf_release_config_ctxs(gt, config);
1878 	pf_release_config_dbs(gt, config);
1879 	pf_reset_config_sched(gt, config);
1880 	pf_reset_config_thresholds(gt, config);
1881 }
1882 
1883 /**
1884  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
1885  * @gt: the &xe_gt
1886  * @vfid: the VF identifier (can't be PF)
1887  * @force: force configuration release
1888  *
1889  * This function can only be called on PF.
1890  *
1891  * Return: 0 on success or a negative error code on failure.
1892  */
1893 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
1894 {
1895 	int err;
1896 
1897 	xe_gt_assert(gt, vfid);
1898 
1899 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1900 	err = pf_send_vf_cfg_reset(gt, vfid);
1901 	if (!err || force)
1902 		pf_release_vf_config(gt, vfid);
1903 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1904 
1905 	if (unlikely(err)) {
1906 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
1907 				   vfid, ERR_PTR(err),
1908 				   force ? " but all resources were released anyway!" : "");
1909 	}
1910 
1911 	return force ? 0 : err;
1912 }
1913 
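/* Re-assign the GGTT region to the VF to rewrite its PTEs (no-op if the region was never allocated). */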
1914 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
1915 {
1916 	if (xe_ggtt_node_allocated(ggtt_region))
1917 		xe_ggtt_assign(ggtt_region, vfid);
1918 }
1919 
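/*
 * Clear the VF's LMEM object using the migrate engine and wait up to
 * @timeout jiffies for the clear fence to signal.
 */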
1920 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
1921 {
1922 	struct xe_migrate *m = tile->migrate;
1923 	struct dma_fence *fence;
1924 	int err;
1925 
1926 	if (!bo)
1927 		return 0;
1928 
1929 	xe_bo_lock(bo, false);
1930 	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
1931 	if (IS_ERR(fence)) {
1932 		err = PTR_ERR(fence);
1933 	} else if (!fence) {
1934 		err = -ENOMEM;
1935 	} else {
1936 		long ret = dma_fence_wait_timeout(fence, false, timeout);
1937 
1938 		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
1939 		dma_fence_put(fence);
1940 		if (!err)
1941 			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %ums\n",
1942 						jiffies_to_msecs(timeout - ret));
1943 	}
1944 	xe_bo_unlock(bo);
1945 
1946 	return err;
1947 }
1948 
1949 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
1950 {
1951 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1952 	struct xe_tile *tile = gt_to_tile(gt);
1953 	struct xe_device *xe = gt_to_xe(gt);
1954 	int err = 0;
1955 
1956 	/*
1957 	 * Only GGTT and LMEM need to be cleared by the PF.
1958 	 * GuC doorbell IDs and context IDs do not need any clearing.
1959 	 */
1960 	if (!xe_gt_is_media_type(gt)) {
1961 		pf_sanitize_ggtt(config->ggtt_region, vfid);
1962 		if (IS_DGFX(xe))
1963 			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
1964 	}
1965 
1966 	return err;
1967 }
1968 
1969 /**
1970  * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
1971  * @gt: the &xe_gt
1972  * @vfid: the VF identifier (can't be PF)
1973  * @timeout: maximum timeout to wait for completion in jiffies
1974  *
1975  * This function can only be called on PF.
1976  *
1977  * Return: 0 on success or a negative error code on failure.
1978  */
1979 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
1980 {
1981 	int err;
1982 
1983 	xe_gt_assert(gt, vfid != PFID);
1984 
1985 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1986 	err = pf_sanitize_vf_resources(gt, vfid, timeout);
1987 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1988 
1989 	if (unlikely(err))
1990 		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
1991 				   vfid, ERR_PTR(err));
1992 	return err;
1993 }
1994 
1995 /**
1996  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
1997  * @gt: the &xe_gt
1998  * @vfid: the VF identifier (can't be PF)
1999  * @refresh: explicit refresh
2000  *
2001  * This function can only be called on PF.
2002  *
2003  * Return: 0 on success or a negative error code on failure.
2004  */
2005 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2006 {
2007 	int err = 0;
2008 
2009 	xe_gt_assert(gt, vfid);
2010 
2011 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2012 	if (refresh)
2013 		err = pf_send_vf_cfg_reset(gt, vfid);
2014 	if (!err)
2015 		err = pf_push_full_vf_config(gt, vfid);
2016 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2017 
2018 	if (unlikely(err)) {
2019 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2020 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2021 	}
2022 
2023 	return err;
2024 }
2025 
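/*
 * Check how complete the VF's provisioning is: 0 when all mandatory resources
 * are provisioned, -ENOKEY when only some of them are, -ENODATA when none are.
 */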
2026 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2027 {
2028 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2029 	struct xe_device *xe = gt_to_xe(gt);
2030 	bool is_primary = !xe_gt_is_media_type(gt);
2031 	bool valid_ggtt, valid_ctxs, valid_dbs;
2032 	bool valid_any, valid_all;
2033 
2034 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2035 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2036 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2037 
2038 	/* note that GuC doorbells are optional */
2039 	valid_any = valid_ctxs || valid_dbs;
2040 	valid_all = valid_ctxs;
2041 
2042 	/* and GGTT/LMEM is configured on primary GT only */
2043 	valid_all = valid_all && valid_ggtt;
2044 	valid_any = valid_any || (valid_ggtt && is_primary);
2045 
2046 	if (IS_DGFX(xe)) {
2047 		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2048 
2049 		valid_any = valid_any || (valid_lmem && is_primary);
2050 		valid_all = valid_all && valid_lmem;
2051 	}
2052 
2053 	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
2054 }
2055 
2056 /**
2057  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2058  * @gt: the &xe_gt
2059  * @vfid: the VF identifier (can't be PF)
2060  *
2061  * This function can only be called on PF.
2062  *
2063  * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2064  */
2065 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2066 {
2067 	bool empty;
2068 
2069 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2070 	xe_gt_assert(gt, vfid);
2071 
2072 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2073 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2074 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2075 
2076 	return empty;
2077 }
2078 
2079 /**
2080  * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2081  * @gt: the &xe_gt
2082  * @vfid: the VF identifier (can't be PF)
2083  * @buf: the buffer to save a config to (or NULL to query the buf size)
2084  * @size: the size of the buffer (or 0 to query the buf size)
2085  *
2086  * This function can only be called on PF.
2087  *
2088  * Return: minimum size of the buffer or the number of bytes saved,
2089  *         or a negative error code on failure.
2090  */
2091 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2092 {
2093 	struct xe_gt_sriov_config *config;
2094 	ssize_t ret;
2095 
2096 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2097 	xe_gt_assert(gt, vfid);
2098 	xe_gt_assert(gt, !(!buf ^ !size));
2099 
2100 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2101 	ret = pf_validate_vf_config(gt, vfid);
2102 	if (!size) {
2103 		ret = ret ? 0 : SZ_4K;
2104 	} else if (!ret) {
2105 		if (size < SZ_4K) {
2106 			ret = -ENOBUFS;
2107 		} else {
2108 			config = pf_pick_vf_config(gt, vfid);
2109 			ret = encode_config(buf, config, false) * sizeof(u32);
2110 		}
2111 	}
2112 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2113 
2114 	return ret;
2115 }
2116 
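/*
 * Apply a single config KLV: -EBADMSG on a length mismatch, -EKEYREJECTED for
 * keys not applicable to this GT (or device), otherwise the result of the
 * matching provision call.
 */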
2117 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2118 				    u32 key, u32 len, const u32 *value)
2119 {
2120 	switch (key) {
2121 	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2122 		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2123 			return -EBADMSG;
2124 		return pf_provision_vf_ctxs(gt, vfid, value[0]);
2125 
2126 	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2127 		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2128 			return -EBADMSG;
2129 		return pf_provision_vf_dbs(gt, vfid, value[0]);
2130 
2131 	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2132 		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2133 			return -EBADMSG;
2134 		return pf_provision_exec_quantum(gt, vfid, value[0]);
2135 
2136 	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2137 		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2138 			return -EBADMSG;
2139 		return pf_provision_preempt_timeout(gt, vfid, value[0]);
2140 
2141 	/* auto-generate case statements */
2142 #define define_threshold_key_to_provision_case(TAG, ...)				\
2143 	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
2144 		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
2145 		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
2146 			return -EBADMSG;						\
2147 		return pf_provision_threshold(gt, vfid,					\
2148 					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
2149 					      value[0]);
2150 
2151 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2152 #undef define_threshold_key_to_provision_case
2153 	}
2154 
2155 	if (xe_gt_is_media_type(gt))
2156 		return -EKEYREJECTED;
2157 
2158 	switch (key) {
2159 	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2160 		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2161 			return -EBADMSG;
2162 		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2163 
2164 	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2165 		if (!IS_DGFX(gt_to_xe(gt)))
2166 			return -EKEYREJECTED;
2167 		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2168 			return -EBADMSG;
2169 		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2170 	}
2171 
2172 	return -EKEYREJECTED;
2173 }
2174 
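/* Walk the saved KLV stream, re-provisioning each entry, then validate the resulting config. */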
2175 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2176 				const u32 *klvs, size_t num_dwords)
2177 {
2178 	int err;
2179 
2180 	while (num_dwords >= GUC_KLV_LEN_MIN) {
2181 		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2182 		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2183 
2184 		klvs += GUC_KLV_LEN_MIN;
2185 		num_dwords -= GUC_KLV_LEN_MIN;
2186 
2187 		if (num_dwords < len)
2188 			err = -EBADMSG;
2189 		else
2190 			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2191 
2192 		if (err) {
2193 			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2194 			return err;
2195 		}
2196 
2197 		klvs += len;
2198 		num_dwords -= len;
2199 	}
2200 
2201 	return pf_validate_vf_config(gt, vfid);
2202 }
2203 
2204 /**
2205  * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2206  * @gt: the &xe_gt
2207  * @vfid: the VF identifier (can't be PF)
2208  * @buf: the buffer with config data
2209  * @size: the size of the config data
2210  *
2211  * This function can only be called on PF.
2212  *
2213  * Return: 0 on success or a negative error code on failure.
2214  */
2215 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2216 				  const void *buf, size_t size)
2217 {
2218 	int err;
2219 
2220 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2221 	xe_gt_assert(gt, vfid);
2222 
2223 	if (!size)
2224 		return -ENODATA;
2225 
2226 	if (size % sizeof(u32))
2227 		return -EINVAL;
2228 
2229 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2230 		struct drm_printer p = xe_gt_info_printer(gt);
2231 
2232 		drm_printf(&p, "restoring VF%u config:\n", vfid);
2233 		xe_guc_klv_print(buf, size / sizeof(u32), &p);
2234 	}
2235 
2236 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2237 	err = pf_send_vf_cfg_reset(gt, vfid);
2238 	if (!err) {
2239 		pf_release_vf_config(gt, vfid);
2240 		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2241 	}
2242 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2243 
2244 	return err;
2245 }
2246 
2247 /**
2248  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2249  * @gt: the &xe_gt
2250  *
2251  * Any prior configurations pushed to GuC are lost when the GT is reset.
2252  * Push all non-empty VF configurations to the GuC again.
2253  *
2254  * This function can only be called on PF.
2255  */
2256 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2257 {
2258 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2259 	unsigned int fail = 0, skip = 0;
2260 
2261 	for (n = 1; n <= total_vfs; n++) {
2262 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
2263 			skip++;
2264 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
2265 			fail++;
2266 	}
2267 
2268 	if (fail)
2269 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2270 				   fail, total_vfs - skip, str_plural(total_vfs - skip));
2271 
2272 	if (fail != total_vfs)
2273 		xe_gt_sriov_dbg(gt, "pushed %u, skipped %u of %u VF%s configurations\n",
2274 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2275 }
2276 
2277 /**
2278  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2279  * @gt: the &xe_gt
2280  * @p: the &drm_printer
2281  *
2282  * Print GGTT configuration data for all VFs.
2283  * VFs without provisioned GGTT are ignored.
2284  *
2285  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2286  */
2287 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2288 {
2289 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2290 	const struct xe_gt_sriov_config *config;
2291 	char buf[10];
2292 
2293 	for (n = 1; n <= total_vfs; n++) {
2294 		config = &gt->sriov.pf.vfs[n].config;
2295 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2296 			continue;
2297 
2298 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2299 				buf, sizeof(buf));
2300 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2301 			   n, config->ggtt_region->base.start,
2302 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2303 			   buf);
2304 	}
2305 
2306 	return 0;
2307 }
2308 
2309 /**
2310  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2311  * @gt: the &xe_gt
2312  * @p: the &drm_printer
2313  *
2314  * Print GuC context ID allocations across all VFs.
2315  * VFs without GuC context IDs are skipped.
2316  *
2317  * This function can only be called on PF.
2318  * Return: 0 on success or a negative error code on failure.
2319  */
2320 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2321 {
2322 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2323 	const struct xe_gt_sriov_config *config;
2324 
2325 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2326 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2327 
2328 	for (n = 1; n <= total_vfs; n++) {
2329 		config = &gt->sriov.pf.vfs[n].config;
2330 		if (!config->num_ctxs)
2331 			continue;
2332 
2333 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2334 			   n,
2335 			   config->begin_ctx,
2336 			   config->begin_ctx + config->num_ctxs - 1,
2337 			   config->num_ctxs);
2338 	}
2339 
2340 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2341 	return 0;
2342 }
2343 
2344 /**
2345  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2346  * @gt: the &xe_gt
2347  * @p: the &drm_printer
2348  *
2349  * Print GuC doorbell IDs allocations across all VFs.
2350  * VFs without GuC doorbell IDs are skipped.
2351  *
2352  * This function can only be called on PF.
2353  * Return: 0 on success or a negative error code on failure.
2354  */
2355 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2356 {
2357 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2358 	const struct xe_gt_sriov_config *config;
2359 
2360 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2361 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2362 
2363 	for (n = 1; n <= total_vfs; n++) {
2364 		config = &gt->sriov.pf.vfs[n].config;
2365 		if (!config->num_dbs)
2366 			continue;
2367 
2368 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2369 			   n,
2370 			   config->begin_db,
2371 			   config->begin_db + config->num_dbs - 1,
2372 			   config->num_dbs);
2373 	}
2374 
2375 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2376 	return 0;
2377 }
2378 
2379 /**
2380  * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2381  * @gt: the &xe_gt
2382  * @p: the &drm_printer
2383  *
2384  * Print LMEM allocations across all VFs.
2385  * VFs without LMEM allocation are skipped.
2386  *
2387  * This function can only be called on PF.
2388  * Return: 0 on success or a negative error code on failure.
2389  */
2390 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2391 {
2392 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2393 	const struct xe_gt_sriov_config *config;
2394 	char buf[10];
2395 
2396 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2397 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2398 
2399 	for (n = 1; n <= total_vfs; n++) {
2400 		config = &gt->sriov.pf.vfs[n].config;
2401 		if (!config->lmem_obj)
2402 			continue;
2403 
2404 		string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
2405 				buf, sizeof(buf));
2406 		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2407 			   n, config->lmem_obj->size, buf);
2408 	}
2409 
2410 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2411 	return 0;
2412 }
2413 
2414 /**
2415  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2416  * @gt: the &xe_gt
2417  * @p: the &drm_printer
2418  *
2419  * Print GGTT ranges that are available for the provisioning.
2420  *
2421  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2422  */
2423 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2424 {
2425 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2426 	u64 alignment = pf_get_ggtt_alignment(gt);
2427 	u64 spare, avail, total;
2428 	char buf[10];
2429 
2430 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2431 
2432 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2433 
2434 	spare = pf_get_spare_ggtt(gt);
2435 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2436 
2437 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2438 
2439 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2440 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2441 
2442 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2443 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2444 
2445 	avail = total > spare ? total - spare : 0;
2446 
2447 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2448 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2449 
2450 	return 0;
2451 }
2452