xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023-2024 Intel Corporation
4  */
5 
6 #include <linux/string_choices.h>
7 #include <linux/wordpart.h>
8 
9 #include "abi/guc_actions_sriov_abi.h"
10 #include "abi/guc_klvs_abi.h"
11 
12 #include "regs/xe_guc_regs.h"
13 
14 #include "xe_bo.h"
15 #include "xe_device.h"
16 #include "xe_ggtt.h"
17 #include "xe_gt.h"
18 #include "xe_gt_sriov_pf_config.h"
19 #include "xe_gt_sriov_pf_helpers.h"
20 #include "xe_gt_sriov_pf_policy.h"
21 #include "xe_gt_sriov_printk.h"
22 #include "xe_guc.h"
23 #include "xe_guc_ct.h"
24 #include "xe_guc_db_mgr.h"
25 #include "xe_guc_fwif.h"
26 #include "xe_guc_id_mgr.h"
27 #include "xe_guc_klv_helpers.h"
28 #include "xe_guc_klv_thresholds_set.h"
29 #include "xe_guc_submit.h"
30 #include "xe_lmtt.h"
31 #include "xe_map.h"
32 #include "xe_sriov.h"
33 #include "xe_ttm_vram_mgr.h"
34 #include "xe_wopcm.h"
35 
36 /*
37  * Return: number of KLVs that were successfully parsed and saved,
38  *         negative error code on failure.
39  */
40 static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
41 				    u64 addr, u32 size)
42 {
43 	u32 request[] = {
44 		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
45 		vfid,
46 		lower_32_bits(addr),
47 		upper_32_bits(addr),
48 		size,
49 	};
50 
51 	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
52 }
53 
54 /*
55  * Return: 0 on success, negative error code on failure.
56  */
57 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
58 {
59 	struct xe_guc *guc = &gt->uc.guc;
60 	int ret;
61 
62 	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);
63 
64 	return ret <= 0 ? ret : -EPROTO;
65 }
66 
67 /*
68  * Return: number of KLVs that were successfully parsed and saved,
69  *         negative error code on failure.
70  */
71 static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
72 {
73 	const u32 bytes = num_dwords * sizeof(u32);
74 	struct xe_tile *tile = gt_to_tile(gt);
75 	struct xe_device *xe = tile_to_xe(tile);
76 	struct xe_guc *guc = &gt->uc.guc;
77 	struct xe_bo *bo;
78 	int ret;
79 
80 	bo = xe_bo_create_pin_map(xe, tile, NULL,
81 				  ALIGN(bytes, PAGE_SIZE),
82 				  ttm_bo_type_kernel,
83 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
84 				  XE_BO_FLAG_GGTT |
85 				  XE_BO_FLAG_GGTT_INVALIDATE);
86 	if (IS_ERR(bo))
87 		return PTR_ERR(bo);
88 
89 	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
90 
91 	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
92 
93 	xe_bo_unpin_map_no_vm(bo);
94 
95 	return ret;
96 }
97 
98 /*
99  * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
100  *         negative error code on failure.
101  */
102 static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
103 			       const u32 *klvs, u32 num_dwords)
104 {
105 	int ret;
106 
107 	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
108 
109 	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
110 
111 	if (ret != num_klvs) {
112 		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
113 		struct drm_printer p = xe_gt_info_printer(gt);
114 		char name[8];
115 
116 		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
117 				   xe_sriov_function_name(vfid, name, sizeof(name)),
118 				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
119 		xe_guc_klv_print(klvs, num_dwords, &p);
120 		return err;
121 	}
122 
123 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
124 		struct drm_printer p = xe_gt_info_printer(gt);
125 
126 		xe_guc_klv_print(klvs, num_dwords, &p);
127 	}
128 
129 	return 0;
130 }
131 
132 static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
133 {
134 	u32 klv[] = {
135 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
136 		value,
137 	};
138 
139 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
140 }
141 
142 static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
143 {
144 	u32 klv[] = {
145 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
146 		lower_32_bits(value),
147 		upper_32_bits(value),
148 	};
149 
150 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
151 }
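/*
 * Illustrative note (not part of the original source): the helpers above
 * encode each config item in the generic GuC KLV format, where the first
 * dword packs the key and the length (in dwords) of the value that follows,
 * using the GUC_KLV_0_KEY / GUC_KLV_0_LEN fields from the GuC KLV ABI headers.
 * A hypothetical 64-bit item therefore occupies three dwords:
 *
 *	klv[0] = FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2);
 *	klv[1] = lower_32_bits(value);
 *	klv[2] = upper_32_bits(value);
 *
 * and is counted as a single KLV by pf_push_vf_cfg_klvs().
 */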
152 
153 static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
154 {
155 	u32 klvs[] = {
156 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
157 		lower_32_bits(start),
158 		upper_32_bits(start),
159 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
160 		lower_32_bits(size),
161 		upper_32_bits(size),
162 	};
163 
164 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
165 }
166 
167 static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
168 {
169 	u32 klvs[] = {
170 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
171 		begin,
172 		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
173 		num,
174 	};
175 
176 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
177 }
178 
179 static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
180 {
181 	u32 klvs[] = {
182 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
183 		begin,
184 		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
185 		num,
186 	};
187 
188 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
189 }
190 
191 static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
192 {
193 	/* GuC will silently clamp values exceeding max */
194 	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);
195 
196 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
197 }
198 
199 static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
200 {
201 	/* GuC will silently clamp values exceeding max */
202 	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);
203 
204 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
205 }
206 
207 static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
208 {
209 	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
210 }
211 
212 static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
213 				    enum xe_guc_klv_threshold_index index, u32 value)
214 {
215 	u32 key = xe_guc_klv_threshold_index_to_key(index);
216 
217 	xe_gt_assert(gt, key);
218 	return pf_push_vf_cfg_u32(gt, vfid, key, value);
219 }
220 
221 static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
222 {
223 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
224 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
225 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
226 
227 	return &gt->sriov.pf.vfs[vfid].config;
228 }
229 
230 /* Return: number of configuration dwords written */
231 static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
232 {
233 	u32 n = 0;
234 
235 	if (xe_ggtt_node_allocated(config->ggtt_region)) {
236 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
237 		cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
238 		cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
239 
240 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
241 		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
242 		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
243 	}
244 
245 	return n;
246 }
247 
248 /* Return: number of configuration dwords written */
249 static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
250 {
251 	u32 n = 0;
252 
253 	n += encode_config_ggtt(cfg, config);
254 
255 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
256 	cfg[n++] = config->begin_ctx;
257 
258 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
259 	cfg[n++] = config->num_ctxs;
260 
261 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
262 	cfg[n++] = config->begin_db;
263 
264 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
265 	cfg[n++] = config->num_dbs;
266 
267 	if (config->lmem_obj) {
268 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
269 		cfg[n++] = lower_32_bits(config->lmem_obj->size);
270 		cfg[n++] = upper_32_bits(config->lmem_obj->size);
271 	}
272 
273 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
274 	cfg[n++] = config->exec_quantum;
275 
276 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
277 	cfg[n++] = config->preempt_timeout;
278 
279 	return n;
280 }
281 
282 static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
283 {
284 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
285 	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
286 	u32 num_dwords;
287 	int num_klvs;
288 	u32 *cfg;
289 	int err;
290 
291 	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
292 	if (!cfg)
293 		return -ENOMEM;
294 
295 	num_dwords = encode_config(cfg, config);
296 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
297 
298 	if (xe_gt_is_media_type(gt)) {
299 		struct xe_gt *primary = gt->tile->primary_gt;
300 		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
301 
302 		/* media-GT will never include a GGTT config */
303 		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
304 
305 		/* the GGTT config must be taken from the primary-GT instead */
306 		num_dwords += encode_config_ggtt(cfg + num_dwords, other);
307 	}
308 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
309 
310 	num_klvs = xe_guc_klv_count(cfg, num_dwords);
311 	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
312 
313 	kfree(cfg);
314 	return err;
315 }
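/*
 * Illustrative note (summary based on encode_config() above): for a fully
 * provisioned VF the pushed buffer is simply a concatenation of KLVs:
 * GGTT_START and GGTT_SIZE (64-bit values, present only when a GGTT region
 * is allocated), BEGIN_CONTEXT_ID, NUM_CONTEXTS, BEGIN_DOORBELL_ID and
 * NUM_DOORBELLS (32-bit values), LMEM_SIZE (64-bit, only when an LMEM object
 * exists), then EXEC_QUANTUM and PREEMPT_TIMEOUT (32-bit values).
 * For a media GT the GGTT KLVs are taken from the primary GT's config instead.
 */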
316 
317 static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
318 {
319 	struct xe_device *xe = gt_to_xe(gt);
320 
321 	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
322 }
323 
324 static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
325 {
326 	/* XXX: preliminary */
327 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
328 		pf_get_ggtt_alignment(gt) : SZ_64M;
329 }
330 
331 static u64 pf_get_spare_ggtt(struct xe_gt *gt)
332 {
333 	u64 spare;
334 
335 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
336 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
337 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
338 
339 	spare = gt->sriov.pf.spare.ggtt_size;
340 	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));
341 
342 	return spare;
343 }
344 
345 static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
346 {
347 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
348 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
349 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
350 
351 	if (size && size < pf_get_min_spare_ggtt(gt))
352 		return -EINVAL;
353 
354 	size = round_up(size, pf_get_ggtt_alignment(gt));
355 	gt->sriov.pf.spare.ggtt_size = size;
356 
357 	return 0;
358 }
359 
360 static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
361 {
362 	int err, err2 = 0;
363 
364 	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);
365 
366 	if (tile->media_gt && !err)
367 		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);
368 
369 	return err ?: err2;
370 }
371 
372 static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
373 {
374 	if (xe_ggtt_node_allocated(node)) {
375 		/*
376 		 * Explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
377 		 * is redundant, as the PTEs will be implicitly re-assigned to the
378 		 * PF by xe_ggtt_clear() called from xe_ggtt_node_remove() below.
379 		 */
380 		xe_ggtt_node_remove(node, false);
381 	}
382 }
383 
384 static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
385 {
386 	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
387 	config->ggtt_region = NULL;
388 }
389 
390 static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
391 {
392 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
393 	struct xe_ggtt_node *node = config->ggtt_region;
394 	struct xe_tile *tile = gt_to_tile(gt);
395 	struct xe_ggtt *ggtt = tile->mem.ggtt;
396 	u64 alignment = pf_get_ggtt_alignment(gt);
397 	int err;
398 
399 	xe_gt_assert(gt, vfid);
400 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
401 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
402 
403 	size = round_up(size, alignment);
404 
405 	if (xe_ggtt_node_allocated(node)) {
406 		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
407 		if (unlikely(err))
408 			return err;
409 
410 		pf_release_ggtt(tile, node);
411 	}
412 	xe_gt_assert(gt, !xe_ggtt_node_allocated(node));
413 
414 	if (!size)
415 		return 0;
416 
417 	node = xe_ggtt_node_init(ggtt);
418 	if (IS_ERR(node))
419 		return PTR_ERR(node);
420 
421 	err = xe_ggtt_node_insert(node, size, alignment);
422 	if (unlikely(err))
423 		goto err;
424 
425 	xe_ggtt_assign(node, vfid);
426 	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
427 				vfid, node->base.start, node->base.start + node->base.size - 1);
428 
429 	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
430 	if (unlikely(err))
431 		goto err;
432 
433 	config->ggtt_region = node;
434 	return 0;
435 err:
436 	xe_ggtt_node_fini(node);
437 	return err;
438 }
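/*
 * Note (illustrative summary of the flow above): when re-provisioning, the
 * zero-sized GGTT config is pushed to the GuC first, so the VF no longer
 * references the old range before the node is removed and its PTEs revert
 * to the PF; only then is a new node inserted, tagged with the VFID via
 * xe_ggtt_assign(), and the new range pushed to both the primary and media
 * GT via pf_distribute_config_ggtt().
 */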
439 
440 static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
441 {
442 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
443 	struct xe_ggtt_node *node = config->ggtt_region;
444 
445 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
446 	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
447 }
448 
449 /**
450  * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
451  * @gt: the &xe_gt
452  * @vfid: the VF identifier
453  *
454  * This function can only be called on PF.
455  *
456  * Return: size of the VF's assigned (or PF's spare) GGTT address space.
457  */
458 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
459 {
460 	u64 size;
461 
462 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
463 	if (vfid)
464 		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
465 	else
466 		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
467 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
468 
469 	return size;
470 }
471 
472 static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
473 				  u64 actual, const char *what, int err)
474 {
475 	char size[10];
476 	char name[8];
477 
478 	xe_sriov_function_name(vfid, name, sizeof(name));
479 
480 	if (unlikely(err)) {
481 		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
482 		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
483 				   name, value, size, what, ERR_PTR(err));
484 		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
485 		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
486 				 name, actual, size, what);
487 		return err;
488 	}
489 
490 	/* the actual value may have changed during provisioning */
491 	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
492 	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
493 			 name, actual, size, what);
494 	return 0;
495 }
496 
497 /**
498  * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
499  * @gt: the &xe_gt (can't be media)
500  * @vfid: the VF identifier
501  * @size: requested GGTT size
502  *
503  * If &vfid represents the PF, then the function will change the PF's spare GGTT config.
504  *
505  * This function can only be called on PF.
506  *
507  * Return: 0 on success or a negative error code on failure.
508  */
509 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
510 {
511 	int err;
512 
513 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
514 
515 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
516 	if (vfid)
517 		err = pf_provision_vf_ggtt(gt, vfid, size);
518 	else
519 		err = pf_set_spare_ggtt(gt, size);
520 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
521 
522 	return pf_config_set_u64_done(gt, vfid, size,
523 				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
524 				      vfid ? "GGTT" : "spare GGTT", err);
525 }
526 
527 static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
528 				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
529 				       const char *what, unsigned int last, int err)
530 {
531 	char size[10];
532 
533 	xe_gt_assert(gt, first);
534 	xe_gt_assert(gt, num_vfs);
535 	xe_gt_assert(gt, first <= last);
536 
537 	if (num_vfs == 1)
538 		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);
539 
540 	if (unlikely(err)) {
541 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
542 				   first, first + num_vfs - 1, what);
543 		if (last > first)
544 			pf_config_bulk_set_u64_done(gt, first, last - first, value,
545 						    get, what, last, 0);
546 		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
547 	}
548 
549 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
550 	value = get(gt, first);
551 	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
552 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
553 			 first, first + num_vfs - 1, value, size, what);
554 	return 0;
555 }
556 
557 /**
558  * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
559  * @gt: the &xe_gt (can't be media)
560  * @vfid: starting VF identifier (can't be 0)
561  * @num_vfs: number of VFs to provision
562  * @size: requested GGTT size
563  *
564  * This function can only be called on PF.
565  *
566  * Return: 0 on success or a negative error code on failure.
567  */
568 int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
569 					unsigned int num_vfs, u64 size)
570 {
571 	unsigned int n;
572 	int err = 0;
573 
574 	xe_gt_assert(gt, vfid);
575 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
576 
577 	if (!num_vfs)
578 		return 0;
579 
580 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
581 	for (n = vfid; n < vfid + num_vfs; n++) {
582 		err = pf_provision_vf_ggtt(gt, n, size);
583 		if (err)
584 			break;
585 	}
586 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
587 
588 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
589 					   xe_gt_sriov_pf_config_get_ggtt,
590 					   "GGTT", n, err);
591 }
592 
593 /* Return: size of the largest contiguous GGTT region */
594 static u64 pf_get_max_ggtt(struct xe_gt *gt)
595 {
596 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
597 	u64 alignment = pf_get_ggtt_alignment(gt);
598 	u64 spare = pf_get_spare_ggtt(gt);
599 	u64 max_hole;
600 
601 	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);
602 
603 	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
604 				max_hole / SZ_1K, spare / SZ_1K);
605 	return max_hole > spare ? max_hole - spare : 0;
606 }
607 
608 static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
609 {
610 	u64 available = pf_get_max_ggtt(gt);
611 	u64 alignment = pf_get_ggtt_alignment(gt);
612 	u64 fair;
613 
614 	/*
615 	 * To simplify the logic we only look at the single largest GGTT region,
616 	 * as that will always be the best fit for the 1 VF case, and will most
617 	 * likely also nicely cover other cases where VFs are provisioned on a
618 	 * fresh and idle PF driver, without any stale GGTT allocations spread
619 	 * across the middle of the full GGTT range.
620 	 */
621 
622 	fair = div_u64(available, num_vfs);
623 	fair = ALIGN_DOWN(fair, alignment);
624 	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
625 				available / SZ_1K, num_vfs, fair / SZ_1K);
626 	return fair;
627 }
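/*
 * Worked example (hypothetical numbers): with a largest free GGTT hole of
 * 3968M left after subtracting the spare, 64K alignment (on a platform that
 * needs 64K GGTT alignment) and num_vfs = 7, the estimate is
 * div_u64(3968M, 7), roughly 566M, aligned down to a 64K multiple; that value
 * is then used as the per-VF size for the bulk provisioning call.
 */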
628 
629 /**
630  * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
631  * @gt: the &xe_gt (can't be media)
632  * @vfid: starting VF identifier (can't be 0)
633  * @num_vfs: number of VFs to provision
634  *
635  * This function can only be called on PF.
636  *
637  * Return: 0 on success or a negative error code on failure.
638  */
639 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
640 					unsigned int num_vfs)
641 {
642 	u64 fair;
643 
644 	xe_gt_assert(gt, vfid);
645 	xe_gt_assert(gt, num_vfs);
646 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
647 
648 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
649 	fair = pf_estimate_fair_ggtt(gt, num_vfs);
650 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
651 
652 	if (!fair)
653 		return -ENOSPC;
654 
655 	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
656 }
657 
658 static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
659 {
660 	/* XXX: preliminary */
661 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
662 		hweight64(gt->info.engine_mask) : SZ_256;
663 }
664 
665 static u32 pf_get_spare_ctxs(struct xe_gt *gt)
666 {
667 	u32 spare;
668 
669 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
670 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
671 
672 	spare = gt->sriov.pf.spare.num_ctxs;
673 	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));
674 
675 	return spare;
676 }
677 
678 static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
679 {
680 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
681 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
682 
683 	if (spare > GUC_ID_MAX)
684 		return -EINVAL;
685 
686 	if (spare && spare < pf_get_min_spare_ctxs(gt))
687 		return -EINVAL;
688 
689 	gt->sriov.pf.spare.num_ctxs = spare;
690 
691 	return 0;
692 }
693 
694 /* Return: start ID or negative error code on failure */
695 static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
696 {
697 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
698 	unsigned int spare = pf_get_spare_ctxs(gt);
699 
700 	return xe_guc_id_mgr_reserve(idm, num, spare);
701 }
702 
703 static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
704 {
705 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
706 
707 	if (num)
708 		xe_guc_id_mgr_release(idm, start, num);
709 }
710 
711 static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
712 {
713 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
714 
715 	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
716 	config->begin_ctx = 0;
717 	config->num_ctxs = 0;
718 }
719 
720 static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
721 {
722 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
723 	int ret;
724 
725 	xe_gt_assert(gt, vfid);
726 
727 	if (num_ctxs > GUC_ID_MAX)
728 		return -EINVAL;
729 
730 	if (config->num_ctxs) {
731 		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
732 		if (unlikely(ret))
733 			return ret;
734 
735 		pf_release_config_ctxs(gt, config);
736 	}
737 
738 	if (!num_ctxs)
739 		return 0;
740 
741 	ret = pf_reserve_ctxs(gt, num_ctxs);
742 	if (unlikely(ret < 0))
743 		return ret;
744 
745 	config->begin_ctx = ret;
746 	config->num_ctxs = num_ctxs;
747 
748 	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
749 	if (unlikely(ret)) {
750 		pf_release_config_ctxs(gt, config);
751 		return ret;
752 	}
753 
754 	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
755 				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
756 	return 0;
757 }
758 
759 static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
760 {
761 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
762 
763 	return config->num_ctxs;
764 }
765 
766 /**
767  * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
768  * @gt: the &xe_gt
769  * @vfid: the VF identifier
770  *
771  * This function can only be called on PF.
772  * If &vfid represents a PF then number of PF's spare GuC context IDs is returned.
773  *
774  * Return: VF's quota (or PF's spare).
775  */
776 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
777 {
778 	u32 num_ctxs;
779 
780 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
781 	if (vfid)
782 		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
783 	else
784 		num_ctxs = pf_get_spare_ctxs(gt);
785 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
786 
787 	return num_ctxs;
788 }
789 
790 static const char *no_unit(u32 unused)
791 {
792 	return "";
793 }
794 
795 static const char *spare_unit(u32 unused)
796 {
797 	return " spare";
798 }
799 
800 static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
801 				  const char *what, const char *(*unit)(u32), int err)
802 {
803 	char name[8];
804 
805 	xe_sriov_function_name(vfid, name, sizeof(name));
806 
807 	if (unlikely(err)) {
808 		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
809 				   name, value, unit(value), what, ERR_PTR(err));
810 		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
811 				 name, actual, unit(actual), what);
812 		return err;
813 	}
814 
815 	/* the actual value may have changed during provisioning */
816 	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
817 			 name, actual, unit(actual), what);
818 	return 0;
819 }
820 
821 /**
822  * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
823  * @gt: the &xe_gt
824  * @vfid: the VF identifier
825  * @num_ctxs: requested number of GuC context IDs (0 to release)
826  *
827  * This function can only be called on PF.
828  *
829  * Return: 0 on success or a negative error code on failure.
830  */
831 int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
832 {
833 	int err;
834 
835 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
836 	if (vfid)
837 		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
838 	else
839 		err = pf_set_spare_ctxs(gt, num_ctxs);
840 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
841 
842 	return pf_config_set_u32_done(gt, vfid, num_ctxs,
843 				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
844 				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
845 }
846 
847 static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
848 				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
849 				       const char *what, const char *(*unit)(u32),
850 				       unsigned int last, int err)
851 {
852 	xe_gt_assert(gt, first);
853 	xe_gt_assert(gt, num_vfs);
854 	xe_gt_assert(gt, first <= last);
855 
856 	if (num_vfs == 1)
857 		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
858 
859 	if (unlikely(err)) {
860 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
861 				   first, first + num_vfs - 1, what);
862 		if (last > first)
863 			pf_config_bulk_set_u32_done(gt, first, last - first, value,
864 						    get, what, unit, last, 0);
865 		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
866 	}
867 
868 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
869 	value = get(gt, first);
870 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
871 			 first, first + num_vfs - 1, value, unit(value), what);
872 	return 0;
873 }
874 
875 /**
876  * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
877  * @gt: the &xe_gt
878  * @vfid: starting VF identifier (can't be 0)
879  * @num_vfs: number of VFs to provision
880  * @num_ctxs: requested number of GuC context IDs (0 to release)
881  *
882  * This function can only be called on PF.
883  *
884  * Return: 0 on success or a negative error code on failure.
885  */
886 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
887 					unsigned int num_vfs, u32 num_ctxs)
888 {
889 	unsigned int n;
890 	int err = 0;
891 
892 	xe_gt_assert(gt, vfid);
893 
894 	if (!num_vfs)
895 		return 0;
896 
897 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
898 	for (n = vfid; n < vfid + num_vfs; n++) {
899 		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
900 		if (err)
901 			break;
902 	}
903 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
904 
905 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
906 					   xe_gt_sriov_pf_config_get_ctxs,
907 					   "GuC context IDs", no_unit, n, err);
908 }
909 
910 static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
911 {
912 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
913 	u32 spare = pf_get_spare_ctxs(gt);
914 	u32 fair = (idm->total - spare) / num_vfs;
915 	int ret;
916 
917 	for (; fair; --fair) {
918 		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
919 		if (ret < 0)
920 			continue;
921 		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
922 		break;
923 	}
924 
925 	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
926 	return fair;
927 }
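/*
 * Illustrative example (hypothetical numbers): with idm->total = 65536 IDs
 * and spare = 256, a first guess for num_vfs = 7 is (65536 - 256) / 7 = 9325
 * IDs per VF. The loop then trial-reserves fair * num_vfs IDs (immediately
 * releasing them on success) and decrements the estimate until a contiguous
 * reservation of that size actually fits, which accounts for fragmentation
 * of the GuC context ID space.
 */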
928 
929 /**
930  * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
931  * @gt: the &xe_gt
932  * @vfid: starting VF identifier (can't be 0)
933  * @num_vfs: number of VFs to provision (can't be 0)
934  *
935  * This function can only be called on PF.
936  *
937  * Return: 0 on success or a negative error code on failure.
938  */
939 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
940 					unsigned int num_vfs)
941 {
942 	u32 fair;
943 
944 	xe_gt_assert(gt, vfid);
945 	xe_gt_assert(gt, num_vfs);
946 
947 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
948 	fair = pf_estimate_fair_ctxs(gt, num_vfs);
949 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
950 
951 	if (!fair)
952 		return -ENOSPC;
953 
954 	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
955 }
956 
957 static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
958 {
959 	/* XXX: preliminary, we don't use doorbells yet! */
960 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
961 }
962 
963 static u32 pf_get_spare_dbs(struct xe_gt *gt)
964 {
965 	u32 spare;
966 
967 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
968 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
969 
970 	spare = gt->sriov.pf.spare.num_dbs;
971 	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));
972 
973 	return spare;
974 }
975 
976 static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
977 {
978 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
979 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
980 
981 	if (spare > GUC_NUM_DOORBELLS)
982 		return -EINVAL;
983 
984 	if (spare && spare < pf_get_min_spare_dbs(gt))
985 		return -EINVAL;
986 
987 	gt->sriov.pf.spare.num_dbs = spare;
988 	return 0;
989 }
990 
991 /* Return: start ID or negative error code on failure */
992 static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
993 {
994 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
995 	unsigned int spare = pf_get_spare_dbs(gt);
996 
997 	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
998 }
999 
1000 static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
1001 {
1002 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1003 
1004 	if (num)
1005 		xe_guc_db_mgr_release_range(dbm, start, num);
1006 }
1007 
1008 static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1009 {
1010 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1011 
1012 	pf_release_dbs(gt, config->begin_db, config->num_dbs);
1013 	config->begin_db = 0;
1014 	config->num_dbs = 0;
1015 }
1016 
1017 static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1018 {
1019 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1020 	int ret;
1021 
1022 	xe_gt_assert(gt, vfid);
1023 
1024 	if (num_dbs > GUC_NUM_DOORBELLS)
1025 		return -EINVAL;
1026 
1027 	if (config->num_dbs) {
1028 		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
1029 		if (unlikely(ret))
1030 			return ret;
1031 
1032 		pf_release_config_dbs(gt, config);
1033 	}
1034 
1035 	if (!num_dbs)
1036 		return 0;
1037 
1038 	ret = pf_reserve_dbs(gt, num_dbs);
1039 	if (unlikely(ret < 0))
1040 		return ret;
1041 
1042 	config->begin_db = ret;
1043 	config->num_dbs = num_dbs;
1044 
1045 	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
1046 	if (unlikely(ret)) {
1047 		pf_release_config_dbs(gt, config);
1048 		return ret;
1049 	}
1050 
1051 	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
1052 				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
1053 	return 0;
1054 }
1055 
1056 static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
1057 {
1058 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1059 
1060 	return config->num_dbs;
1061 }
1062 
1063 /**
1064  * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
1065  * @gt: the &xe_gt
1066  * @vfid: the VF identifier
1067  *
1068  * This function can only be called on PF.
1069  * If &vfid represents a PF then number of PF's spare GuC doorbell IDs is returned.
1070  *
1071  * Return: VF's quota (or PF's spare).
1072  */
1073 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
1074 {
1075 	u32 num_dbs;
1076 
1077 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1078 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1079 
1080 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1081 	if (vfid)
1082 		num_dbs = pf_get_vf_config_dbs(gt, vfid);
1083 	else
1084 		num_dbs = pf_get_spare_dbs(gt);
1085 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1086 
1087 	return num_dbs;
1088 }
1089 
1090 /**
1091  * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
1092  * @gt: the &xe_gt
1093  * @vfid: the VF identifier
1094  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1095  *
1096  * This function can only be called on PF.
1097  *
1098  * Return: 0 on success or a negative error code on failure.
1099  */
1100 int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1101 {
1102 	int err;
1103 
1104 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1105 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1106 
1107 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1108 	if (vfid)
1109 		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
1110 	else
1111 		err = pf_set_spare_dbs(gt, num_dbs);
1112 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1113 
1114 	return pf_config_set_u32_done(gt, vfid, num_dbs,
1115 				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
1116 				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
1117 }
1118 
1119 /**
1120  * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
1121  * @gt: the &xe_gt
1122  * @vfid: starting VF identifier (can't be 0)
1123  * @num_vfs: number of VFs to provision
1124  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1125  *
1126  * This function can only be called on PF.
1127  *
1128  * Return: 0 on success or a negative error code on failure.
1129  */
1130 int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
1131 				       unsigned int num_vfs, u32 num_dbs)
1132 {
1133 	unsigned int n;
1134 	int err = 0;
1135 
1136 	xe_gt_assert(gt, vfid);
1137 
1138 	if (!num_vfs)
1139 		return 0;
1140 
1141 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1142 	for (n = vfid; n < vfid + num_vfs; n++) {
1143 		err = pf_provision_vf_dbs(gt, n, num_dbs);
1144 		if (err)
1145 			break;
1146 	}
1147 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1148 
1149 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
1150 					   xe_gt_sriov_pf_config_get_dbs,
1151 					   "GuC doorbell IDs", no_unit, n, err);
1152 }
1153 
1154 static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
1155 {
1156 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1157 	u32 spare = pf_get_spare_dbs(gt);
1158 	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
1159 	int ret;
1160 
1161 	for (; fair; --fair) {
1162 		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
1163 		if (ret < 0)
1164 			continue;
1165 		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
1166 		break;
1167 	}
1168 
1169 	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
1170 	return fair;
1171 }
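/*
 * Illustrative example (hypothetical numbers): assuming GUC_NUM_DOORBELLS is
 * 256 and spare = 1, provisioning num_vfs = 7 starts from (256 - 1) / 7 = 36
 * doorbells per VF and, as with context IDs above, lowers the estimate until
 * a contiguous range of fair * num_vfs doorbells can actually be reserved.
 */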
1172 
1173 /**
1174  * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
1175  * @gt: the &xe_gt
1176  * @vfid: starting VF identifier (can't be 0)
1177  * @num_vfs: number of VFs to provision (can't be 0)
1178  *
1179  * This function can only be called on PF.
1180  *
1181  * Return: 0 on success or a negative error code on failure.
1182  */
1183 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
1184 				       unsigned int num_vfs)
1185 {
1186 	u32 fair;
1187 
1188 	xe_gt_assert(gt, vfid);
1189 	xe_gt_assert(gt, num_vfs);
1190 
1191 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1192 	fair = pf_estimate_fair_dbs(gt, num_vfs);
1193 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1194 
1195 	if (!fair)
1196 		return -ENOSPC;
1197 
1198 	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
1199 }
1200 
1201 static u64 pf_get_lmem_alignment(struct xe_gt *gt)
1202 {
1203 	/* this might be platform dependent */
1204 	return SZ_2M;
1205 }
1206 
1207 static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
1208 {
1209 	/* this might be platform dependent */
1210 	return SZ_128M; /* XXX: preliminary */
1211 }
1212 
1213 static u64 pf_get_spare_lmem(struct xe_gt *gt)
1214 {
1215 	u64 spare;
1216 
1217 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1218 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1219 
1220 	spare = gt->sriov.pf.spare.lmem_size;
1221 	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));
1222 
1223 	return spare;
1224 }
1225 
1226 static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
1227 {
1228 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1229 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1230 
1231 	if (size && size < pf_get_min_spare_lmem(gt))
1232 		return -EINVAL;
1233 
1234 	gt->sriov.pf.spare.lmem_size = size;
1235 	return 0;
1236 }
1237 
1238 static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
1239 {
1240 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1241 	struct xe_bo *bo;
1242 
1243 	bo = config->lmem_obj;
1244 	return bo ? bo->size : 0;
1245 }
1246 
1247 static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1248 {
1249 	struct xe_device *xe = gt_to_xe(gt);
1250 	struct xe_tile *tile;
1251 	unsigned int tid;
1252 	int err;
1253 
1254 	for_each_tile(tile, xe, tid) {
1255 		if (tile->primary_gt == gt) {
1256 			err = pf_push_vf_cfg_lmem(gt, vfid, size);
1257 		} else {
1258 			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);
1259 
1260 			if (!lmem)
1261 				continue;
1262 			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
1263 		}
1264 		if (unlikely(err))
1265 			return err;
1266 	}
1267 	return 0;
1268 }
1269 
1270 static void pf_force_lmtt_invalidate(struct xe_device *xe)
1271 {
1272 	/* TODO */
1273 }
1274 
1275 static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1276 {
1277 	struct xe_lmtt *lmtt;
1278 	struct xe_tile *tile;
1279 	unsigned int tid;
1280 
1281 	xe_assert(xe, IS_DGFX(xe));
1282 	xe_assert(xe, IS_SRIOV_PF(xe));
1283 
1284 	for_each_tile(tile, xe, tid) {
1285 		lmtt = &tile->sriov.pf.lmtt;
1286 		xe_lmtt_drop_pages(lmtt, vfid);
1287 	}
1288 }
1289 
1290 static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1291 {
1292 	struct xe_gt_sriov_config *config;
1293 	struct xe_tile *tile;
1294 	struct xe_lmtt *lmtt;
1295 	struct xe_bo *bo;
1296 	struct xe_gt *gt;
1297 	u64 total, offset;
1298 	unsigned int gtid;
1299 	unsigned int tid;
1300 	int err;
1301 
1302 	xe_assert(xe, IS_DGFX(xe));
1303 	xe_assert(xe, IS_SRIOV_PF(xe));
1304 
1305 	total = 0;
1306 	for_each_tile(tile, xe, tid)
1307 		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
1308 
1309 	for_each_tile(tile, xe, tid) {
1310 		lmtt = &tile->sriov.pf.lmtt;
1311 
1312 		xe_lmtt_drop_pages(lmtt, vfid);
1313 		if (!total)
1314 			continue;
1315 
1316 		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
1317 		if (err)
1318 			goto fail;
1319 
1320 		offset = 0;
1321 		for_each_gt(gt, xe, gtid) {
1322 			if (xe_gt_is_media_type(gt))
1323 				continue;
1324 
1325 			config = pf_pick_vf_config(gt, vfid);
1326 			bo = config->lmem_obj;
1327 			if (!bo)
1328 				continue;
1329 
1330 			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
1331 			if (err)
1332 				goto fail;
1333 			offset += bo->size;
1334 		}
1335 	}
1336 
1337 	pf_force_lmtt_invalidate(xe);
1338 	return 0;
1339 
1340 fail:
1341 	for_each_tile(tile, xe, tid) {
1342 		lmtt = &tile->sriov.pf.lmtt;
1343 		xe_lmtt_drop_pages(lmtt, vfid);
1344 	}
1345 	return err;
1346 }
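/*
 * Note (illustrative summary of pf_update_vf_lmtt() above): every tile's LMTT
 * maps the VF's LMEM allocations from all tiles, so the function first sums
 * the per-tile sizes, then for each tile drops the old pages, prepares a
 * mapping of the total size, and populates it with each primary GT's
 * lmem_obj at an increasing offset.
 */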
1347 
1348 static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1349 {
1350 	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
1351 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1352 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1353 
1354 	if (config->lmem_obj) {
1355 		xe_bo_unpin_map_no_vm(config->lmem_obj);
1356 		config->lmem_obj = NULL;
1357 	}
1358 }
1359 
1360 static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1361 {
1362 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1363 	struct xe_device *xe = gt_to_xe(gt);
1364 	struct xe_tile *tile = gt_to_tile(gt);
1365 	struct xe_bo *bo;
1366 	int err;
1367 
1368 	xe_gt_assert(gt, vfid);
1369 	xe_gt_assert(gt, IS_DGFX(xe));
1370 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1371 
1372 	size = round_up(size, pf_get_lmem_alignment(gt));
1373 
1374 	if (config->lmem_obj) {
1375 		err = pf_distribute_config_lmem(gt, vfid, 0);
1376 		if (unlikely(err))
1377 			return err;
1378 
1379 		pf_reset_vf_lmtt(xe, vfid);
1380 		pf_release_vf_config_lmem(gt, config);
1381 	}
1382 	xe_gt_assert(gt, !config->lmem_obj);
1383 
1384 	if (!size)
1385 		return 0;
1386 
1387 	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
1388 	bo = xe_bo_create_pin_map(xe, tile, NULL,
1389 				  ALIGN(size, PAGE_SIZE),
1390 				  ttm_bo_type_kernel,
1391 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1392 				  XE_BO_FLAG_NEEDS_2M |
1393 				  XE_BO_FLAG_PINNED);
1394 	if (IS_ERR(bo))
1395 		return PTR_ERR(bo);
1396 
1397 	config->lmem_obj = bo;
1398 
1399 	err = pf_update_vf_lmtt(xe, vfid);
1400 	if (unlikely(err))
1401 		goto release;
1402 
1403 	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
1404 	if (unlikely(err))
1405 		goto reset_lmtt;
1406 
1407 	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
1408 				vfid, bo->size, bo->size / SZ_1M);
1409 	return 0;
1410 
1411 reset_lmtt:
1412 	pf_reset_vf_lmtt(xe, vfid);
1413 release:
1414 	pf_release_vf_config_lmem(gt, config);
1415 	return err;
1416 }
1417 
1418 /**
1419  * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
1420  * @gt: the &xe_gt
1421  * @vfid: the VF identifier
1422  *
1423  * This function can only be called on PF.
1424  *
1425  * Return: VF's (or PF's spare) LMEM quota.
1426  */
1427 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
1428 {
1429 	u64 size;
1430 
1431 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1432 	if (vfid)
1433 		size = pf_get_vf_config_lmem(gt, vfid);
1434 	else
1435 		size = pf_get_spare_lmem(gt);
1436 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1437 
1438 	return size;
1439 }
1440 
1441 /**
1442  * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
1443  * @gt: the &xe_gt (can't be media)
1444  * @vfid: the VF identifier
1445  * @size: requested LMEM size
1446  *
1447  * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
1448  */
1449 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1450 {
1451 	int err;
1452 
1453 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1454 	if (vfid)
1455 		err = pf_provision_vf_lmem(gt, vfid, size);
1456 	else
1457 		err = pf_set_spare_lmem(gt, size);
1458 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1459 
1460 	return pf_config_set_u64_done(gt, vfid, size,
1461 				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
1462 				      vfid ? "LMEM" : "spare LMEM", err);
1463 }
1464 
1465 /**
1466  * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
1467  * @gt: the &xe_gt (can't be media)
1468  * @vfid: starting VF identifier (can't be 0)
1469  * @num_vfs: number of VFs to provision
1470  * @size: requested LMEM size
1471  *
1472  * This function can only be called on PF.
1473  *
1474  * Return: 0 on success or a negative error code on failure.
1475  */
1476 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
1477 					unsigned int num_vfs, u64 size)
1478 {
1479 	unsigned int n;
1480 	int err = 0;
1481 
1482 	xe_gt_assert(gt, vfid);
1483 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1484 
1485 	if (!num_vfs)
1486 		return 0;
1487 
1488 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1489 	for (n = vfid; n < vfid + num_vfs; n++) {
1490 		err = pf_provision_vf_lmem(gt, n, size);
1491 		if (err)
1492 			break;
1493 	}
1494 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1495 
1496 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
1497 					   xe_gt_sriov_pf_config_get_lmem,
1498 					   "LMEM", n, err);
1499 }
1500 
1501 static u64 pf_query_free_lmem(struct xe_gt *gt)
1502 {
1503 	struct xe_tile *tile = gt->tile;
1504 
1505 	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
1506 }
1507 
1508 static u64 pf_query_max_lmem(struct xe_gt *gt)
1509 {
1510 	u64 alignment = pf_get_lmem_alignment(gt);
1511 	u64 spare = pf_get_spare_lmem(gt);
1512 	u64 free = pf_query_free_lmem(gt);
1513 	u64 avail;
1514 
1515 	/* XXX: need to account for 2MB blocks only */
1516 	avail = free > spare ? free - spare : 0;
1517 	avail = round_down(avail, alignment);
1518 
1519 	return avail;
1520 }
1521 
1522 #ifdef CONFIG_DRM_XE_DEBUG_SRIOV
1523 #define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
1524 #else
1525 #define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
1526 #endif
1527 
1528 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
1529 {
1530 	u64 available = pf_query_max_lmem(gt);
1531 	u64 alignment = pf_get_lmem_alignment(gt);
1532 	u64 fair;
1533 
1534 	fair = div_u64(available, num_vfs);
1535 	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
1536 	fair = ALIGN_DOWN(fair, alignment);
1537 #ifdef MAX_FAIR_LMEM
1538 	fair = min_t(u64, MAX_FAIR_LMEM, fair);
1539 #endif
1540 	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
1541 				available / SZ_1M, num_vfs, fair / SZ_1M);
1542 	return fair;
1543 }
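/*
 * Worked example (hypothetical numbers): with 28G of usable VRAM reported by
 * pf_query_max_lmem() and num_vfs = 7, the initial estimate is 4G per VF;
 * rounddown_pow_of_two() and the 2M alignment keep it at 4G, and the
 * MAX_FAIR_LMEM cap (SZ_2G here, or SZ_128M under CONFIG_DRM_XE_DEBUG_SRIOV)
 * then brings the final fair size down to 2G.
 */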
1544 
1545 /**
1546  * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
1547  * @gt: the &xe_gt (can't be media)
1548  * @vfid: starting VF identifier (can't be 0)
1549  * @num_vfs: number of VFs to provision (can't be 0)
1550  *
1551  * This function can only be called on PF.
1552  *
1553  * Return: 0 on success or a negative error code on failure.
1554  */
1555 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
1556 					unsigned int num_vfs)
1557 {
1558 	u64 fair;
1559 
1560 	xe_gt_assert(gt, vfid);
1561 	xe_gt_assert(gt, num_vfs);
1562 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1563 
1564 	if (!IS_DGFX(gt_to_xe(gt)))
1565 		return 0;
1566 
1567 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1568 	fair = pf_estimate_fair_lmem(gt, num_vfs);
1569 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1570 
1571 	if (!fair)
1572 		return -ENOSPC;
1573 
1574 	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
1575 }
1576 
1577 /**
1578  * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
1579  * @gt: the &xe_gt
1580  * @vfid: starting VF identifier (can't be 0)
1581  * @num_vfs: number of VFs to provision (can't be 0)
1582  *
1583  * This function can only be called on PF.
1584  *
1585  * Return: 0 on success or a negative error code on failure.
1586  */
1587 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
1588 				   unsigned int num_vfs)
1589 {
1590 	int result = 0;
1591 	int err;
1592 
1593 	xe_gt_assert(gt, vfid);
1594 	xe_gt_assert(gt, num_vfs);
1595 
1596 	if (!xe_gt_is_media_type(gt)) {
1597 		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
1598 		result = result ?: err;
1599 		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
1600 		result = result ?: err;
1601 	}
1602 	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
1603 	result = result ?: err;
1604 	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
1605 	result = result ?: err;
1606 
1607 	return result;
1608 }
1609 
1610 static const char *exec_quantum_unit(u32 exec_quantum)
1611 {
1612 	return exec_quantum ? "ms" : "(infinity)";
1613 }
1614 
1615 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1616 				     u32 exec_quantum)
1617 {
1618 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1619 	int err;
1620 
1621 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1622 	if (unlikely(err))
1623 		return err;
1624 
1625 	config->exec_quantum = exec_quantum;
1626 	return 0;
1627 }
1628 
1629 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1630 {
1631 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1632 
1633 	return config->exec_quantum;
1634 }
1635 
1636 /**
1637  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1638  * @gt: the &xe_gt
1639  * @vfid: the VF identifier
1640  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1641  *
1642  * This function can only be called on PF.
1643  *
1644  * Return: 0 on success or a negative error code on failure.
1645  */
1646 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1647 					   u32 exec_quantum)
1648 {
1649 	int err;
1650 
1651 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1652 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1653 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1654 
1655 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1656 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1657 				      "execution quantum", exec_quantum_unit, err);
1658 }
1659 
1660 /**
1661  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1662  * @gt: the &xe_gt
1663  * @vfid: the VF identifier
1664  *
1665  * This function can only be called on PF.
1666  *
1667  * Return: VF's (or PF's) execution quantum in milliseconds.
1668  */
1669 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1670 {
1671 	u32 exec_quantum;
1672 
1673 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1674 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1675 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1676 
1677 	return exec_quantum;
1678 }
1679 
1680 static const char *preempt_timeout_unit(u32 preempt_timeout)
1681 {
1682 	return preempt_timeout ? "us" : "(infinity)";
1683 }
1684 
1685 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1686 					u32 preempt_timeout)
1687 {
1688 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1689 	int err;
1690 
1691 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1692 	if (unlikely(err))
1693 		return err;
1694 
1695 	config->preempt_timeout = preempt_timeout;
1696 
1697 	return 0;
1698 }
1699 
1700 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1701 {
1702 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1703 
1704 	return config->preempt_timeout;
1705 }
1706 
1707 /**
1708  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1709  * @gt: the &xe_gt
1710  * @vfid: the VF identifier
1711  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1712  *
1713  * This function can only be called on PF.
1714  *
1715  * Return: 0 on success or a negative error code on failure.
1716  */
1717 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1718 					      u32 preempt_timeout)
1719 {
1720 	int err;
1721 
1722 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1723 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1724 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1725 
1726 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1727 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1728 				      "preemption timeout", preempt_timeout_unit, err);
1729 }
1730 
1731 /**
1732  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1733  * @gt: the &xe_gt
1734  * @vfid: the VF identifier
1735  *
1736  * This function can only be called on PF.
1737  *
1738  * Return: VF's (or PF's) preemption timeout in microseconds.
1739  */
1740 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1741 {
1742 	u32 preempt_timeout;
1743 
1744 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1745 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1746 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1747 
1748 	return preempt_timeout;
1749 }
1750 
1751 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1752 {
1753 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1754 
1755 	config->exec_quantum = 0;
1756 	config->preempt_timeout = 0;
1757 }
1758 
1759 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1760 				  enum xe_guc_klv_threshold_index index, u32 value)
1761 {
1762 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1763 	int err;
1764 
1765 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1766 	if (unlikely(err))
1767 		return err;
1768 
1769 	config->thresholds[index] = value;
1770 
1771 	return 0;
1772 }
1773 
1774 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1775 			    enum xe_guc_klv_threshold_index index)
1776 {
1777 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1778 
1779 	return config->thresholds[index];
1780 }
1781 
1782 static const char *threshold_unit(u32 threshold)
1783 {
1784 	return threshold ? "" : "(disabled)";
1785 }
1786 
1787 /**
1788  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1789  * @gt: the &xe_gt
1790  * @vfid: the VF identifier
1791  * @index: the threshold index
1792  * @value: requested value (0 means disabled)
1793  *
1794  * This function can only be called on PF.
1795  *
1796  * Return: 0 on success or a negative error code on failure.
1797  */
1798 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1799 					enum xe_guc_klv_threshold_index index, u32 value)
1800 {
1801 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1802 	const char *name = xe_guc_klv_key_to_string(key);
1803 	int err;
1804 
1805 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1806 	err = pf_provision_threshold(gt, vfid, index, value);
1807 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1808 
1809 	return pf_config_set_u32_done(gt, vfid, value,
1810 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1811 				      name, threshold_unit, err);
1812 }
1813 
1814 /**
1815  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1816  * @gt: the &xe_gt
1817  * @vfid: the VF identifier
1818  * @index: the threshold index
1819  *
1820  * This function can only be called on PF.
1821  *
1822  * Return: value of VF's (or PF's) threshold.
1823  */
1824 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1825 					enum xe_guc_klv_threshold_index index)
1826 {
1827 	u32 value;
1828 
1829 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1830 	value = pf_get_threshold(gt, vfid, index);
1831 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1832 
1833 	return value;
1834 }
1835 
1836 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
1837 {
1838 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1839 	struct xe_device *xe = gt_to_xe(gt);
1840 
1841 	if (!xe_gt_is_media_type(gt)) {
1842 		pf_release_vf_config_ggtt(gt, config);
1843 		if (IS_DGFX(xe)) {
1844 			pf_release_vf_config_lmem(gt, config);
1845 			pf_update_vf_lmtt(xe, vfid);
1846 		}
1847 	}
1848 	pf_release_config_ctxs(gt, config);
1849 	pf_release_config_dbs(gt, config);
1850 	pf_reset_config_sched(gt, config);
1851 }
1852 
1853 /**
1854  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
1855  * @gt: the &xe_gt
1856  * @vfid: the VF identifier (can't be PF)
1857  * @force: force configuration release
1858  *
1859  * This function can only be called on PF.
1860  *
1861  * Return: 0 on success or a negative error code on failure.
1862  */
1863 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
1864 {
1865 	int err;
1866 
1867 	xe_gt_assert(gt, vfid);
1868 
1869 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1870 	err = pf_send_vf_cfg_reset(gt, vfid);
1871 	if (!err || force)
1872 		pf_release_vf_config(gt, vfid);
1873 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1874 
1875 	if (unlikely(err)) {
1876 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
1877 				   vfid, ERR_PTR(err),
1878 				   force ? " but all resources were released anyway!" : "");
1879 	}
1880 
1881 	return force ? 0 : err;
1882 }
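
/*
 * Illustrative example only: when disabling VFs, a PF-side caller could drop
 * every VF's configuration and force the release of resources even if the
 * GuC reset request fails (total_vfs assumed to come from
 * xe_sriov_pf_get_totalvfs()):
 *
 *	for (n = 1; n <= total_vfs; n++)
 *		xe_gt_sriov_pf_config_release(gt, n, true);
 */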
1883 
1884 /**
1885  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
1886  * @gt: the &xe_gt
1887  * @vfid: the VF identifier (can't be PF)
1888  * @refresh: explicit refresh
1889  *
1890  * This function can only be called on PF.
1891  *
1892  * Return: 0 on success or a negative error code on failure.
1893  */
1894 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
1895 {
1896 	int err = 0;
1897 
1898 	xe_gt_assert(gt, vfid);
1899 
1900 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1901 	if (refresh)
1902 		err = pf_send_vf_cfg_reset(gt, vfid);
1903 	if (!err)
1904 		err = pf_push_full_vf_config(gt, vfid);
1905 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1906 
1907 	if (unlikely(err)) {
1908 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
1909 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
1910 	}
1911 
1912 	return err;
1913 }
1914 
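/*
 * Return: 1 if all mandatory resources (GuC context IDs, GGTT and, on
 *         discrete platforms, LMEM) are provisioned, -ENOKEY if the VF is
 *         only partially provisioned, -ENODATA if nothing is provisioned.
 */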
1915 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
1916 {
1917 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
1918 	struct xe_device *xe = gt_to_xe(gt);
1919 	bool is_primary = !xe_gt_is_media_type(gt);
1920 	bool valid_ggtt, valid_ctxs, valid_dbs;
1921 	bool valid_any, valid_all;
1922 
1923 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
1924 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
1925 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
1926 
1927 	/* note that GuC doorbells are optional */
1928 	valid_any = valid_ctxs || valid_dbs;
1929 	valid_all = valid_ctxs;
1930 
1931 	/* and GGTT/LMEM is configured on primary GT only */
1932 	valid_all = valid_all && valid_ggtt;
1933 	valid_any = valid_any || (valid_ggtt && is_primary);
1934 
1935 	if (IS_DGFX(xe)) {
1936 		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
1937 
1938 		valid_any = valid_any || (valid_lmem && is_primary);
1939 		valid_all = valid_all && valid_lmem;
1940 	}
1941 
1942 	return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA;
1943 }
1944 
1945 /**
1946  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
1947  * @gt: the &xe_gt
1948  * @vfid: the VF identifier (can't be PF)
1949  *
1950  * This function can only be called on PF.
1951  *
1952  * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
1953  */
1954 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
1955 {
1956 	bool empty;
1957 
1958 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1959 	xe_gt_assert(gt, vfid);
1960 
1961 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1962 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
1963 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1964 
1965 	return empty;
1966 }
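
/*
 * Illustrative example only: a PF-side auto-provisioning path could use this
 * check to leave already provisioned VFs untouched:
 *
 *	if (!xe_gt_sriov_pf_config_is_empty(gt, vfid))
 *		return 0;
 */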
1967 
1968 /**
1969  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
1970  * @gt: the &xe_gt
1971  *
1972  * Any prior configurations pushed to GuC are lost when the GT is reset.
1973  * Push again all non-empty VF configurations to the GuC.
1974  *
1975  * This function can only be called on PF.
1976  */
1977 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
1978 {
1979 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
1980 	unsigned int fail = 0, skip = 0;
1981 
1982 	for (n = 1; n <= total_vfs; n++) {
1983 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
1984 			skip++;
1985 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
1986 			fail++;
1987 	}
1988 
1989 	if (fail)
1990 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
1991 				   fail, total_vfs - skip, str_plural(total_vfs - skip));
1992 
1993 	if (fail != total_vfs)
1994 		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
1995 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
1996 }
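
/*
 * Illustrative example only: the PF-side GT reset handling could re-push the
 * VF configurations like this:
 *
 *	if (IS_SRIOV_PF(gt_to_xe(gt)))
 *		xe_gt_sriov_pf_config_restart(gt);
 */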
1997 
1998 /**
1999  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2000  * @gt: the &xe_gt
2001  * @p: the &drm_printer
2002  *
2003  * Print GGTT configuration data for all VFs.
2004  * VFs without provisioned GGTT are ignored.
2005  *
2006  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2007  */
2008 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2009 {
2010 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2011 	const struct xe_gt_sriov_config *config;
2012 	char buf[10];
2013 
2014 	for (n = 1; n <= total_vfs; n++) {
2015 		config = &gt->sriov.pf.vfs[n].config;
2016 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2017 			continue;
2018 
2019 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2020 				buf, sizeof(buf));
2021 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2022 			   n, config->ggtt_region->base.start,
2023 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2024 			   buf);
2025 	}
2026 
2027 	return 0;
2028 }
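
/*
 * Example output (addresses and sizes are illustrative only), one line per
 * VF with a provisioned GGTT region:
 *
 *	VF1:	0x40000000-0x7fffffff	(1.00 GiB)
 *	VF2:	0x80000000-0xbfffffff	(1.00 GiB)
 */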
2029 
2030 /**
2031  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2032  * @gt: the &xe_gt
2033  * @p: the &drm_printer
2034  *
2035  * Print GuC context ID allocations across all VFs.
2036  * VFs without GuC context IDs are skipped.
2037  *
2038  * This function can only be called on PF.
2039  * Return: 0 on success or a negative error code on failure.
2040  */
2041 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2042 {
2043 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2044 	const struct xe_gt_sriov_config *config;
2045 
2046 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2047 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2048 
2049 	for (n = 1; n <= total_vfs; n++) {
2050 		config = &gt->sriov.pf.vfs[n].config;
2051 		if (!config->num_ctxs)
2052 			continue;
2053 
2054 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2055 			   n,
2056 			   config->begin_ctx,
2057 			   config->begin_ctx + config->num_ctxs - 1,
2058 			   config->num_ctxs);
2059 	}
2060 
2061 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2062 	return 0;
2063 }
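
/*
 * Example output (ID ranges are illustrative only), one line per VF with GuC
 * context IDs provisioned:
 *
 *	VF1:	64-127	(64)
 *	VF2:	128-191	(64)
 */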
2064 
2065 /**
2066  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2067  * @gt: the &xe_gt
2068  * @p: the &drm_printer
2069  *
2070  * Print GuC doorbell ID allocations across all VFs.
2071  * VFs without GuC doorbell IDs are skipped.
2072  *
2073  * This function can only be called on PF.
2074  * Return: 0 on success or a negative error code on failure.
2075  */
2076 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2077 {
2078 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2079 	const struct xe_gt_sriov_config *config;
2080 
2081 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2082 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2083 
2084 	for (n = 1; n <= total_vfs; n++) {
2085 		config = &gt->sriov.pf.vfs[n].config;
2086 		if (!config->num_dbs)
2087 			continue;
2088 
2089 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2090 			   n,
2091 			   config->begin_db,
2092 			   config->begin_db + config->num_dbs - 1,
2093 			   config->num_dbs);
2094 	}
2095 
2096 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2097 	return 0;
2098 }
2099 
2100 /**
2101  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2102  * @gt: the &xe_gt
2103  * @p: the &drm_printer
2104  *
2105  * Print GGTT ranges that are still available for provisioning.
2106  *
2107  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2108  */
2109 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2110 {
2111 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2112 	u64 alignment = pf_get_ggtt_alignment(gt);
2113 	u64 spare, avail, total;
2114 	char buf[10];
2115 
2116 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2117 
2118 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2119 
2120 	spare = pf_get_spare_ggtt(gt);
2121 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2122 
2123 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2124 
2125 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2126 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2127 
2128 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2129 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2130 
2131 	avail = total > spare ? total - spare : 0;
2132 
2133 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2134 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2135 
2136 	return 0;
2137 }
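
/*
 * Example output (sizes are illustrative only); any hole ranges printed by
 * xe_ggtt_print_holes() come first, followed by this summary:
 *
 *	total:	4294967296	(4.00 GiB)
 *	spare:	268435456	(256 MiB)
 *	avail:	4026531840	(3.75 GiB)
 */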
2138