xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c (revision 3e7819886281e077e82006fe4804b0d6b0f5643b)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023-2024 Intel Corporation
4  */
5 
6 #include <linux/string_choices.h>
7 #include <linux/wordpart.h>
8 
9 #include "abi/guc_actions_sriov_abi.h"
10 #include "abi/guc_klvs_abi.h"
11 
12 #include "regs/xe_guc_regs.h"
13 
14 #include "xe_bo.h"
15 #include "xe_device.h"
16 #include "xe_ggtt.h"
17 #include "xe_gt.h"
18 #include "xe_gt_sriov_pf_config.h"
19 #include "xe_gt_sriov_pf_helpers.h"
20 #include "xe_gt_sriov_pf_policy.h"
21 #include "xe_gt_sriov_printk.h"
22 #include "xe_guc.h"
23 #include "xe_guc_ct.h"
24 #include "xe_guc_db_mgr.h"
25 #include "xe_guc_fwif.h"
26 #include "xe_guc_id_mgr.h"
27 #include "xe_guc_klv_helpers.h"
28 #include "xe_guc_submit.h"
29 #include "xe_lmtt.h"
30 #include "xe_map.h"
31 #include "xe_sriov.h"
32 #include "xe_ttm_vram_mgr.h"
33 #include "xe_wopcm.h"
34 
35 /*
36  * Return: number of KLVs that were successfully parsed and saved,
37  *         negative error code on failure.
38  */
39 static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
40 				    u64 addr, u32 size)
41 {
42 	u32 request[] = {
43 		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
44 		vfid,
45 		lower_32_bits(addr),
46 		upper_32_bits(addr),
47 		size,
48 	};
49 
50 	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
51 }
52 
53 /*
54  * Return: 0 on success, negative error code on failure.
55  */
56 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
57 {
58 	struct xe_guc *guc = &gt->uc.guc;
59 	int ret;
60 
61 	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);
62 
63 	return ret <= 0 ? ret : -EPROTO;
64 }
65 
66 /*
67  * Return: number of KLVs that were successfully parsed and saved,
68  *         negative error code on failure.
69  */
70 static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
71 {
72 	const u32 bytes = num_dwords * sizeof(u32);
73 	struct xe_tile *tile = gt_to_tile(gt);
74 	struct xe_device *xe = tile_to_xe(tile);
75 	struct xe_guc *guc = &gt->uc.guc;
76 	struct xe_bo *bo;
77 	int ret;
78 
79 	bo = xe_bo_create_pin_map(xe, tile, NULL,
80 				  ALIGN(bytes, PAGE_SIZE),
81 				  ttm_bo_type_kernel,
82 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
83 				  XE_BO_FLAG_GGTT |
84 				  XE_BO_FLAG_GGTT_INVALIDATE);
85 	if (IS_ERR(bo))
86 		return PTR_ERR(bo);
87 
88 	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
89 
90 	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
91 
92 	xe_bo_unpin_map_no_vm(bo);
93 
94 	return ret;
95 }
96 
97 /*
98  * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
99  *         negative error code on failure.
100  */
101 static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
102 			       const u32 *klvs, u32 num_dwords)
103 {
104 	int ret;
105 
106 	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
107 
108 	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
109 
110 	if (ret != num_klvs) {
111 		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
112 		struct drm_printer p = xe_gt_info_printer(gt);
113 		char name[8];
114 
115 		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
116 				   xe_sriov_function_name(vfid, name, sizeof(name)),
117 				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
118 		xe_guc_klv_print(klvs, num_dwords, &p);
119 		return err;
120 	}
121 
122 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
123 		struct drm_printer p = xe_gt_info_printer(gt);
124 
125 		xe_guc_klv_print(klvs, num_dwords, &p);
126 	}
127 
128 	return 0;
129 }
130 
131 static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
132 {
133 	u32 klv[] = {
134 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
135 		value,
136 	};
137 
138 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
139 }
140 
141 static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
142 {
143 	u32 klv[] = {
144 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
145 		lower_32_bits(value),
146 		upper_32_bits(value),
147 	};
148 
149 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
150 }
151 
152 static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
153 {
154 	u32 klvs[] = {
155 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
156 		lower_32_bits(start),
157 		upper_32_bits(start),
158 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
159 		lower_32_bits(size),
160 		upper_32_bits(size),
161 	};
162 
163 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
164 }
165 
166 static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
167 {
168 	u32 klvs[] = {
169 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
170 		begin,
171 		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
172 		num,
173 	};
174 
175 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
176 }
177 
178 static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
179 {
180 	u32 klvs[] = {
181 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
182 		begin,
183 		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
184 		num,
185 	};
186 
187 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
188 }
189 
190 static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum)
191 {
192 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, exec_quantum);
193 }
194 
195 static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 preempt_timeout)
196 {
197 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, preempt_timeout);
198 }
199 
200 static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
201 {
202 	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
203 }
204 
205 static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
206 {
207 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
208 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
209 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
210 
211 	return &gt->sriov.pf.vfs[vfid].config;
212 }
213 
214 /* Return: number of configuration dwords written */
215 static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
216 {
217 	u32 n = 0;
218 
219 	if (drm_mm_node_allocated(&config->ggtt_region)) {
220 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
221 		cfg[n++] = lower_32_bits(config->ggtt_region.start);
222 		cfg[n++] = upper_32_bits(config->ggtt_region.start);
223 
224 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
225 		cfg[n++] = lower_32_bits(config->ggtt_region.size);
226 		cfg[n++] = upper_32_bits(config->ggtt_region.size);
227 	}
228 
229 	return n;
230 }
231 
232 /* Return: number of configuration dwords written */
233 static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
234 {
235 	u32 n = 0;
236 
237 	n += encode_config_ggtt(cfg, config);
238 
239 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
240 	cfg[n++] = config->begin_ctx;
241 
242 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
243 	cfg[n++] = config->num_ctxs;
244 
245 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
246 	cfg[n++] = config->begin_db;
247 
248 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
249 	cfg[n++] = config->num_dbs;
250 
251 	if (config->lmem_obj) {
252 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
253 		cfg[n++] = lower_32_bits(config->lmem_obj->size);
254 		cfg[n++] = upper_32_bits(config->lmem_obj->size);
255 	}
256 
257 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
258 	cfg[n++] = config->exec_quantum;
259 
260 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
261 	cfg[n++] = config->preempt_timeout;
262 
263 	return n;
264 }
265 
266 static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
267 {
268 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
269 	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
270 	u32 num_dwords;
271 	int num_klvs;
272 	u32 *cfg;
273 	int err;
274 
275 	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
276 	if (!cfg)
277 		return -ENOMEM;
278 
279 	num_dwords = encode_config(cfg, config);
280 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
281 
282 	if (xe_gt_is_media_type(gt)) {
283 		struct xe_gt *primary = gt->tile->primary_gt;
284 		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
285 
286 		/* media-GT will never include a GGTT config */
287 		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
288 
289 		/* the GGTT config must be taken from the primary-GT instead */
290 		num_dwords += encode_config_ggtt(cfg + num_dwords, other);
291 	}
292 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
293 
294 	num_klvs = xe_guc_klv_count(cfg, num_dwords);
295 	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
296 
297 	kfree(cfg);
298 	return err;
299 }
300 
301 static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
302 {
303 	struct xe_device *xe = gt_to_xe(gt);
304 
305 	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
306 }
307 
308 static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
309 {
310 	/* XXX: preliminary */
311 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
312 		pf_get_ggtt_alignment(gt) : SZ_64M;
313 }
314 
315 static u64 pf_get_spare_ggtt(struct xe_gt *gt)
316 {
317 	u64 spare;
318 
319 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
320 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
321 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
322 
323 	spare = gt->sriov.pf.spare.ggtt_size;
324 	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));
325 
326 	return spare;
327 }
328 
329 static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
330 {
331 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
332 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
333 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
334 
335 	if (size && size < pf_get_min_spare_ggtt(gt))
336 		return -EINVAL;
337 
338 	size = round_up(size, pf_get_ggtt_alignment(gt));
339 	gt->sriov.pf.spare.ggtt_size = size;
340 
341 	return 0;
342 }
343 
344 static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
345 {
346 	int err, err2 = 0;
347 
348 	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);
349 
350 	if (tile->media_gt && !err)
351 		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);
352 
353 	return err ?: err2;
354 }
355 
356 static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
357 {
358 	struct xe_ggtt *ggtt = tile->mem.ggtt;
359 
360 	if (drm_mm_node_allocated(node)) {
361 		/*
362 		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
363 		 * is redundant, as PTE will be implicitly re-assigned to PF by
364 		 * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
365 		 */
366 		xe_ggtt_remove_node(ggtt, node, false);
367 	}
368 }
369 
370 static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
371 {
372 	pf_release_ggtt(gt_to_tile(gt), &config->ggtt_region);
373 }
374 
375 static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
376 {
377 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
378 	struct drm_mm_node *node = &config->ggtt_region;
379 	struct xe_tile *tile = gt_to_tile(gt);
380 	struct xe_ggtt *ggtt = tile->mem.ggtt;
381 	u64 alignment = pf_get_ggtt_alignment(gt);
382 	int err;
383 
384 	xe_gt_assert(gt, vfid);
385 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
386 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
387 
388 	size = round_up(size, alignment);
389 
390 	if (drm_mm_node_allocated(node)) {
391 		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
392 		if (unlikely(err))
393 			return err;
394 
395 		pf_release_ggtt(tile, node);
396 	}
397 	xe_gt_assert(gt, !drm_mm_node_allocated(node));
398 
399 	if (!size)
400 		return 0;
401 
402 	err = xe_ggtt_insert_special_node(ggtt, node, size, alignment);
403 	if (unlikely(err))
404 		return err;
405 
406 	xe_ggtt_assign(ggtt, node, vfid);
407 	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
408 				vfid, node->start, node->start + node->size - 1);
409 
410 	err = pf_distribute_config_ggtt(gt->tile, vfid, node->start, node->size);
411 	if (unlikely(err))
412 		return err;
413 
414 	return 0;
415 }
416 
417 static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
418 {
419 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
420 	struct drm_mm_node *node = &config->ggtt_region;
421 
422 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
423 	return drm_mm_node_allocated(node) ? node->size : 0;
424 }
425 
426 /**
427  * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
428  * @gt: the &xe_gt
429  * @vfid: the VF identifier
430  *
431  * This function can only be called on PF.
432  *
433  * Return: size of the VF's assigned (or PF's spare) GGTT address space.
434  */
435 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
436 {
437 	u64 size;
438 
439 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
440 	if (vfid)
441 		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
442 	else
443 		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
444 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
445 
446 	return size;
447 }
448 
449 static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
450 				  u64 actual, const char *what, int err)
451 {
452 	char size[10];
453 	char name[8];
454 
455 	xe_sriov_function_name(vfid, name, sizeof(name));
456 
457 	if (unlikely(err)) {
458 		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
459 		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
460 				   name, value, size, what, ERR_PTR(err));
461 		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
462 		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
463 				 name, actual, size, what);
464 		return err;
465 	}
466 
467 	/* the actual value may have changed during provisioning */
468 	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
469 	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
470 			 name, actual, size, what);
471 	return 0;
472 }
473 
474 /**
475  * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
476  * @gt: the &xe_gt (can't be media)
477  * @vfid: the VF identifier
478  * @size: requested GGTT size
479  *
480  * If &vfid represents PF, then function will change PF's spare GGTT config.
481  *
482  * This function can only be called on PF.
483  *
484  * Return: 0 on success or a negative error code on failure.
485  */
486 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
487 {
488 	int err;
489 
490 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
491 
492 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
493 	if (vfid)
494 		err = pf_provision_vf_ggtt(gt, vfid, size);
495 	else
496 		err = pf_set_spare_ggtt(gt, size);
497 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
498 
499 	return pf_config_set_u64_done(gt, vfid, size,
500 				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
501 				      vfid ? "GGTT" : "spare GGTT", err);
502 }
503 
504 static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
505 				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
506 				       const char *what, unsigned int last, int err)
507 {
508 	char size[10];
509 
510 	xe_gt_assert(gt, first);
511 	xe_gt_assert(gt, num_vfs);
512 	xe_gt_assert(gt, first <= last);
513 
514 	if (num_vfs == 1)
515 		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);
516 
517 	if (unlikely(err)) {
518 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
519 				   first, first + num_vfs - 1, what);
520 		if (last > first)
521 			pf_config_bulk_set_u64_done(gt, first, last - first, value,
522 						    get, what, last, 0);
523 		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
524 	}
525 
526 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
527 	value = get(gt, first);
528 	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
529 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
530 			 first, first + num_vfs - 1, value, size, what);
531 	return 0;
532 }
533 
534 /**
535  * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
536  * @gt: the &xe_gt (can't be media)
537  * @vfid: starting VF identifier (can't be 0)
538  * @num_vfs: number of VFs to provision
539  * @size: requested GGTT size
540  *
541  * This function can only be called on PF.
542  *
543  * Return: 0 on success or a negative error code on failure.
544  */
545 int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
546 					unsigned int num_vfs, u64 size)
547 {
548 	unsigned int n;
549 	int err = 0;
550 
551 	xe_gt_assert(gt, vfid);
552 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
553 
554 	if (!num_vfs)
555 		return 0;
556 
557 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
558 	for (n = vfid; n < vfid + num_vfs; n++) {
559 		err = pf_provision_vf_ggtt(gt, n, size);
560 		if (err)
561 			break;
562 	}
563 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
564 
565 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
566 					   xe_gt_sriov_pf_config_get_ggtt,
567 					   "GGTT", n, err);
568 }
569 
570 /* Return: size of the largest continuous GGTT region */
571 static u64 pf_get_max_ggtt(struct xe_gt *gt)
572 {
573 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
574 	const struct drm_mm *mm = &ggtt->mm;
575 	const struct drm_mm_node *entry;
576 	u64 alignment = pf_get_ggtt_alignment(gt);
577 	u64 spare = pf_get_spare_ggtt(gt);
578 	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
579 	u64 hole_start, hole_end, hole_size;
580 	u64 max_hole = 0;
581 
582 	mutex_lock(&ggtt->lock);
583 
584 	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
585 		hole_start = max(hole_start, hole_min_start);
586 		hole_start = ALIGN(hole_start, alignment);
587 		hole_end = ALIGN_DOWN(hole_end, alignment);
588 		if (hole_start >= hole_end)
589 			continue;
590 		hole_size = hole_end - hole_start;
591 		xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
592 					hole_start, hole_size / SZ_1K);
593 		spare -= min3(spare, hole_size, max_hole);
594 		max_hole = max(max_hole, hole_size);
595 	}
596 
597 	mutex_unlock(&ggtt->lock);
598 
599 	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
600 				max_hole / SZ_1K, spare / SZ_1K);
601 	return max_hole > spare ? max_hole - spare : 0;
602 }
603 
604 static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
605 {
606 	u64 available = pf_get_max_ggtt(gt);
607 	u64 alignment = pf_get_ggtt_alignment(gt);
608 	u64 fair;
609 
610 	/*
611 	 * To simplify the logic we only look at single largest GGTT region
612 	 * as that will be always the best fit for 1 VF case, and most likely
613 	 * will also nicely cover other cases where VFs are provisioned on the
614 	 * fresh and idle PF driver, without any stale GGTT allocations spread
615 	 * in the middle of the full GGTT range.
616 	 */
617 
618 	fair = div_u64(available, num_vfs);
619 	fair = ALIGN_DOWN(fair, alignment);
620 	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
621 				available / SZ_1K, num_vfs, fair / SZ_1K);
622 	return fair;
623 }
624 
625 /**
626  * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
627  * @gt: the &xe_gt (can't be media)
628  * @vfid: starting VF identifier (can't be 0)
629  * @num_vfs: number of VFs to provision
630  *
631  * This function can only be called on PF.
632  *
633  * Return: 0 on success or a negative error code on failure.
634  */
635 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
636 					unsigned int num_vfs)
637 {
638 	u64 fair;
639 
640 	xe_gt_assert(gt, vfid);
641 	xe_gt_assert(gt, num_vfs);
642 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
643 
644 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
645 	fair = pf_estimate_fair_ggtt(gt, num_vfs);
646 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
647 
648 	if (!fair)
649 		return -ENOSPC;
650 
651 	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
652 }
653 
654 static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
655 {
656 	/* XXX: preliminary */
657 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
658 		hweight64(gt->info.engine_mask) : SZ_256;
659 }
660 
661 static u32 pf_get_spare_ctxs(struct xe_gt *gt)
662 {
663 	u32 spare;
664 
665 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
666 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
667 
668 	spare = gt->sriov.pf.spare.num_ctxs;
669 	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));
670 
671 	return spare;
672 }
673 
674 static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
675 {
676 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
677 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
678 
679 	if (spare > GUC_ID_MAX)
680 		return -EINVAL;
681 
682 	if (spare && spare < pf_get_min_spare_ctxs(gt))
683 		return -EINVAL;
684 
685 	gt->sriov.pf.spare.num_ctxs = spare;
686 
687 	return 0;
688 }
689 
690 /* Return: start ID or negative error code on failure */
691 static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
692 {
693 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
694 	unsigned int spare = pf_get_spare_ctxs(gt);
695 
696 	return xe_guc_id_mgr_reserve(idm, num, spare);
697 }
698 
699 static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
700 {
701 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
702 
703 	if (num)
704 		xe_guc_id_mgr_release(idm, start, num);
705 }
706 
707 static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
708 {
709 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
710 
711 	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
712 	config->begin_ctx = 0;
713 	config->num_ctxs = 0;
714 }
715 
716 static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
717 {
718 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
719 	int ret;
720 
721 	xe_gt_assert(gt, vfid);
722 
723 	if (num_ctxs > GUC_ID_MAX)
724 		return -EINVAL;
725 
726 	if (config->num_ctxs) {
727 		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
728 		if (unlikely(ret))
729 			return ret;
730 
731 		pf_release_config_ctxs(gt, config);
732 	}
733 
734 	if (!num_ctxs)
735 		return 0;
736 
737 	ret = pf_reserve_ctxs(gt, num_ctxs);
738 	if (unlikely(ret < 0))
739 		return ret;
740 
741 	config->begin_ctx = ret;
742 	config->num_ctxs = num_ctxs;
743 
744 	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
745 	if (unlikely(ret)) {
746 		pf_release_config_ctxs(gt, config);
747 		return ret;
748 	}
749 
750 	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
751 				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
752 	return 0;
753 }
754 
755 static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
756 {
757 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
758 
759 	return config->num_ctxs;
760 }
761 
762 /**
763  * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC contexts IDs quota.
764  * @gt: the &xe_gt
765  * @vfid: the VF identifier
766  *
767  * This function can only be called on PF.
768  * If &vfid represents a PF then number of PF's spare GuC context IDs is returned.
769  *
770  * Return: VF's quota (or PF's spare).
771  */
772 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
773 {
774 	u32 num_ctxs;
775 
776 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
777 	if (vfid)
778 		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
779 	else
780 		num_ctxs = pf_get_spare_ctxs(gt);
781 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
782 
783 	return num_ctxs;
784 }
785 
786 static const char *no_unit(u32 unused)
787 {
788 	return "";
789 }
790 
791 static const char *spare_unit(u32 unused)
792 {
793 	return " spare";
794 }
795 
796 static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
797 				  const char *what, const char *(*unit)(u32), int err)
798 {
799 	char name[8];
800 
801 	xe_sriov_function_name(vfid, name, sizeof(name));
802 
803 	if (unlikely(err)) {
804 		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
805 				   name, value, unit(value), what, ERR_PTR(err));
806 		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
807 				 name, actual, unit(actual), what);
808 		return err;
809 	}
810 
811 	/* the actual value may have changed during provisioning */
812 	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
813 			 name, actual, unit(actual), what);
814 	return 0;
815 }
816 
817 /**
818  * xe_gt_sriov_pf_config_set_ctxs - Configure GuC contexts IDs quota for the VF.
819  * @gt: the &xe_gt
820  * @vfid: the VF identifier
821  * @num_ctxs: requested number of GuC contexts IDs (0 to release)
822  *
823  * This function can only be called on PF.
824  *
825  * Return: 0 on success or a negative error code on failure.
826  */
827 int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
828 {
829 	int err;
830 
831 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
832 	if (vfid)
833 		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
834 	else
835 		err = pf_set_spare_ctxs(gt, num_ctxs);
836 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
837 
838 	return pf_config_set_u32_done(gt, vfid, num_ctxs,
839 				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
840 				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
841 }
842 
843 static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
844 				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
845 				       const char *what, const char *(*unit)(u32),
846 				       unsigned int last, int err)
847 {
848 	xe_gt_assert(gt, first);
849 	xe_gt_assert(gt, num_vfs);
850 	xe_gt_assert(gt, first <= last);
851 
852 	if (num_vfs == 1)
853 		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
854 
855 	if (unlikely(err)) {
856 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
857 				   first, first + num_vfs - 1, what);
858 		if (last > first)
859 			pf_config_bulk_set_u32_done(gt, first, last - first, value,
860 						    get, what, unit, last, 0);
861 		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
862 	}
863 
864 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
865 	value = get(gt, first);
866 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
867 			 first, first + num_vfs - 1, value, unit(value), what);
868 	return 0;
869 }
870 
871 /**
872  * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
873  * @gt: the &xe_gt
874  * @vfid: starting VF identifier
875  * @num_vfs: number of VFs to provision
876  * @num_ctxs: requested number of GuC contexts IDs (0 to release)
877  *
878  * This function can only be called on PF.
879  *
880  * Return: 0 on success or a negative error code on failure.
881  */
882 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
883 					unsigned int num_vfs, u32 num_ctxs)
884 {
885 	unsigned int n;
886 	int err = 0;
887 
888 	xe_gt_assert(gt, vfid);
889 
890 	if (!num_vfs)
891 		return 0;
892 
893 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
894 	for (n = vfid; n < vfid + num_vfs; n++) {
895 		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
896 		if (err)
897 			break;
898 	}
899 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
900 
901 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
902 					   xe_gt_sriov_pf_config_get_ctxs,
903 					   "GuC context IDs", no_unit, n, err);
904 }
905 
906 static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
907 {
908 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
909 	u32 spare = pf_get_spare_ctxs(gt);
910 	u32 fair = (idm->total - spare) / num_vfs;
911 	int ret;
912 
913 	for (; fair; --fair) {
914 		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
915 		if (ret < 0)
916 			continue;
917 		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
918 		break;
919 	}
920 
921 	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
922 	return fair;
923 }
924 
925 /**
926  * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
927  * @gt: the &xe_gt
928  * @vfid: starting VF identifier (can't be 0)
929  * @num_vfs: number of VFs to provision (can't be 0)
930  *
931  * This function can only be called on PF.
932  *
933  * Return: 0 on success or a negative error code on failure.
934  */
935 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
936 					unsigned int num_vfs)
937 {
938 	u32 fair;
939 
940 	xe_gt_assert(gt, vfid);
941 	xe_gt_assert(gt, num_vfs);
942 
943 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
944 	fair = pf_estimate_fair_ctxs(gt, num_vfs);
945 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
946 
947 	if (!fair)
948 		return -ENOSPC;
949 
950 	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
951 }
952 
953 static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
954 {
955 	/* XXX: preliminary, we don't use doorbells yet! */
956 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
957 }
958 
959 static u32 pf_get_spare_dbs(struct xe_gt *gt)
960 {
961 	u32 spare;
962 
963 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
964 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
965 
966 	spare = gt->sriov.pf.spare.num_dbs;
967 	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));
968 
969 	return spare;
970 }
971 
972 static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
973 {
974 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
975 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
976 
977 	if (spare > GUC_NUM_DOORBELLS)
978 		return -EINVAL;
979 
980 	if (spare && spare < pf_get_min_spare_dbs(gt))
981 		return -EINVAL;
982 
983 	gt->sriov.pf.spare.num_dbs = spare;
984 	return 0;
985 }
986 
987 /* Return: start ID or negative error code on failure */
988 static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
989 {
990 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
991 	unsigned int spare = pf_get_spare_dbs(gt);
992 
993 	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
994 }
995 
996 static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
997 {
998 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
999 
1000 	if (num)
1001 		xe_guc_db_mgr_release_range(dbm, start, num);
1002 }
1003 
1004 static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1005 {
1006 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1007 
1008 	pf_release_dbs(gt, config->begin_db, config->num_dbs);
1009 	config->begin_db = 0;
1010 	config->num_dbs = 0;
1011 }
1012 
1013 static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1014 {
1015 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1016 	int ret;
1017 
1018 	xe_gt_assert(gt, vfid);
1019 
1020 	if (num_dbs > GUC_NUM_DOORBELLS)
1021 		return -EINVAL;
1022 
1023 	if (config->num_dbs) {
1024 		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
1025 		if (unlikely(ret))
1026 			return ret;
1027 
1028 		pf_release_config_dbs(gt, config);
1029 	}
1030 
1031 	if (!num_dbs)
1032 		return 0;
1033 
1034 	ret = pf_reserve_dbs(gt, num_dbs);
1035 	if (unlikely(ret < 0))
1036 		return ret;
1037 
1038 	config->begin_db = ret;
1039 	config->num_dbs = num_dbs;
1040 
1041 	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
1042 	if (unlikely(ret)) {
1043 		pf_release_config_dbs(gt, config);
1044 		return ret;
1045 	}
1046 
1047 	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
1048 				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
1049 	return 0;
1050 }
1051 
1052 static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
1053 {
1054 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1055 
1056 	return config->num_dbs;
1057 }
1058 
1059 /**
1060  * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbells IDs quota.
1061  * @gt: the &xe_gt
1062  * @vfid: the VF identifier
1063  *
1064  * This function can only be called on PF.
1065  * If &vfid represents a PF then number of PF's spare GuC doorbells IDs is returned.
1066  *
1067  * Return: VF's quota (or PF's spare).
1068  */
1069 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
1070 {
1071 	u32 num_dbs;
1072 
1073 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1074 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1075 
1076 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1077 	if (vfid)
1078 		num_dbs = pf_get_vf_config_dbs(gt, vfid);
1079 	else
1080 		num_dbs = pf_get_spare_dbs(gt);
1081 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1082 
1083 	return num_dbs;
1084 }
1085 
1086 /**
1087  * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbells IDs quota for the VF.
1088  * @gt: the &xe_gt
1089  * @vfid: the VF identifier
1090  * @num_dbs: requested number of GuC doorbells IDs (0 to release)
1091  *
1092  * This function can only be called on PF.
1093  *
1094  * Return: 0 on success or a negative error code on failure.
1095  */
1096 int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1097 {
1098 	int err;
1099 
1100 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1101 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1102 
1103 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1104 	if (vfid)
1105 		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
1106 	else
1107 		err = pf_set_spare_dbs(gt, num_dbs);
1108 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1109 
1110 	return pf_config_set_u32_done(gt, vfid, num_dbs,
1111 				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
1112 				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
1113 }
1114 
1115 /**
1116  * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC context IDs.
1117  * @gt: the &xe_gt
1118  * @vfid: starting VF identifier (can't be 0)
1119  * @num_vfs: number of VFs to provision
1120  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1121  *
1122  * This function can only be called on PF.
1123  *
1124  * Return: 0 on success or a negative error code on failure.
1125  */
1126 int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
1127 				       unsigned int num_vfs, u32 num_dbs)
1128 {
1129 	unsigned int n;
1130 	int err = 0;
1131 
1132 	xe_gt_assert(gt, vfid);
1133 
1134 	if (!num_vfs)
1135 		return 0;
1136 
1137 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1138 	for (n = vfid; n < vfid + num_vfs; n++) {
1139 		err = pf_provision_vf_dbs(gt, n, num_dbs);
1140 		if (err)
1141 			break;
1142 	}
1143 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1144 
1145 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
1146 					   xe_gt_sriov_pf_config_get_dbs,
1147 					   "GuC doorbell IDs", no_unit, n, err);
1148 }
1149 
1150 static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
1151 {
1152 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1153 	u32 spare = pf_get_spare_dbs(gt);
1154 	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
1155 	int ret;
1156 
1157 	for (; fair; --fair) {
1158 		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
1159 		if (ret < 0)
1160 			continue;
1161 		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
1162 		break;
1163 	}
1164 
1165 	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
1166 	return fair;
1167 }
1168 
1169 /**
1170  * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell  IDs.
1171  * @gt: the &xe_gt
1172  * @vfid: starting VF identifier (can't be 0)
1173  * @num_vfs: number of VFs to provision (can't be 0)
1174  *
1175  * This function can only be called on PF.
1176  *
1177  * Return: 0 on success or a negative error code on failure.
1178  */
1179 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
1180 				       unsigned int num_vfs)
1181 {
1182 	u32 fair;
1183 
1184 	xe_gt_assert(gt, vfid);
1185 	xe_gt_assert(gt, num_vfs);
1186 
1187 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1188 	fair = pf_estimate_fair_dbs(gt, num_vfs);
1189 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1190 
1191 	if (!fair)
1192 		return -ENOSPC;
1193 
1194 	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
1195 }
1196 
1197 static u64 pf_get_lmem_alignment(struct xe_gt *gt)
1198 {
1199 	/* this might be platform dependent */
1200 	return SZ_2M;
1201 }
1202 
1203 static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
1204 {
1205 	/* this might be platform dependent */
1206 	return SZ_128M; /* XXX: preliminary */
1207 }
1208 
1209 static u64 pf_get_spare_lmem(struct xe_gt *gt)
1210 {
1211 	u64 spare;
1212 
1213 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1214 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1215 
1216 	spare = gt->sriov.pf.spare.lmem_size;
1217 	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));
1218 
1219 	return spare;
1220 }
1221 
1222 static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
1223 {
1224 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1225 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1226 
1227 	if (size && size < pf_get_min_spare_lmem(gt))
1228 		return -EINVAL;
1229 
1230 	gt->sriov.pf.spare.lmem_size = size;
1231 	return 0;
1232 }
1233 
1234 static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
1235 {
1236 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1237 	struct xe_bo *bo;
1238 
1239 	bo = config->lmem_obj;
1240 	return bo ? bo->size : 0;
1241 }
1242 
1243 static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1244 {
1245 	struct xe_device *xe = gt_to_xe(gt);
1246 	struct xe_tile *tile;
1247 	unsigned int tid;
1248 	int err;
1249 
1250 	for_each_tile(tile, xe, tid) {
1251 		if (tile->primary_gt == gt) {
1252 			err = pf_push_vf_cfg_lmem(gt, vfid, size);
1253 		} else {
1254 			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);
1255 
1256 			if (!lmem)
1257 				continue;
1258 			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
1259 		}
1260 		if (unlikely(err))
1261 			return err;
1262 	}
1263 	return 0;
1264 }
1265 
1266 static void pf_force_lmtt_invalidate(struct xe_device *xe)
1267 {
1268 	/* TODO */
1269 }
1270 
1271 static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1272 {
1273 	struct xe_lmtt *lmtt;
1274 	struct xe_tile *tile;
1275 	unsigned int tid;
1276 
1277 	xe_assert(xe, IS_DGFX(xe));
1278 	xe_assert(xe, IS_SRIOV_PF(xe));
1279 
1280 	for_each_tile(tile, xe, tid) {
1281 		lmtt = &tile->sriov.pf.lmtt;
1282 		xe_lmtt_drop_pages(lmtt, vfid);
1283 	}
1284 }
1285 
1286 static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1287 {
1288 	struct xe_gt_sriov_config *config;
1289 	struct xe_tile *tile;
1290 	struct xe_lmtt *lmtt;
1291 	struct xe_bo *bo;
1292 	struct xe_gt *gt;
1293 	u64 total, offset;
1294 	unsigned int gtid;
1295 	unsigned int tid;
1296 	int err;
1297 
1298 	xe_assert(xe, IS_DGFX(xe));
1299 	xe_assert(xe, IS_SRIOV_PF(xe));
1300 
1301 	total = 0;
1302 	for_each_tile(tile, xe, tid)
1303 		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
1304 
1305 	for_each_tile(tile, xe, tid) {
1306 		lmtt = &tile->sriov.pf.lmtt;
1307 
1308 		xe_lmtt_drop_pages(lmtt, vfid);
1309 		if (!total)
1310 			continue;
1311 
1312 		err  = xe_lmtt_prepare_pages(lmtt, vfid, total);
1313 		if (err)
1314 			goto fail;
1315 
1316 		offset = 0;
1317 		for_each_gt(gt, xe, gtid) {
1318 			if (xe_gt_is_media_type(gt))
1319 				continue;
1320 
1321 			config = pf_pick_vf_config(gt, vfid);
1322 			bo = config->lmem_obj;
1323 			if (!bo)
1324 				continue;
1325 
1326 			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
1327 			if (err)
1328 				goto fail;
1329 			offset += bo->size;
1330 		}
1331 	}
1332 
1333 	pf_force_lmtt_invalidate(xe);
1334 	return 0;
1335 
1336 fail:
1337 	for_each_tile(tile, xe, tid) {
1338 		lmtt = &tile->sriov.pf.lmtt;
1339 		xe_lmtt_drop_pages(lmtt, vfid);
1340 	}
1341 	return err;
1342 }
1343 
1344 static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1345 {
1346 	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
1347 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1348 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1349 
1350 	if (config->lmem_obj) {
1351 		xe_bo_unpin_map_no_vm(config->lmem_obj);
1352 		config->lmem_obj = NULL;
1353 	}
1354 }
1355 
1356 static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1357 {
1358 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1359 	struct xe_device *xe = gt_to_xe(gt);
1360 	struct xe_tile *tile = gt_to_tile(gt);
1361 	struct xe_bo *bo;
1362 	int err;
1363 
1364 	xe_gt_assert(gt, vfid);
1365 	xe_gt_assert(gt, IS_DGFX(xe));
1366 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1367 
1368 	size = round_up(size, pf_get_lmem_alignment(gt));
1369 
1370 	if (config->lmem_obj) {
1371 		err = pf_distribute_config_lmem(gt, vfid, 0);
1372 		if (unlikely(err))
1373 			return err;
1374 
1375 		pf_reset_vf_lmtt(xe, vfid);
1376 		pf_release_vf_config_lmem(gt, config);
1377 	}
1378 	xe_gt_assert(gt, !config->lmem_obj);
1379 
1380 	if (!size)
1381 		return 0;
1382 
1383 	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
1384 	bo = xe_bo_create_pin_map(xe, tile, NULL,
1385 				  ALIGN(size, PAGE_SIZE),
1386 				  ttm_bo_type_kernel,
1387 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1388 				  XE_BO_FLAG_PINNED);
1389 	if (IS_ERR(bo))
1390 		return PTR_ERR(bo);
1391 
1392 	config->lmem_obj = bo;
1393 
1394 	err = pf_update_vf_lmtt(xe, vfid);
1395 	if (unlikely(err))
1396 		goto release;
1397 
1398 	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
1399 	if (unlikely(err))
1400 		goto reset_lmtt;
1401 
1402 	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
1403 				vfid, bo->size, bo->size / SZ_1M);
1404 	return 0;
1405 
1406 reset_lmtt:
1407 	pf_reset_vf_lmtt(xe, vfid);
1408 release:
1409 	pf_release_vf_config_lmem(gt, config);
1410 	return err;
1411 }
1412 
1413 /**
1414  * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
1415  * @gt: the &xe_gt
1416  * @vfid: the VF identifier
1417  *
1418  * This function can only be called on PF.
1419  *
1420  * Return: VF's (or PF's spare) LMEM quota.
1421  */
1422 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
1423 {
1424 	u64 size;
1425 
1426 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1427 	if (vfid)
1428 		size = pf_get_vf_config_lmem(gt, vfid);
1429 	else
1430 		size = pf_get_spare_lmem(gt);
1431 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1432 
1433 	return size;
1434 }
1435 
1436 /**
1437  * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
1438  * @gt: the &xe_gt (can't be media)
1439  * @vfid: the VF identifier
1440  * @size: requested LMEM size
1441  *
1442  * This function can only be called on PF.
1443  */
1444 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1445 {
1446 	int err;
1447 
1448 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1449 	if (vfid)
1450 		err = pf_provision_vf_lmem(gt, vfid, size);
1451 	else
1452 		err = pf_set_spare_lmem(gt, size);
1453 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1454 
1455 	return pf_config_set_u64_done(gt, vfid, size,
1456 				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
1457 				      vfid ? "LMEM" : "spare LMEM", err);
1458 }
1459 
1460 /**
1461  * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
1462  * @gt: the &xe_gt (can't be media)
1463  * @vfid: starting VF identifier (can't be 0)
1464  * @num_vfs: number of VFs to provision
1465  * @size: requested LMEM size
1466  *
1467  * This function can only be called on PF.
1468  *
1469  * Return: 0 on success or a negative error code on failure.
1470  */
1471 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
1472 					unsigned int num_vfs, u64 size)
1473 {
1474 	unsigned int n;
1475 	int err = 0;
1476 
1477 	xe_gt_assert(gt, vfid);
1478 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1479 
1480 	if (!num_vfs)
1481 		return 0;
1482 
1483 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1484 	for (n = vfid; n < vfid + num_vfs; n++) {
1485 		err = pf_provision_vf_lmem(gt, n, size);
1486 		if (err)
1487 			break;
1488 	}
1489 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1490 
1491 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
1492 					   xe_gt_sriov_pf_config_get_lmem,
1493 					   "LMEM", n, err);
1494 }
1495 
1496 static u64 pf_query_free_lmem(struct xe_gt *gt)
1497 {
1498 	struct xe_tile *tile = gt->tile;
1499 
1500 	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
1501 }
1502 
1503 static u64 pf_query_max_lmem(struct xe_gt *gt)
1504 {
1505 	u64 alignment = pf_get_lmem_alignment(gt);
1506 	u64 spare = pf_get_spare_lmem(gt);
1507 	u64 free = pf_query_free_lmem(gt);
1508 	u64 avail;
1509 
1510 	/* XXX: need to account for 2MB blocks only */
1511 	avail = free > spare ? free - spare : 0;
1512 	avail = round_down(avail, alignment);
1513 
1514 	return avail;
1515 }
1516 
1517 #ifdef CONFIG_DRM_XE_DEBUG_SRIOV
1518 #define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
1519 #else
1520 #define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
1521 #endif
1522 
1523 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
1524 {
1525 	u64 available = pf_query_max_lmem(gt);
1526 	u64 alignment = pf_get_lmem_alignment(gt);
1527 	u64 fair;
1528 
1529 	fair = div_u64(available, num_vfs);
1530 	fair = ALIGN_DOWN(fair, alignment);
1531 #ifdef MAX_FAIR_LMEM
1532 	fair = min_t(u64, MAX_FAIR_LMEM, fair);
1533 #endif
1534 	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
1535 				available / SZ_1M, num_vfs, fair / SZ_1M);
1536 	return fair;
1537 }
1538 
1539 /**
1540  * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
1541  * @gt: the &xe_gt (can't be media)
1542  * @vfid: starting VF identifier (can't be 0)
1543  * @num_vfs: number of VFs to provision (can't be 0)
1544  *
1545  * This function can only be called on PF.
1546  *
1547  * Return: 0 on success or a negative error code on failure.
1548  */
1549 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
1550 					unsigned int num_vfs)
1551 {
1552 	u64 fair;
1553 
1554 	xe_gt_assert(gt, vfid);
1555 	xe_gt_assert(gt, num_vfs);
1556 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1557 
1558 	if (!IS_DGFX(gt_to_xe(gt)))
1559 		return 0;
1560 
1561 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1562 	fair = pf_estimate_fair_lmem(gt, num_vfs);
1563 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1564 
1565 	if (!fair)
1566 		return -ENOSPC;
1567 
1568 	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
1569 }
1570 
1571 /**
1572  * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
1573  * @gt: the &xe_gt
1574  * @vfid: starting VF identifier (can't be 0)
1575  * @num_vfs: number of VFs to provision (can't be 0)
1576  *
1577  * This function can only be called on PF.
1578  *
1579  * Return: 0 on success or a negative error code on failure.
1580  */
1581 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
1582 				   unsigned int num_vfs)
1583 {
1584 	int result = 0;
1585 	int err;
1586 
1587 	xe_gt_assert(gt, vfid);
1588 	xe_gt_assert(gt, num_vfs);
1589 
1590 	if (!xe_gt_is_media_type(gt)) {
1591 		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
1592 		result = result ?: err;
1593 		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
1594 		result = result ?: err;
1595 	}
1596 	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
1597 	result = result ?: err;
1598 	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
1599 	result = result ?: err;
1600 
1601 	return result;
1602 }
1603 
1604 static const char *exec_quantum_unit(u32 exec_quantum)
1605 {
1606 	return exec_quantum ? "ms" : "(infinity)";
1607 }
1608 
1609 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1610 				     u32 exec_quantum)
1611 {
1612 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1613 	int err;
1614 
1615 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, exec_quantum);
1616 	if (unlikely(err))
1617 		return err;
1618 
1619 	config->exec_quantum = exec_quantum;
1620 	return 0;
1621 }
1622 
1623 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1624 {
1625 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1626 
1627 	return config->exec_quantum;
1628 }
1629 
1630 /**
1631  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1632  * @gt: the &xe_gt
1633  * @vfid: the VF identifier
1634  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1635  *
1636  * This function can only be called on PF.
1637  *
1638  * Return: 0 on success or a negative error code on failure.
1639  */
1640 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1641 					   u32 exec_quantum)
1642 {
1643 	int err;
1644 
1645 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1646 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1647 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1648 
1649 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1650 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1651 				      "execution quantum", exec_quantum_unit, err);
1652 }
1653 
1654 /**
1655  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1656  * @gt: the &xe_gt
1657  * @vfid: the VF identifier
1658  *
1659  * This function can only be called on PF.
1660  *
1661  * Return: VF's (or PF's) execution quantum in milliseconds.
1662  */
1663 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1664 {
1665 	u32 exec_quantum;
1666 
1667 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1668 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1669 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1670 
1671 	return exec_quantum;
1672 }
1673 
1674 static const char *preempt_timeout_unit(u32 preempt_timeout)
1675 {
1676 	return preempt_timeout ? "us" : "(infinity)";
1677 }
1678 
1679 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1680 					u32 preempt_timeout)
1681 {
1682 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1683 	int err;
1684 
1685 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, preempt_timeout);
1686 	if (unlikely(err))
1687 		return err;
1688 
1689 	config->preempt_timeout = preempt_timeout;
1690 
1691 	return 0;
1692 }
1693 
1694 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1695 {
1696 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1697 
1698 	return config->preempt_timeout;
1699 }
1700 
1701 /**
1702  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1703  * @gt: the &xe_gt
1704  * @vfid: the VF identifier
1705  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1706  *
1707  * This function can only be called on PF.
1708  *
1709  * Return: 0 on success or a negative error code on failure.
1710  */
1711 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1712 					      u32 preempt_timeout)
1713 {
1714 	int err;
1715 
1716 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1717 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1718 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1719 
1720 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1721 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1722 				      "preemption timeout", preempt_timeout_unit, err);
1723 }
1724 
1725 /**
1726  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1727  * @gt: the &xe_gt
1728  * @vfid: the VF identifier
1729  *
1730  * This function can only be called on PF.
1731  *
1732  * Return: VF's (or PF's) preemption timeout in microseconds.
1733  */
1734 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1735 {
1736 	u32 preempt_timeout;
1737 
1738 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1739 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1740 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1741 
1742 	return preempt_timeout;
1743 }
1744 
1745 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1746 {
1747 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1748 
1749 	config->exec_quantum = 0;
1750 	config->preempt_timeout = 0;
1751 }
1752 
1753 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
1754 {
1755 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1756 	struct xe_device *xe = gt_to_xe(gt);
1757 
1758 	if (!xe_gt_is_media_type(gt)) {
1759 		pf_release_vf_config_ggtt(gt, config);
1760 		if (IS_DGFX(xe)) {
1761 			pf_release_vf_config_lmem(gt, config);
1762 			pf_update_vf_lmtt(xe, vfid);
1763 		}
1764 	}
1765 	pf_release_config_ctxs(gt, config);
1766 	pf_release_config_dbs(gt, config);
1767 	pf_reset_config_sched(gt, config);
1768 }
1769 
1770 /**
1771  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
1772  * @gt: the &xe_gt
1773  * @vfid: the VF identifier (can't be PF)
1774  * @force: force configuration release
1775  *
1776  * This function can only be called on PF.
1777  *
1778  * Return: 0 on success or a negative error code on failure.
1779  */
1780 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
1781 {
1782 	int err;
1783 
1784 	xe_gt_assert(gt, vfid);
1785 
1786 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1787 	err = pf_send_vf_cfg_reset(gt, vfid);
1788 	if (!err || force)
1789 		pf_release_vf_config(gt, vfid);
1790 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1791 
1792 	if (unlikely(err)) {
1793 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
1794 				   vfid, ERR_PTR(err),
1795 				   force ? " but all resources were released anyway!" : "");
1796 	}
1797 
1798 	return force ? 0 : err;
1799 }
1800 
1801 /**
1802  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
1803  * @gt: the &xe_gt
1804  * @vfid: the VF identifier (can't be PF)
1805  * @refresh: explicit refresh
1806  *
1807  * This function can only be called on PF.
1808  *
1809  * Return: 0 on success or a negative error code on failure.
1810  */
1811 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
1812 {
1813 	int err = 0;
1814 
1815 	xe_gt_assert(gt, vfid);
1816 
1817 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1818 	if (refresh)
1819 		err = pf_send_vf_cfg_reset(gt, vfid);
1820 	if (!err)
1821 		err = pf_push_full_vf_config(gt, vfid);
1822 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1823 
1824 	if (unlikely(err)) {
1825 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
1826 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
1827 	}
1828 
1829 	return err;
1830 }
1831 
1832 /**
1833  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
1834  * @gt: the &xe_gt
1835  * @p: the &drm_printer
1836  *
1837  * Print GGTT configuration data for all VFs.
1838  * VFs without provisioned GGTT are ignored.
1839  *
1840  * This function can only be called on PF.
1841  */
1842 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
1843 {
1844 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
1845 	const struct xe_gt_sriov_config *config;
1846 	char buf[10];
1847 
1848 	for (n = 1; n <= total_vfs; n++) {
1849 		config = &gt->sriov.pf.vfs[n].config;
1850 		if (!drm_mm_node_allocated(&config->ggtt_region))
1851 			continue;
1852 
1853 		string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
1854 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
1855 			   n, config->ggtt_region.start,
1856 			   config->ggtt_region.start + config->ggtt_region.size - 1, buf);
1857 	}
1858 
1859 	return 0;
1860 }
1861 
1862 /**
1863  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
1864  * @gt: the &xe_gt
1865  * @p: the &drm_printer
1866  *
1867  * Print GuC context ID allocations across all VFs.
1868  * VFs without GuC context IDs are skipped.
1869  *
1870  * This function can only be called on PF.
1871  * Return: 0 on success or a negative error code on failure.
1872  */
1873 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
1874 {
1875 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
1876 	const struct xe_gt_sriov_config *config;
1877 
1878 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1879 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1880 
1881 	for (n = 1; n <= total_vfs; n++) {
1882 		config = &gt->sriov.pf.vfs[n].config;
1883 		if (!config->num_ctxs)
1884 			continue;
1885 
1886 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
1887 			   n,
1888 			   config->begin_ctx,
1889 			   config->begin_ctx + config->num_ctxs - 1,
1890 			   config->num_ctxs);
1891 	}
1892 
1893 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1894 	return 0;
1895 }
1896 
1897 /**
1898  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
1899  * @gt: the &xe_gt
1900  * @p: the &drm_printer
1901  *
1902  * Print GuC doorbell IDs allocations across all VFs.
1903  * VFs without GuC doorbell IDs are skipped.
1904  *
1905  * This function can only be called on PF.
1906  * Return: 0 on success or a negative error code on failure.
1907  */
1908 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
1909 {
1910 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
1911 	const struct xe_gt_sriov_config *config;
1912 
1913 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1914 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1915 
1916 	for (n = 1; n <= total_vfs; n++) {
1917 		config = &gt->sriov.pf.vfs[n].config;
1918 		if (!config->num_dbs)
1919 			continue;
1920 
1921 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
1922 			   n,
1923 			   config->begin_db,
1924 			   config->begin_db + config->num_dbs - 1,
1925 			   config->num_dbs);
1926 	}
1927 
1928 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1929 	return 0;
1930 }
1931 
1932 /**
1933  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
1934  * @gt: the &xe_gt
1935  * @p: the &drm_printer
1936  *
1937  * Print GGTT ranges that are available for the provisioning.
1938  *
1939  * This function can only be called on PF.
1940  */
1941 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
1942 {
1943 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
1944 	const struct drm_mm *mm = &ggtt->mm;
1945 	const struct drm_mm_node *entry;
1946 	u64 alignment = pf_get_ggtt_alignment(gt);
1947 	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
1948 	u64 hole_start, hole_end, hole_size;
1949 	u64 spare, avail, total = 0;
1950 	char buf[10];
1951 
1952 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1953 
1954 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1955 
1956 	spare = pf_get_spare_ggtt(gt);
1957 
1958 	mutex_lock(&ggtt->lock);
1959 
1960 	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
1961 		hole_start = max(hole_start, hole_min_start);
1962 		hole_start = ALIGN(hole_start, alignment);
1963 		hole_end = ALIGN_DOWN(hole_end, alignment);
1964 		if (hole_start >= hole_end)
1965 			continue;
1966 		hole_size = hole_end - hole_start;
1967 		total += hole_size;
1968 
1969 		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
1970 		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
1971 			   hole_start, hole_end - 1, buf);
1972 	}
1973 
1974 	mutex_unlock(&ggtt->lock);
1975 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1976 
1977 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
1978 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
1979 
1980 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
1981 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
1982 
1983 	avail = total > spare ? total - spare : 0;
1984 
1985 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
1986 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
1987 
1988 	return 0;
1989 }
1990