1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023-2024 Intel Corporation
4  */
5 
6 #include <linux/string_choices.h>
7 #include <linux/wordpart.h>
8 
9 #include "abi/guc_actions_sriov_abi.h"
10 #include "abi/guc_klvs_abi.h"
11 
12 #include "regs/xe_guc_regs.h"
13 
14 #include "xe_bo.h"
15 #include "xe_device.h"
16 #include "xe_ggtt.h"
17 #include "xe_gt.h"
18 #include "xe_gt_sriov_pf_config.h"
19 #include "xe_gt_sriov_pf_helpers.h"
20 #include "xe_gt_sriov_pf_policy.h"
21 #include "xe_gt_sriov_printk.h"
22 #include "xe_guc.h"
23 #include "xe_guc_ct.h"
24 #include "xe_guc_db_mgr.h"
25 #include "xe_guc_fwif.h"
26 #include "xe_guc_id_mgr.h"
27 #include "xe_guc_klv_helpers.h"
28 #include "xe_guc_klv_thresholds_set.h"
29 #include "xe_guc_submit.h"
30 #include "xe_lmtt.h"
31 #include "xe_map.h"
32 #include "xe_sriov.h"
33 #include "xe_ttm_vram_mgr.h"
34 #include "xe_wopcm.h"
35 
36 /*
37  * Return: number of KLVs that were successfully parsed and saved,
38  *         negative error code on failure.
39  */
40 static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
41 				    u64 addr, u32 size)
42 {
43 	u32 request[] = {
44 		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
45 		vfid,
46 		lower_32_bits(addr),
47 		upper_32_bits(addr),
48 		size,
49 	};
50 
51 	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
52 }
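
/*
 * Illustrative sketch (made-up values): for VF1 with an 8-dword KLV
 * buffer placed at GGTT offset 0x100000, the H2G request built above
 * would carry:
 *
 *	request[0] = GUC_ACTION_PF2GUC_UPDATE_VF_CFG
 *	request[1] = 1			(vfid)
 *	request[2] = 0x00100000		(lower 32 bits of addr)
 *	request[3] = 0x00000000		(upper 32 bits of addr)
 *	request[4] = 8			(size in dwords)
 */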
53 
54 /*
55  * Return: 0 on success, negative error code on failure.
56  */
57 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
58 {
59 	struct xe_guc *guc = &gt->uc.guc;
60 	int ret;
61 
62 	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);
63 
64 	return ret <= 0 ? ret : -EPROTO;
65 }
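
/*
 * A reset is requested by sending PF2GUC_UPDATE_VF_CFG with addr = 0 and
 * size = 0, so the only valid reply is "0 KLVs parsed". Sketch of the
 * mapping above:
 *
 *	ret < 0   ->  ret     (send/CTB error, passed through)
 *	ret == 0  ->  0       (success)
 *	ret > 0   ->  -EPROTO (GuC claims it parsed KLVs we never sent)
 */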
66 
67 /*
68  * Return: number of KLVs that were successfully parsed and saved,
69  *         negative error code on failure.
70  */
71 static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
72 {
73 	const u32 bytes = num_dwords * sizeof(u32);
74 	struct xe_tile *tile = gt_to_tile(gt);
75 	struct xe_device *xe = tile_to_xe(tile);
76 	struct xe_guc *guc = &gt->uc.guc;
77 	struct xe_bo *bo;
78 	int ret;
79 
80 	bo = xe_bo_create_pin_map(xe, tile, NULL,
81 				  ALIGN(bytes, PAGE_SIZE),
82 				  ttm_bo_type_kernel,
83 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
84 				  XE_BO_FLAG_GGTT |
85 				  XE_BO_FLAG_GGTT_INVALIDATE);
86 	if (IS_ERR(bo))
87 		return PTR_ERR(bo);
88 
89 	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
90 
91 	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
92 
93 	xe_bo_unpin_map_no_vm(bo);
94 
95 	return ret;
96 }
97 
98 /*
99  * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
100  *         or any other negative error code on failure.
101  */
102 static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
103 			       const u32 *klvs, u32 num_dwords)
104 {
105 	int ret;
106 
107 	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
108 
109 	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
110 
111 	if (ret != num_klvs) {
112 		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
113 		struct drm_printer p = xe_gt_info_printer(gt);
114 		char name[8];
115 
116 		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
117 				   xe_sriov_function_name(vfid, name, sizeof(name)),
118 				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
119 		xe_guc_klv_print(klvs, num_dwords, &p);
120 		return err;
121 	}
122 
123 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
124 		struct drm_printer p = xe_gt_info_printer(gt);
125 
126 		xe_guc_klv_print(klvs, num_dwords, &p);
127 	}
128 
129 	return 0;
130 }
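
/*
 * Sketch of the partial-acceptance mapping above, assuming num_klvs == 4
 * were pushed:
 *
 *	ret == 4  ->  0       (all KLVs accepted)
 *	ret == 2  ->  -ENOKEY (only some KLVs were accepted)
 *	ret == 5  ->  -EPROTO (malformed reply)
 *	ret < 0   ->  ret     (send error, passed through)
 */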
131 
132 static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
133 {
134 	u32 klv[] = {
135 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
136 		value,
137 	};
138 
139 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
140 }
141 
142 static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
143 {
144 	u32 klv[] = {
145 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
146 		lower_32_bits(value),
147 		upper_32_bits(value),
148 	};
149 
150 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
151 }
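
/*
 * Example of the resulting 64-bit KLV layout (illustrative value):
 * pushing value = 0x1_8000_0000 (6G) produces three dwords:
 *
 *	klv[0] = FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2)
 *	klv[1] = 0x80000000	(lower 32 bits)
 *	klv[2] = 0x00000001	(upper 32 bits)
 */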
152 
153 static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
154 {
155 	u32 klvs[] = {
156 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
157 		lower_32_bits(start),
158 		upper_32_bits(start),
159 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
160 		lower_32_bits(size),
161 		upper_32_bits(size),
162 	};
163 
164 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
165 }
166 
167 static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
168 {
169 	u32 klvs[] = {
170 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
171 		begin,
172 		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
173 		num,
174 	};
175 
176 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
177 }
178 
179 static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
180 {
181 	u32 klvs[] = {
182 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
183 		begin,
184 		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
185 		num,
186 	};
187 
188 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
189 }
190 
191 static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
192 {
193 	/* GuC will silently clamp values exceeding max */
194 	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);
195 
196 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
197 }
198 
199 static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
200 {
201 	/* GuC will silently clamp values exceeding max */
202 	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);
203 
204 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
205 }
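
/*
 * Both helpers above clamp the requested value before pushing it, so the
 * value saved in the PF config always matches what GuC will enforce,
 * e.g. (sketch) a request of U32_MAX ms is pushed and stored as
 * GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE instead of letting GuC clamp it
 * silently while the PF keeps reporting the raw request.
 */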
206 
207 static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
208 {
209 	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
210 }
211 
212 static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
213 				    enum xe_guc_klv_threshold_index index, u32 value)
214 {
215 	u32 key = xe_guc_klv_threshold_index_to_key(index);
216 
217 	xe_gt_assert(gt, key);
218 	return pf_push_vf_cfg_u32(gt, vfid, key, value);
219 }
220 
221 static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
222 {
223 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
224 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
225 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
226 
227 	return &gt->sriov.pf.vfs[vfid].config;
228 }
229 
230 /* Return: number of configuration dwords written */
231 static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
232 {
233 	u32 n = 0;
234 
235 	if (drm_mm_node_allocated(&config->ggtt_region)) {
236 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
237 		cfg[n++] = lower_32_bits(config->ggtt_region.start);
238 		cfg[n++] = upper_32_bits(config->ggtt_region.start);
239 
240 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
241 		cfg[n++] = lower_32_bits(config->ggtt_region.size);
242 		cfg[n++] = upper_32_bits(config->ggtt_region.size);
243 	}
244 
245 	return n;
246 }
247 
248 /* Return: number of configuration dwords written */
249 static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
250 {
251 	u32 n = 0;
252 
253 	n += encode_config_ggtt(cfg, config);
254 
255 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
256 	cfg[n++] = config->begin_ctx;
257 
258 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
259 	cfg[n++] = config->num_ctxs;
260 
261 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
262 	cfg[n++] = config->begin_db;
263 
264 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
265 	cfg[n++] = config->num_dbs;
266 
267 	if (config->lmem_obj) {
268 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
269 		cfg[n++] = lower_32_bits(config->lmem_obj->size);
270 		cfg[n++] = upper_32_bits(config->lmem_obj->size);
271 	}
272 
273 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
274 	cfg[n++] = config->exec_quantum;
275 
276 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
277 	cfg[n++] = config->preempt_timeout;
278 
279 	return n;
280 }
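
/*
 * Worked example (sketch): for a fully provisioned VF on a DGFX device
 * encode_config() emits 9 KLVs in 21 dwords:
 *
 *	GGTT_START (3) + GGTT_SIZE (3) +
 *	BEGIN_CONTEXT_ID (2) + NUM_CONTEXTS (2) +
 *	BEGIN_DOORBELL_ID (2) + NUM_DOORBELLS (2) +
 *	LMEM_SIZE (3) + EXEC_QUANTUM (2) + PREEMPT_TIMEOUT (2)
 *
 * which easily fits the SZ_4K (1024 dword) buffer used by
 * pf_push_full_vf_config().
 */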
281 
282 static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
283 {
284 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
285 	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
286 	u32 num_dwords;
287 	int num_klvs;
288 	u32 *cfg;
289 	int err;
290 
291 	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
292 	if (!cfg)
293 		return -ENOMEM;
294 
295 	num_dwords = encode_config(cfg, config);
296 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
297 
298 	if (xe_gt_is_media_type(gt)) {
299 		struct xe_gt *primary = gt->tile->primary_gt;
300 		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
301 
302 		/* media-GT will never include a GGTT config */
303 		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
304 
305 		/* the GGTT config must be taken from the primary-GT instead */
306 		num_dwords += encode_config_ggtt(cfg + num_dwords, other);
307 	}
308 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
309 
310 	num_klvs = xe_guc_klv_count(cfg, num_dwords);
311 	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
312 
313 	kfree(cfg);
314 	return err;
315 }
316 
317 static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
318 {
319 	struct xe_device *xe = gt_to_xe(gt);
320 
321 	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
322 }
323 
324 static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
325 {
326 	/* XXX: preliminary */
327 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
328 		pf_get_ggtt_alignment(gt) : SZ_64M;
329 }
330 
331 static u64 pf_get_spare_ggtt(struct xe_gt *gt)
332 {
333 	u64 spare;
334 
335 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
336 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
337 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
338 
339 	spare = gt->sriov.pf.spare.ggtt_size;
340 	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));
341 
342 	return spare;
343 }
344 
345 static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
346 {
347 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
348 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
349 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
350 
351 	if (size && size < pf_get_min_spare_ggtt(gt))
352 		return -EINVAL;
353 
354 	size = round_up(size, pf_get_ggtt_alignment(gt));
355 	gt->sriov.pf.spare.ggtt_size = size;
356 
357 	return 0;
358 }
359 
360 static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
361 {
362 	int err, err2 = 0;
363 
364 	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);
365 
366 	if (tile->media_gt && !err)
367 		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);
368 
369 	return err ?: err2;
370 }
371 
372 static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
373 {
374 	struct xe_ggtt *ggtt = tile->mem.ggtt;
375 
376 	if (drm_mm_node_allocated(node)) {
377 		/*
378 		 * Explicit GGTT PTE assignment back to the PF using xe_ggtt_assign()
379 		 * is redundant, as the PTEs will be implicitly re-assigned to the PF
380 		 * by the xe_ggtt_clear() called from xe_ggtt_remove_node() below.
381 		 */
382 		xe_ggtt_remove_node(ggtt, node, false);
383 	}
384 }
385 
386 static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
387 {
388 	pf_release_ggtt(gt_to_tile(gt), &config->ggtt_region);
389 }
390 
391 static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
392 {
393 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
394 	struct drm_mm_node *node = &config->ggtt_region;
395 	struct xe_tile *tile = gt_to_tile(gt);
396 	struct xe_ggtt *ggtt = tile->mem.ggtt;
397 	u64 alignment = pf_get_ggtt_alignment(gt);
398 	int err;
399 
400 	xe_gt_assert(gt, vfid);
401 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
402 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
403 
404 	size = round_up(size, alignment);
405 
406 	if (drm_mm_node_allocated(node)) {
407 		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
408 		if (unlikely(err))
409 			return err;
410 
411 		pf_release_ggtt(tile, node);
412 	}
413 	xe_gt_assert(gt, !drm_mm_node_allocated(node));
414 
415 	if (!size)
416 		return 0;
417 
418 	err = xe_ggtt_insert_special_node(ggtt, node, size, alignment);
419 	if (unlikely(err))
420 		return err;
421 
422 	xe_ggtt_assign(ggtt, node, vfid);
423 	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
424 				vfid, node->start, node->start + node->size - 1);
425 
426 	err = pf_distribute_config_ggtt(tile, vfid, node->start, node->size);
427 	if (unlikely(err))
428 		return err;
429 
430 	return 0;
431 }
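
/*
 * Order of operations used above when reprovisioning GGTT (sketch):
 *
 *	1) pf_push_vf_cfg_ggtt(.., 0, 0)    revoke the old range in GuC(s)
 *	2) pf_release_ggtt()                return the old node to drm_mm
 *	3) xe_ggtt_insert_special_node()    allocate the new range
 *	4) xe_ggtt_assign()                 point the GGTT PTEs at the VF
 *	5) pf_distribute_config_ggtt()      push the new range to both GTs
 *
 * so the VF never has a GuC-visible GGTT range that the PF has already
 * reclaimed.
 */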
432 
433 static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
434 {
435 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
436 	struct drm_mm_node *node = &config->ggtt_region;
437 
438 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
439 	return drm_mm_node_allocated(node) ? node->size : 0;
440 }
441 
442 /**
443  * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
444  * @gt: the &xe_gt
445  * @vfid: the VF identifier
446  *
447  * This function can only be called on PF.
448  *
449  * Return: size of the VF's assigned (or PF's spare) GGTT address space.
450  */
451 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
452 {
453 	u64 size;
454 
455 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
456 	if (vfid)
457 		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
458 	else
459 		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
460 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
461 
462 	return size;
463 }
464 
465 static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
466 				  u64 actual, const char *what, int err)
467 {
468 	char size[10];
469 	char name[8];
470 
471 	xe_sriov_function_name(vfid, name, sizeof(name));
472 
473 	if (unlikely(err)) {
474 		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
475 		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
476 				   name, value, size, what, ERR_PTR(err));
477 		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
478 		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
479 				 name, actual, size, what);
480 		return err;
481 	}
482 
483 	/* the actual value may have changed during provisioning */
484 	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
485 	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
486 			 name, actual, size, what);
487 	return 0;
488 }
489 
490 /**
491  * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
492  * @gt: the &xe_gt (can't be media)
493  * @vfid: the VF identifier
494  * @size: requested GGTT size
495  *
496  * If &vfid represents the PF, then the function will change the PF's spare GGTT config.
497  *
498  * This function can only be called on PF.
499  *
500  * Return: 0 on success or a negative error code on failure.
501  */
502 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
503 {
504 	int err;
505 
506 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
507 
508 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
509 	if (vfid)
510 		err = pf_provision_vf_ggtt(gt, vfid, size);
511 	else
512 		err = pf_set_spare_ggtt(gt, size);
513 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
514 
515 	return pf_config_set_u64_done(gt, vfid, size,
516 				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
517 				      vfid ? "GGTT" : "spare GGTT", err);
518 }
519 
520 static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
521 				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
522 				       const char *what, unsigned int last, int err)
523 {
524 	char size[10];
525 
526 	xe_gt_assert(gt, first);
527 	xe_gt_assert(gt, num_vfs);
528 	xe_gt_assert(gt, first <= last);
529 
530 	if (num_vfs == 1)
531 		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);
532 
533 	if (unlikely(err)) {
534 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
535 				   first, first + num_vfs - 1, what);
536 		if (last > first)
537 			pf_config_bulk_set_u64_done(gt, first, last - first, value,
538 						    get, what, last, 0);
539 		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
540 	}
541 
542 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
543 	value = get(gt, first);
544 	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
545 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
546 			 first, first + num_vfs - 1, value, size, what);
547 	return 0;
548 }
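
/*
 * Example of the failure reporting above (sketch, with made-up sizes):
 * bulk provisioning VF1..VF4 with GGTT that fails at VF3 (first = 1,
 * num_vfs = 4, last = 3) logs:
 *
 *	"Failed to bulk provision VF1..VF4 with GGTT"
 *	"VF1..VF2 provisioned with <size> GGTT"        (recursion, err = 0)
 *	"Failed to provision VF3 with <size> GGTT (<err>)"
 *	"VF3 provisioning remains at <actual> GGTT"
 */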
549 
550 /**
551  * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
552  * @gt: the &xe_gt (can't be media)
553  * @vfid: starting VF identifier (can't be 0)
554  * @num_vfs: number of VFs to provision
555  * @size: requested GGTT size
556  *
557  * This function can only be called on PF.
558  *
559  * Return: 0 on success or a negative error code on failure.
560  */
561 int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
562 					unsigned int num_vfs, u64 size)
563 {
564 	unsigned int n;
565 	int err = 0;
566 
567 	xe_gt_assert(gt, vfid);
568 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
569 
570 	if (!num_vfs)
571 		return 0;
572 
573 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
574 	for (n = vfid; n < vfid + num_vfs; n++) {
575 		err = pf_provision_vf_ggtt(gt, n, size);
576 		if (err)
577 			break;
578 	}
579 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
580 
581 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
582 					   xe_gt_sriov_pf_config_get_ggtt,
583 					   "GGTT", n, err);
584 }
585 
586 /* Return: size of the largest continuous GGTT region */
587 static u64 pf_get_max_ggtt(struct xe_gt *gt)
588 {
589 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
590 	const struct drm_mm *mm = &ggtt->mm;
591 	const struct drm_mm_node *entry;
592 	u64 alignment = pf_get_ggtt_alignment(gt);
593 	u64 spare = pf_get_spare_ggtt(gt);
594 	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
595 	u64 hole_start, hole_end, hole_size;
596 	u64 max_hole = 0;
597 
598 	mutex_lock(&ggtt->lock);
599 
600 	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
601 		hole_start = max(hole_start, hole_min_start);
602 		hole_start = ALIGN(hole_start, alignment);
603 		hole_end = ALIGN_DOWN(hole_end, alignment);
604 		if (hole_start >= hole_end)
605 			continue;
606 		hole_size = hole_end - hole_start;
607 		xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
608 					hole_start, hole_size / SZ_1K);
609 		spare -= min3(spare, hole_size, max_hole);
610 		max_hole = max(max_hole, hole_size);
611 	}
612 
613 	mutex_unlock(&ggtt->lock);
614 
615 	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
616 				max_hole / SZ_1K, spare / SZ_1K);
617 	return max_hole > spare ? max_hole - spare : 0;
618 }
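
/*
 * Worked example of the spare accounting above (illustrative): with
 * holes of 100K, 300K and 200K and spare = 150K:
 *
 *	hole 100K: spare -= min3(150K, 100K,   0K) -> spare = 150K, max = 100K
 *	hole 300K: spare -= min3(150K, 300K, 100K) -> spare =  50K, max = 300K
 *	hole 200K: spare -= min3( 50K, 200K, 300K) -> spare =   0K, max = 300K
 *
 * the entire 300K hole remains available (300K - 0K), since the spare
 * can be satisfied from the smaller holes.
 */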
619 
620 static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
621 {
622 	u64 available = pf_get_max_ggtt(gt);
623 	u64 alignment = pf_get_ggtt_alignment(gt);
624 	u64 fair;
625 
626 	/*
627 	 * To simplify the logic we only look at the single largest GGTT region,
628 	 * as that will always be the best fit for the 1 VF case, and will most
629 	 * likely also nicely cover other cases where VFs are provisioned on a
630 	 * fresh and idle PF driver, without any stale GGTT allocations spread
631 	 * in the middle of the full GGTT range.
632 	 */
633 
634 	fair = div_u64(available, num_vfs);
635 	fair = ALIGN_DOWN(fair, alignment);
636 	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
637 				available / SZ_1K, num_vfs, fair / SZ_1K);
638 	return fair;
639 }
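
/*
 * Example (sketch): available = 4032M, num_vfs = 7, alignment = 64K:
 *
 *	fair = ALIGN_DOWN(4032M / 7, 64K) = 576M per VF
 */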
640 
641 /**
642  * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
643  * @gt: the &xe_gt (can't be media)
644  * @vfid: starting VF identifier (can't be 0)
645  * @num_vfs: number of VFs to provision
646  *
647  * This function can only be called on PF.
648  *
649  * Return: 0 on success or a negative error code on failure.
650  */
651 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
652 					unsigned int num_vfs)
653 {
654 	u64 fair;
655 
656 	xe_gt_assert(gt, vfid);
657 	xe_gt_assert(gt, num_vfs);
658 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
659 
660 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
661 	fair = pf_estimate_fair_ggtt(gt, num_vfs);
662 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
663 
664 	if (!fair)
665 		return -ENOSPC;
666 
667 	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
668 }
669 
670 static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
671 {
672 	/* XXX: preliminary */
673 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
674 		hweight64(gt->info.engine_mask) : SZ_256;
675 }
676 
677 static u32 pf_get_spare_ctxs(struct xe_gt *gt)
678 {
679 	u32 spare;
680 
681 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
682 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
683 
684 	spare = gt->sriov.pf.spare.num_ctxs;
685 	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));
686 
687 	return spare;
688 }
689 
690 static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
691 {
692 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
693 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
694 
695 	if (spare > GUC_ID_MAX)
696 		return -EINVAL;
697 
698 	if (spare && spare < pf_get_min_spare_ctxs(gt))
699 		return -EINVAL;
700 
701 	gt->sriov.pf.spare.num_ctxs = spare;
702 
703 	return 0;
704 }
705 
706 /* Return: start ID or negative error code on failure */
707 static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
708 {
709 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
710 	unsigned int spare = pf_get_spare_ctxs(gt);
711 
712 	return xe_guc_id_mgr_reserve(idm, num, spare);
713 }
714 
715 static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
716 {
717 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
718 
719 	if (num)
720 		xe_guc_id_mgr_release(idm, start, num);
721 }
722 
723 static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
724 {
725 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
726 
727 	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
728 	config->begin_ctx = 0;
729 	config->num_ctxs = 0;
730 }
731 
732 static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
733 {
734 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
735 	int ret;
736 
737 	xe_gt_assert(gt, vfid);
738 
739 	if (num_ctxs > GUC_ID_MAX)
740 		return -EINVAL;
741 
742 	if (config->num_ctxs) {
743 		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
744 		if (unlikely(ret))
745 			return ret;
746 
747 		pf_release_config_ctxs(gt, config);
748 	}
749 
750 	if (!num_ctxs)
751 		return 0;
752 
753 	ret = pf_reserve_ctxs(gt, num_ctxs);
754 	if (unlikely(ret < 0))
755 		return ret;
756 
757 	config->begin_ctx = ret;
758 	config->num_ctxs = num_ctxs;
759 
760 	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
761 	if (unlikely(ret)) {
762 		pf_release_config_ctxs(gt, config);
763 		return ret;
764 	}
765 
766 	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
767 				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
768 	return 0;
769 }
770 
771 static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
772 {
773 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
774 
775 	return config->num_ctxs;
776 }
777 
778 /**
779  * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
780  * @gt: the &xe_gt
781  * @vfid: the VF identifier
782  *
783  * This function can only be called on PF.
784  * If &vfid represents the PF, then the number of PF's spare GuC context IDs is returned.
785  *
786  * Return: VF's quota (or PF's spare).
787  */
788 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
789 {
790 	u32 num_ctxs;
791 
792 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
793 	if (vfid)
794 		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
795 	else
796 		num_ctxs = pf_get_spare_ctxs(gt);
797 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
798 
799 	return num_ctxs;
800 }
801 
802 static const char *no_unit(u32 unused)
803 {
804 	return "";
805 }
806 
807 static const char *spare_unit(u32 unused)
808 {
809 	return " spare";
810 }
811 
812 static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
813 				  const char *what, const char *(*unit)(u32), int err)
814 {
815 	char name[8];
816 
817 	xe_sriov_function_name(vfid, name, sizeof(name));
818 
819 	if (unlikely(err)) {
820 		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
821 				   name, value, unit(value), what, ERR_PTR(err));
822 		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
823 				 name, actual, unit(actual), what);
824 		return err;
825 	}
826 
827 	/* the actual value may have changed during provisioning */
828 	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
829 			 name, actual, unit(actual), what);
830 	return 0;
831 }
832 
833 /**
834  * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
835  * @gt: the &xe_gt
836  * @vfid: the VF identifier
837  * @num_ctxs: requested number of GuC context IDs (0 to release)
838  *
839  * This function can only be called on PF.
840  *
841  * Return: 0 on success or a negative error code on failure.
842  */
843 int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
844 {
845 	int err;
846 
847 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
848 	if (vfid)
849 		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
850 	else
851 		err = pf_set_spare_ctxs(gt, num_ctxs);
852 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
853 
854 	return pf_config_set_u32_done(gt, vfid, num_ctxs,
855 				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
856 				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
857 }
858 
859 static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
860 				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
861 				       const char *what, const char *(*unit)(u32),
862 				       unsigned int last, int err)
863 {
864 	xe_gt_assert(gt, first);
865 	xe_gt_assert(gt, num_vfs);
866 	xe_gt_assert(gt, first <= last);
867 
868 	if (num_vfs == 1)
869 		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
870 
871 	if (unlikely(err)) {
872 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
873 				   first, first + num_vfs - 1, what);
874 		if (last > first)
875 			pf_config_bulk_set_u32_done(gt, first, last - first, value,
876 						    get, what, unit, last, 0);
877 		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
878 	}
879 
880 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
881 	value = get(gt, first);
882 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
883 			 first, first + num_vfs - 1, value, unit(value), what);
884 	return 0;
885 }
886 
887 /**
888  * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
889  * @gt: the &xe_gt
890  * @vfid: starting VF identifier (can't be 0)
891  * @num_vfs: number of VFs to provision
892  * @num_ctxs: requested number of GuC context IDs (0 to release)
893  *
894  * This function can only be called on PF.
895  *
896  * Return: 0 on success or a negative error code on failure.
897  */
898 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
899 					unsigned int num_vfs, u32 num_ctxs)
900 {
901 	unsigned int n;
902 	int err = 0;
903 
904 	xe_gt_assert(gt, vfid);
905 
906 	if (!num_vfs)
907 		return 0;
908 
909 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
910 	for (n = vfid; n < vfid + num_vfs; n++) {
911 		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
912 		if (err)
913 			break;
914 	}
915 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
916 
917 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
918 					   xe_gt_sriov_pf_config_get_ctxs,
919 					   "GuC context IDs", no_unit, n, err);
920 }
921 
922 static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
923 {
924 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
925 	u32 spare = pf_get_spare_ctxs(gt);
926 	u32 fair = (idm->total - spare) / num_vfs;
927 	int ret;
928 
929 	for (; fair; --fair) {
930 		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
931 		if (ret < 0)
932 			continue;
933 		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
934 		break;
935 	}
936 
937 	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
938 	return fair;
939 }
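
/*
 * The probe above starts from the arithmetic fair share and decrements
 * until fair * num_vfs IDs can be reserved as one contiguous range
 * (fragmentation may defeat the first guesses); the trial reservation is
 * released immediately, as the real reservations are made later by
 * pf_provision_vf_ctxs(). E.g. (sketch) with total = 65536, spare = 256
 * and num_vfs = 7 the first try is fair = (65536 - 256) / 7 = 9325.
 */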
940 
941 /**
942  * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
943  * @gt: the &xe_gt
944  * @vfid: starting VF identifier (can't be 0)
945  * @num_vfs: number of VFs to provision (can't be 0)
946  *
947  * This function can only be called on PF.
948  *
949  * Return: 0 on success or a negative error code on failure.
950  */
951 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
952 					unsigned int num_vfs)
953 {
954 	u32 fair;
955 
956 	xe_gt_assert(gt, vfid);
957 	xe_gt_assert(gt, num_vfs);
958 
959 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
960 	fair = pf_estimate_fair_ctxs(gt, num_vfs);
961 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
962 
963 	if (!fair)
964 		return -ENOSPC;
965 
966 	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
967 }
968 
969 static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
970 {
971 	/* XXX: preliminary, we don't use doorbells yet! */
972 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
973 }
974 
975 static u32 pf_get_spare_dbs(struct xe_gt *gt)
976 {
977 	u32 spare;
978 
979 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
980 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
981 
982 	spare = gt->sriov.pf.spare.num_dbs;
983 	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));
984 
985 	return spare;
986 }
987 
988 static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
989 {
990 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
991 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
992 
993 	if (spare > GUC_NUM_DOORBELLS)
994 		return -EINVAL;
995 
996 	if (spare && spare < pf_get_min_spare_dbs(gt))
997 		return -EINVAL;
998 
999 	gt->sriov.pf.spare.num_dbs = spare;
1000 	return 0;
1001 }
1002 
1003 /* Return: start ID or negative error code on failure */
1004 static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
1005 {
1006 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1007 	unsigned int spare = pf_get_spare_dbs(gt);
1008 
1009 	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
1010 }
1011 
1012 static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
1013 {
1014 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1015 
1016 	if (num)
1017 		xe_guc_db_mgr_release_range(dbm, start, num);
1018 }
1019 
1020 static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1021 {
1022 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1023 
1024 	pf_release_dbs(gt, config->begin_db, config->num_dbs);
1025 	config->begin_db = 0;
1026 	config->num_dbs = 0;
1027 }
1028 
1029 static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1030 {
1031 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1032 	int ret;
1033 
1034 	xe_gt_assert(gt, vfid);
1035 
1036 	if (num_dbs > GUC_NUM_DOORBELLS)
1037 		return -EINVAL;
1038 
1039 	if (config->num_dbs) {
1040 		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
1041 		if (unlikely(ret))
1042 			return ret;
1043 
1044 		pf_release_config_dbs(gt, config);
1045 	}
1046 
1047 	if (!num_dbs)
1048 		return 0;
1049 
1050 	ret = pf_reserve_dbs(gt, num_dbs);
1051 	if (unlikely(ret < 0))
1052 		return ret;
1053 
1054 	config->begin_db = ret;
1055 	config->num_dbs = num_dbs;
1056 
1057 	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
1058 	if (unlikely(ret)) {
1059 		pf_release_config_dbs(gt, config);
1060 		return ret;
1061 	}
1062 
1063 	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
1064 				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
1065 	return 0;
1066 }
1067 
1068 static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
1069 {
1070 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1071 
1072 	return config->num_dbs;
1073 }
1074 
1075 /**
1076  * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
1077  * @gt: the &xe_gt
1078  * @vfid: the VF identifier
1079  *
1080  * This function can only be called on PF.
1081  * If &vfid represents the PF, then the number of PF's spare GuC doorbell IDs is returned.
1082  *
1083  * Return: VF's quota (or PF's spare).
1084  */
1085 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
1086 {
1087 	u32 num_dbs;
1088 
1089 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1090 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1091 
1092 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1093 	if (vfid)
1094 		num_dbs = pf_get_vf_config_dbs(gt, vfid);
1095 	else
1096 		num_dbs = pf_get_spare_dbs(gt);
1097 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1098 
1099 	return num_dbs;
1100 }
1101 
1102 /**
1103  * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
1104  * @gt: the &xe_gt
1105  * @vfid: the VF identifier
1106  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1107  *
1108  * This function can only be called on PF.
1109  *
1110  * Return: 0 on success or a negative error code on failure.
1111  */
1112 int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1113 {
1114 	int err;
1115 
1116 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1117 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1118 
1119 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1120 	if (vfid)
1121 		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
1122 	else
1123 		err = pf_set_spare_dbs(gt, num_dbs);
1124 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1125 
1126 	return pf_config_set_u32_done(gt, vfid, num_dbs,
1127 				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
1128 				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
1129 }
1130 
1131 /**
1132  * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
1133  * @gt: the &xe_gt
1134  * @vfid: starting VF identifier (can't be 0)
1135  * @num_vfs: number of VFs to provision
1136  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1137  *
1138  * This function can only be called on PF.
1139  *
1140  * Return: 0 on success or a negative error code on failure.
1141  */
1142 int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
1143 				       unsigned int num_vfs, u32 num_dbs)
1144 {
1145 	unsigned int n;
1146 	int err = 0;
1147 
1148 	xe_gt_assert(gt, vfid);
1149 
1150 	if (!num_vfs)
1151 		return 0;
1152 
1153 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1154 	for (n = vfid; n < vfid + num_vfs; n++) {
1155 		err = pf_provision_vf_dbs(gt, n, num_dbs);
1156 		if (err)
1157 			break;
1158 	}
1159 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1160 
1161 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
1162 					   xe_gt_sriov_pf_config_get_dbs,
1163 					   "GuC doorbell IDs", no_unit, n, err);
1164 }
1165 
1166 static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
1167 {
1168 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1169 	u32 spare = pf_get_spare_dbs(gt);
1170 	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
1171 	int ret;
1172 
1173 	for (; fair; --fair) {
1174 		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
1175 		if (ret < 0)
1176 			continue;
1177 		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
1178 		break;
1179 	}
1180 
1181 	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
1182 	return fair;
1183 }
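
/*
 * Same probing strategy as for context IDs, scaled to the much smaller
 * doorbell pool, e.g. (sketch) with GUC_NUM_DOORBELLS = 256, spare = 1
 * and num_vfs = 7 the first try is fair = (256 - 1) / 7 = 36.
 */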
1184 
1185 /**
1186  * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
1187  * @gt: the &xe_gt
1188  * @vfid: starting VF identifier (can't be 0)
1189  * @num_vfs: number of VFs to provision (can't be 0)
1190  *
1191  * This function can only be called on PF.
1192  *
1193  * Return: 0 on success or a negative error code on failure.
1194  */
1195 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
1196 				       unsigned int num_vfs)
1197 {
1198 	u32 fair;
1199 
1200 	xe_gt_assert(gt, vfid);
1201 	xe_gt_assert(gt, num_vfs);
1202 
1203 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1204 	fair = pf_estimate_fair_dbs(gt, num_vfs);
1205 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1206 
1207 	if (!fair)
1208 		return -ENOSPC;
1209 
1210 	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
1211 }
1212 
1213 static u64 pf_get_lmem_alignment(struct xe_gt *gt)
1214 {
1215 	/* this might be platform dependent */
1216 	return SZ_2M;
1217 }
1218 
1219 static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
1220 {
1221 	/* this might be platform dependent */
1222 	return SZ_128M; /* XXX: preliminary */
1223 }
1224 
1225 static u64 pf_get_spare_lmem(struct xe_gt *gt)
1226 {
1227 	u64 spare;
1228 
1229 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1230 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1231 
1232 	spare = gt->sriov.pf.spare.lmem_size;
1233 	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));
1234 
1235 	return spare;
1236 }
1237 
1238 static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
1239 {
1240 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1241 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1242 
1243 	if (size && size < pf_get_min_spare_lmem(gt))
1244 		return -EINVAL;
1245 
1246 	gt->sriov.pf.spare.lmem_size = size;
1247 	return 0;
1248 }
1249 
1250 static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
1251 {
1252 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1253 	struct xe_bo *bo;
1254 
1255 	bo = config->lmem_obj;
1256 	return bo ? bo->size : 0;
1257 }
1258 
1259 static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1260 {
1261 	struct xe_device *xe = gt_to_xe(gt);
1262 	struct xe_tile *tile;
1263 	unsigned int tid;
1264 	int err;
1265 
1266 	for_each_tile(tile, xe, tid) {
1267 		if (tile->primary_gt == gt) {
1268 			err = pf_push_vf_cfg_lmem(gt, vfid, size);
1269 		} else {
1270 			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);
1271 
1272 			if (!lmem)
1273 				continue;
1274 			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
1275 		}
1276 		if (unlikely(err))
1277 			return err;
1278 	}
1279 	return 0;
1280 }
1281 
1282 static void pf_force_lmtt_invalidate(struct xe_device *xe)
1283 {
1284 	/* TODO */
1285 }
1286 
1287 static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1288 {
1289 	struct xe_lmtt *lmtt;
1290 	struct xe_tile *tile;
1291 	unsigned int tid;
1292 
1293 	xe_assert(xe, IS_DGFX(xe));
1294 	xe_assert(xe, IS_SRIOV_PF(xe));
1295 
1296 	for_each_tile(tile, xe, tid) {
1297 		lmtt = &tile->sriov.pf.lmtt;
1298 		xe_lmtt_drop_pages(lmtt, vfid);
1299 	}
1300 }
1301 
1302 static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1303 {
1304 	struct xe_gt_sriov_config *config;
1305 	struct xe_tile *tile;
1306 	struct xe_lmtt *lmtt;
1307 	struct xe_bo *bo;
1308 	struct xe_gt *gt;
1309 	u64 total, offset;
1310 	unsigned int gtid;
1311 	unsigned int tid;
1312 	int err;
1313 
1314 	xe_assert(xe, IS_DGFX(xe));
1315 	xe_assert(xe, IS_SRIOV_PF(xe));
1316 
1317 	total = 0;
1318 	for_each_tile(tile, xe, tid)
1319 		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
1320 
1321 	for_each_tile(tile, xe, tid) {
1322 		lmtt = &tile->sriov.pf.lmtt;
1323 
1324 		xe_lmtt_drop_pages(lmtt, vfid);
1325 		if (!total)
1326 			continue;
1327 
1328 		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
1329 		if (err)
1330 			goto fail;
1331 
1332 		offset = 0;
1333 		for_each_gt(gt, xe, gtid) {
1334 			if (xe_gt_is_media_type(gt))
1335 				continue;
1336 
1337 			config = pf_pick_vf_config(gt, vfid);
1338 			bo = config->lmem_obj;
1339 			if (!bo)
1340 				continue;
1341 
1342 			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
1343 			if (err)
1344 				goto fail;
1345 			offset += bo->size;
1346 		}
1347 	}
1348 
1349 	pf_force_lmtt_invalidate(xe);
1350 	return 0;
1351 
1352 fail:
1353 	for_each_tile(tile, xe, tid) {
1354 		lmtt = &tile->sriov.pf.lmtt;
1355 		xe_lmtt_drop_pages(lmtt, vfid);
1356 	}
1357 	return err;
1358 }
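
/*
 * Illustrative sketch: on a two-tile DGFX with 2G of LMEM provisioned on
 * each tile for this VF, every tile's LMTT is rebuilt to map total = 4G,
 * populated with tile0's lmem_obj at offset 0 and tile1's at offset 2G.
 */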
1359 
1360 static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1361 {
1362 	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
1363 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1364 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1365 
1366 	if (config->lmem_obj) {
1367 		xe_bo_unpin_map_no_vm(config->lmem_obj);
1368 		config->lmem_obj = NULL;
1369 	}
1370 }
1371 
1372 static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1373 {
1374 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1375 	struct xe_device *xe = gt_to_xe(gt);
1376 	struct xe_tile *tile = gt_to_tile(gt);
1377 	struct xe_bo *bo;
1378 	int err;
1379 
1380 	xe_gt_assert(gt, vfid);
1381 	xe_gt_assert(gt, IS_DGFX(xe));
1382 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1383 
1384 	size = round_up(size, pf_get_lmem_alignment(gt));
1385 
1386 	if (config->lmem_obj) {
1387 		err = pf_distribute_config_lmem(gt, vfid, 0);
1388 		if (unlikely(err))
1389 			return err;
1390 
1391 		pf_reset_vf_lmtt(xe, vfid);
1392 		pf_release_vf_config_lmem(gt, config);
1393 	}
1394 	xe_gt_assert(gt, !config->lmem_obj);
1395 
1396 	if (!size)
1397 		return 0;
1398 
1399 	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
1400 	bo = xe_bo_create_pin_map(xe, tile, NULL,
1401 				  ALIGN(size, PAGE_SIZE),
1402 				  ttm_bo_type_kernel,
1403 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1404 				  XE_BO_FLAG_PINNED);
1405 	if (IS_ERR(bo))
1406 		return PTR_ERR(bo);
1407 
1408 	config->lmem_obj = bo;
1409 
1410 	err = pf_update_vf_lmtt(xe, vfid);
1411 	if (unlikely(err))
1412 		goto release;
1413 
1414 	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
1415 	if (unlikely(err))
1416 		goto reset_lmtt;
1417 
1418 	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
1419 				vfid, bo->size, bo->size / SZ_1M);
1420 	return 0;
1421 
1422 reset_lmtt:
1423 	pf_reset_vf_lmtt(xe, vfid);
1424 release:
1425 	pf_release_vf_config_lmem(gt, config);
1426 	return err;
1427 }
1428 
1429 /**
1430  * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
1431  * @gt: the &xe_gt
1432  * @vfid: the VF identifier
1433  *
1434  * This function can only be called on PF.
1435  *
1436  * Return: VF's (or PF's spare) LMEM quota.
1437  */
1438 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
1439 {
1440 	u64 size;
1441 
1442 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1443 	if (vfid)
1444 		size = pf_get_vf_config_lmem(gt, vfid);
1445 	else
1446 		size = pf_get_spare_lmem(gt);
1447 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1448 
1449 	return size;
1450 }
1451 
1452 /**
1453  * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
1454  * @gt: the &xe_gt (can't be media)
1455  * @vfid: the VF identifier
1456  * @size: requested LMEM size
1457  *
1458  * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
1459  */
1460 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1461 {
1462 	int err;
1463 
1464 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1465 	if (vfid)
1466 		err = pf_provision_vf_lmem(gt, vfid, size);
1467 	else
1468 		err = pf_set_spare_lmem(gt, size);
1469 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1470 
1471 	return pf_config_set_u64_done(gt, vfid, size,
1472 				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
1473 				      vfid ? "LMEM" : "spare LMEM", err);
1474 }
1475 
1476 /**
1477  * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
1478  * @gt: the &xe_gt (can't be media)
1479  * @vfid: starting VF identifier (can't be 0)
1480  * @num_vfs: number of VFs to provision
1481  * @size: requested LMEM size
1482  *
1483  * This function can only be called on PF.
1484  *
1485  * Return: 0 on success or a negative error code on failure.
1486  */
1487 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
1488 					unsigned int num_vfs, u64 size)
1489 {
1490 	unsigned int n;
1491 	int err = 0;
1492 
1493 	xe_gt_assert(gt, vfid);
1494 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1495 
1496 	if (!num_vfs)
1497 		return 0;
1498 
1499 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1500 	for (n = vfid; n < vfid + num_vfs; n++) {
1501 		err = pf_provision_vf_lmem(gt, n, size);
1502 		if (err)
1503 			break;
1504 	}
1505 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1506 
1507 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
1508 					   xe_gt_sriov_pf_config_get_lmem,
1509 					   "LMEM", n, err);
1510 }
1511 
1512 static u64 pf_query_free_lmem(struct xe_gt *gt)
1513 {
1514 	struct xe_tile *tile = gt->tile;
1515 
1516 	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
1517 }
1518 
1519 static u64 pf_query_max_lmem(struct xe_gt *gt)
1520 {
1521 	u64 alignment = pf_get_lmem_alignment(gt);
1522 	u64 spare = pf_get_spare_lmem(gt);
1523 	u64 free = pf_query_free_lmem(gt);
1524 	u64 avail;
1525 
1526 	/* XXX: need to account for 2MB blocks only */
1527 	avail = free > spare ? free - spare : 0;
1528 	avail = round_down(avail, alignment);
1529 
1530 	return avail;
1531 }
1532 
1533 #ifdef CONFIG_DRM_XE_DEBUG_SRIOV
1534 #define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
1535 #else
1536 #define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
1537 #endif
1538 
1539 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
1540 {
1541 	u64 available = pf_query_max_lmem(gt);
1542 	u64 alignment = pf_get_lmem_alignment(gt);
1543 	u64 fair;
1544 
1545 	fair = div_u64(available, num_vfs);
1546 	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
1547 	fair = ALIGN_DOWN(fair, alignment);
1548 #ifdef MAX_FAIR_LMEM
1549 	fair = min_t(u64, MAX_FAIR_LMEM, fair);
1550 #endif
1551 	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
1552 				available / SZ_1M, num_vfs, fair / SZ_1M);
1553 	return fair;
1554 }
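
/*
 * Example (sketch): available = 14000M, num_vfs = 7:
 *
 *	fair = 14000M / 7                  = 2000M
 *	fair = rounddown_pow_of_two(fair)  = 1024M
 *	fair = min(MAX_FAIR_LMEM, fair)    = 1024M (128M under DEBUG_SRIOV)
 */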
1555 
1556 /**
1557  * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
1558  * @gt: the &xe_gt (can't be media)
1559  * @vfid: starting VF identifier (can't be 0)
1560  * @num_vfs: number of VFs to provision (can't be 0)
1561  *
1562  * This function can only be called on PF.
1563  *
1564  * Return: 0 on success or a negative error code on failure.
1565  */
1566 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
1567 					unsigned int num_vfs)
1568 {
1569 	u64 fair;
1570 
1571 	xe_gt_assert(gt, vfid);
1572 	xe_gt_assert(gt, num_vfs);
1573 	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1574 
1575 	if (!IS_DGFX(gt_to_xe(gt)))
1576 		return 0;
1577 
1578 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1579 	fair = pf_estimate_fair_lmem(gt, num_vfs);
1580 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1581 
1582 	if (!fair)
1583 		return -ENOSPC;
1584 
1585 	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
1586 }
1587 
1588 /**
1589  * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
1590  * @gt: the &xe_gt
1591  * @vfid: starting VF identifier (can't be 0)
1592  * @num_vfs: number of VFs to provision (can't be 0)
1593  *
1594  * This function can only be called on PF.
1595  *
1596  * Return: 0 on success or a negative error code on failure.
1597  */
1598 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
1599 				   unsigned int num_vfs)
1600 {
1601 	int result = 0;
1602 	int err;
1603 
1604 	xe_gt_assert(gt, vfid);
1605 	xe_gt_assert(gt, num_vfs);
1606 
1607 	if (!xe_gt_is_media_type(gt)) {
1608 		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
1609 		result = result ?: err;
1610 		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
1611 		result = result ?: err;
1612 	}
1613 	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
1614 	result = result ?: err;
1615 	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
1616 	result = result ?: err;
1617 
1618 	return result;
1619 }
1620 
1621 static const char *exec_quantum_unit(u32 exec_quantum)
1622 {
1623 	return exec_quantum ? "ms" : "(infinity)";
1624 }
1625 
1626 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1627 				     u32 exec_quantum)
1628 {
1629 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1630 	int err;
1631 
1632 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1633 	if (unlikely(err))
1634 		return err;
1635 
1636 	config->exec_quantum = exec_quantum;
1637 	return 0;
1638 }
1639 
1640 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1641 {
1642 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1643 
1644 	return config->exec_quantum;
1645 }
1646 
1647 /**
1648  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1649  * @gt: the &xe_gt
1650  * @vfid: the VF identifier
1651  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1652  *
1653  * This function can only be called on PF.
1654  *
1655  * Return: 0 on success or a negative error code on failure.
1656  */
1657 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1658 					   u32 exec_quantum)
1659 {
1660 	int err;
1661 
1662 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1663 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1664 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1665 
1666 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1667 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1668 				      "execution quantum", exec_quantum_unit, err);
1669 }
1670 
1671 /**
1672  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1673  * @gt: the &xe_gt
1674  * @vfid: the VF identifier
1675  *
1676  * This function can only be called on PF.
1677  *
1678  * Return: VF's (or PF's) execution quantum in milliseconds.
1679  */
1680 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1681 {
1682 	u32 exec_quantum;
1683 
1684 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1685 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1686 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1687 
1688 	return exec_quantum;
1689 }
1690 
1691 static const char *preempt_timeout_unit(u32 preempt_timeout)
1692 {
1693 	return preempt_timeout ? "us" : "(infinity)";
1694 }
1695 
1696 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1697 					u32 preempt_timeout)
1698 {
1699 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1700 	int err;
1701 
1702 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1703 	if (unlikely(err))
1704 		return err;
1705 
1706 	config->preempt_timeout = preempt_timeout;
1707 
1708 	return 0;
1709 }
1710 
1711 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1712 {
1713 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1714 
1715 	return config->preempt_timeout;
1716 }
1717 
1718 /**
1719  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1720  * @gt: the &xe_gt
1721  * @vfid: the VF identifier
1722  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1723  *
1724  * This function can only be called on PF.
1725  *
1726  * Return: 0 on success or a negative error code on failure.
1727  */
1728 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1729 					      u32 preempt_timeout)
1730 {
1731 	int err;
1732 
1733 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1734 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1735 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1736 
1737 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1738 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1739 				      "preemption timeout", preempt_timeout_unit, err);
1740 }
1741 
1742 /**
1743  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1744  * @gt: the &xe_gt
1745  * @vfid: the VF identifier
1746  *
1747  * This function can only be called on PF.
1748  *
1749  * Return: VF's (or PF's) preemption timeout in microseconds.
1750  */
1751 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1752 {
1753 	u32 preempt_timeout;
1754 
1755 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1756 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1757 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1758 
1759 	return preempt_timeout;
1760 }
1761 
1762 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1763 {
1764 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1765 
1766 	config->exec_quantum = 0;
1767 	config->preempt_timeout = 0;
1768 }
1769 
1770 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1771 				  enum xe_guc_klv_threshold_index index, u32 value)
1772 {
1773 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1774 	int err;
1775 
1776 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1777 	if (unlikely(err))
1778 		return err;
1779 
1780 	config->thresholds[index] = value;
1781 
1782 	return 0;
1783 }
1784 
1785 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1786 			    enum xe_guc_klv_threshold_index index)
1787 {
1788 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1789 
1790 	return config->thresholds[index];
1791 }
1792 
1793 static const char *threshold_unit(u32 threshold)
1794 {
1795 	return threshold ? "" : "(disabled)";
1796 }
1797 
1798 /**
1799  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1800  * @gt: the &xe_gt
1801  * @vfid: the VF identifier
1802  * @index: the threshold index
1803  * @value: requested value (0 means disabled)
1804  *
1805  * This function can only be called on PF.
1806  *
1807  * Return: 0 on success or a negative error code on failure.
1808  */
1809 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1810 					enum xe_guc_klv_threshold_index index, u32 value)
1811 {
1812 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1813 	const char *name = xe_guc_klv_key_to_string(key);
1814 	int err;
1815 
1816 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1817 	err = pf_provision_threshold(gt, vfid, index, value);
1818 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1819 
1820 	return pf_config_set_u32_done(gt, vfid, value,
1821 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1822 				      name, threshold_unit, err);
1823 }
1824 
1825 /**
1826  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1827  * @gt: the &xe_gt
1828  * @vfid: the VF identifier
1829  * @index: the threshold index
1830  *
1831  * This function can only be called on PF.
1832  *
1833  * Return: value of VF's (or PF's) threshold.
1834  */
1835 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1836 					enum xe_guc_klv_threshold_index index)
1837 {
1838 	u32 value;
1839 
1840 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1841 	value = pf_get_threshold(gt, vfid, index);
1842 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1843 
1844 	return value;
1845 }

static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe_gt_is_media_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		if (IS_DGFX(xe)) {
			pf_release_vf_config_lmem(gt, config);
			pf_update_vf_lmtt(xe, vfid);
		}
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
	pf_reset_config_sched(gt, config);
}

/**
 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @force: force configuration release
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
{
	int err;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err || force)
		pf_release_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
				   vfid, ERR_PTR(err),
				   force ? " but all resources were released anyway!" : "");
	}

	return force ? 0 : err;
}
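
/*
 * Illustrative sketch (not part of the driver): a PF teardown path that
 * force-releases every VF's configuration, so local resources are reclaimed
 * even when the GuC reset request fails. The helper name is hypothetical.
 */
static void __maybe_unused example_release_all_vfs(struct xe_gt *gt)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));

	for (n = 1; n <= total_vfs; n++)
		xe_gt_sriov_pf_config_release(gt, n, true);
}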

/**
 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @refresh: explicit refresh
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
{
	int err = 0;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (refresh)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
	}

	return err;
}

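/*
 * Check how complete the VF's mandatory provisioning is: returns 1 when all
 * mandatory resources are provisioned, -ENOKEY when only some of them are,
 * and -ENODATA when none are. GuC doorbells are optional and GGTT/LMEM are
 * tracked on the primary GT only, as noted in the body below.
 */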
static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool is_primary = !xe_gt_is_media_type(gt);
	bool valid_ggtt, valid_ctxs, valid_dbs;
	bool valid_any, valid_all;

	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	valid_dbs = pf_get_vf_config_dbs(gt, vfid);

	/* note that GuC doorbells are optional */
	valid_any = valid_ctxs || valid_dbs;
	valid_all = valid_ctxs;

	/* and GGTT/LMEM is configured on primary GT only */
	valid_all = valid_all && valid_ggtt;
	valid_any = valid_any || (valid_ggtt && is_primary);

	if (IS_DGFX(xe)) {
		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);

		valid_any = valid_any || (valid_lmem && is_primary);
		valid_all = valid_all && valid_lmem;
	}

	return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA;
}

/**
 * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 *
 * This function can only be called on PF.
 *
 * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
 */
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
{
	bool empty;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return empty;
}

/**
 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
 * @gt: the &xe_gt
 *
 * Any prior configurations pushed to GuC are lost when the GT is reset.
 * Push again all non-empty VF configurations to the GuC.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	unsigned int fail = 0, skip = 0;

	for (n = 1; n <= total_vfs; n++) {
		if (xe_gt_sriov_pf_config_is_empty(gt, n))
			skip++;
		else if (xe_gt_sriov_pf_config_push(gt, n, false))
			fail++;
	}

	if (fail)
		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
				   fail, total_vfs - skip, str_plural(total_vfs - skip));

	if (fail != total_vfs)
		xe_gt_sriov_dbg(gt, "pushed %u, skipped %u of %u VF%s configurations\n",
				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
}
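
/*
 * Illustrative sketch (not part of the driver): a GT reset handler on the PF
 * would reprovision VFs by calling the restart helper once the GuC is back
 * up. The function name here is hypothetical.
 */
static void __maybe_unused example_gt_reset_done(struct xe_gt *gt)
{
	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_config_restart(gt);
}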

/**
 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT configuration data for all VFs.
 * VFs without provisioned GGTT are ignored.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!drm_mm_node_allocated(&config->ggtt_region))
			continue;

		string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
			   n, config->ggtt_region.start,
			   config->ggtt_region.start + config->ggtt_region.size - 1, buf);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}
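
/*
 * Illustrative sketch (not part of the driver): dumping all provisioning
 * tables with the print helpers in this file. The helper and its name are
 * hypothetical; xe_gt_info_printer() is the same printer used elsewhere in
 * this file.
 */
static void __maybe_unused example_dump_provisioning(struct xe_gt *gt)
{
	struct drm_printer p = xe_gt_info_printer(gt);

	xe_gt_sriov_pf_config_print_ggtt(gt, &p);
	xe_gt_sriov_pf_config_print_ctxs(gt, &p);
	xe_gt_sriov_pf_config_print_dbs(gt, &p);
}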

/**
 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC context ID allocations across all VFs.
 * VFs without GuC context IDs are skipped.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_ctxs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_ctx,
			   config->begin_ctx + config->num_ctxs - 1,
			   config->num_ctxs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC doorbell ID allocations across all VFs.
 * VFs without GuC doorbell IDs are skipped.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_dbs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_db,
			   config->begin_db + config->num_dbs - 1,
			   config->num_dbs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are still available for provisioning.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
	u64 hole_start, hole_end, hole_size;
	u64 spare, avail, total = 0;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	spare = pf_get_spare_ggtt(gt);

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		total += hole_size;

		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
			   hole_start, hole_end - 1, buf);
	}

	mutex_unlock(&ggtt->lock);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);

	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);

	avail = total > spare ? total - spare : 0;

	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);

	return 0;
}
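
/*
 * Illustrative sketch (not part of the driver): deriving a simple "fair"
 * per-VF GGTT quota from the available size computed above, using the same
 * alignment rule as the hole walk. The helper and its name are hypothetical;
 * div_u64() comes from <linux/math64.h>.
 */
static u64 __maybe_unused example_fair_ggtt_quota(u64 avail, unsigned int num_vfs,
						  u64 alignment)
{
	u64 fair;

	if (!num_vfs)
		return 0;

	/* equal split, rounded down to the provisioning alignment */
	fair = div_u64(avail, num_vfs);
	return ALIGN_DOWN(fair, alignment);
}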