/linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c (revision 9fd2da71c301184d98fe37674ca8d017d1ce6600)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023-2024 Intel Corporation
4  */
5 
6 #include <linux/string_choices.h>
7 #include <linux/wordpart.h>
8 
9 #include "abi/guc_actions_sriov_abi.h"
10 #include "abi/guc_klvs_abi.h"
11 
12 #include "regs/xe_guc_regs.h"
13 
14 #include "xe_bo.h"
15 #include "xe_device.h"
16 #include "xe_ggtt.h"
17 #include "xe_gt.h"
18 #include "xe_gt_sriov_pf_config.h"
19 #include "xe_gt_sriov_pf_helpers.h"
20 #include "xe_gt_sriov_pf_policy.h"
21 #include "xe_gt_sriov_printk.h"
22 #include "xe_guc.h"
23 #include "xe_guc_buf.h"
24 #include "xe_guc_ct.h"
25 #include "xe_guc_db_mgr.h"
26 #include "xe_guc_fwif.h"
27 #include "xe_guc_id_mgr.h"
28 #include "xe_guc_klv_helpers.h"
29 #include "xe_guc_klv_thresholds_set.h"
30 #include "xe_guc_submit.h"
31 #include "xe_lmtt.h"
32 #include "xe_map.h"
33 #include "xe_migrate.h"
34 #include "xe_sriov.h"
35 #include "xe_ttm_vram_mgr.h"
36 #include "xe_vram_types.h"
37 #include "xe_wopcm.h"
38 
39 #define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
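/* e.g. make_u64_from_u32(0x1, 0xf0) == 0x00000001000000f0 */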
40 
41 /*
42  * Return: number of KLVs that were successfully parsed and saved,
43  *         negative error code on failure.
44  */
45 static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
46 				    u64 addr, u32 size)
47 {
48 	u32 request[] = {
49 		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
50 		vfid,
51 		lower_32_bits(addr),
52 		upper_32_bits(addr),
53 		size,
54 	};
55 
56 	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
57 }
58 
59 /*
60  * Return: 0 on success, negative error code on failure.
61  */
62 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
63 {
64 	struct xe_guc *guc = &gt->uc.guc;
65 	int ret;
66 
67 	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);
68 
69 	return ret <= 0 ? ret : -EPROTO;
70 }
71 
72 /*
73  * Return: number of KLVs that were successfully parsed and saved,
74  *         negative error code on failure.
75  */
76 static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
77 {
78 	struct xe_guc *guc = &gt->uc.guc;
79 
80 	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
81 }
82 
83 /*
84  * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
85  *         negative error code on failure.
86  */
87 static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
88 			       struct xe_guc_buf buf, u32 num_dwords)
89 {
90 	int ret;
91 
92 	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);
93 
94 	if (ret != num_klvs) {
95 		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
96 		void *klvs = xe_guc_buf_cpu_ptr(buf);
97 		struct drm_printer p = xe_gt_info_printer(gt);
98 		char name[8];
99 
100 		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
101 				   xe_sriov_function_name(vfid, name, sizeof(name)),
102 				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
103 		xe_guc_klv_print(klvs, num_dwords, &p);
104 		return err;
105 	}
106 
107 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
108 		struct drm_printer p = xe_gt_dbg_printer(gt);
109 		void *klvs = xe_guc_buf_cpu_ptr(buf);
110 		char name[8];
111 
112 		xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
113 				xe_sriov_function_name(vfid, name, sizeof(name)),
114 				num_klvs, str_plural(num_klvs));
115 		xe_guc_klv_print(klvs, num_dwords, &p);
116 	}
117 
118 	return 0;
119 }
120 
121 /*
122  * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
123  *         negative error code on failure.
124  */
125 static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
126 			       const u32 *klvs, u32 num_dwords)
127 {
128 	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
129 
130 	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
131 
132 	if (!xe_guc_buf_is_valid(buf))
133 		return -ENOBUFS;
134 
135 	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
136 }
137 
138 static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
139 {
140 	u32 klv[] = {
141 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
142 		value,
143 	};
144 
145 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
146 }
147 
148 static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
149 {
150 	u32 klv[] = {
151 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
152 		lower_32_bits(value),
153 		upper_32_bits(value),
154 	};
155 
156 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
157 }
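
/*
 * Illustrative sketch (not built): the dword stream that the helper above
 * produces for a 64-bit KLV, here the LMEM size key carrying 8G. Dword 0
 * packs the key and the value length in dwords; the value follows, least
 * significant dword first.
 */
#if 0
static const u32 example_lmem_size_klv[] = {
	FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_LMEM_SIZE_KEY) |
	FIELD_PREP(GUC_KLV_0_LEN, 2),
	lower_32_bits(SZ_8G),	/* 0x00000000 */
	upper_32_bits(SZ_8G),	/* 0x00000002 */
};
#endif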
158 
159 static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
160 {
161 	u32 klvs[] = {
162 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
163 		lower_32_bits(start),
164 		upper_32_bits(start),
165 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
166 		lower_32_bits(size),
167 		upper_32_bits(size),
168 	};
169 
170 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
171 }
172 
173 static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
174 {
175 	u32 klvs[] = {
176 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
177 		begin,
178 		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
179 		num,
180 	};
181 
182 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
183 }
184 
185 static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
186 {
187 	u32 klvs[] = {
188 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
189 		begin,
190 		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
191 		num,
192 	};
193 
194 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
195 }
196 
197 static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
198 {
199 	/* GuC will silently clamp values exceeding max */
200 	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);
201 
202 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
203 }
204 
205 static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
206 {
207 	/* GuC will silently clamp values exceeding max */
208 	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);
209 
210 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
211 }
212 
213 static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
214 {
215 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
216 }
217 
218 static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
219 {
220 	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
221 }
222 
223 static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
224 				    enum xe_guc_klv_threshold_index index, u32 value)
225 {
226 	u32 key = xe_guc_klv_threshold_index_to_key(index);
227 
228 	xe_gt_assert(gt, key);
229 	return pf_push_vf_cfg_u32(gt, vfid, key, value);
230 }
231 
232 static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
233 {
234 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
235 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
236 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
237 
238 	return &gt->sriov.pf.vfs[vfid].config;
239 }
240 
241 /* Return: number of configuration dwords written */
242 static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
243 {
244 	u32 n = 0;
245 
246 	if (details) {
247 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
248 		cfg[n++] = lower_32_bits(start);
249 		cfg[n++] = upper_32_bits(start);
250 	}
251 
252 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
253 	cfg[n++] = lower_32_bits(size);
254 	cfg[n++] = upper_32_bits(size);
255 
256 	return n;
257 }
258 
259 /* Return: number of configuration dwords written */
260 static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
261 {
262 	struct xe_ggtt_node *node = config->ggtt_region;
263 
264 	if (!xe_ggtt_node_allocated(node))
265 		return 0;
266 
267 	return encode_ggtt(cfg, node->base.start, node->base.size, details);
268 }
269 
270 /* Return: number of configuration dwords written */
271 static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
272 {
273 	u32 n = 0;
274 
275 	n += encode_config_ggtt(cfg, config, details);
276 
277 	if (details && config->num_ctxs) {
278 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
279 		cfg[n++] = config->begin_ctx;
280 	}
281 
282 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
283 	cfg[n++] = config->num_ctxs;
284 
285 	if (details && config->num_dbs) {
286 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
287 		cfg[n++] = config->begin_db;
288 	}
289 
290 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
291 	cfg[n++] = config->num_dbs;
292 
293 	if (config->lmem_obj) {
294 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
295 		cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
296 		cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
297 	}
298 
299 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
300 	cfg[n++] = config->exec_quantum;
301 
302 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
303 	cfg[n++] = config->preempt_timeout;
304 
305 #define encode_threshold_config(TAG, ...) ({					\
306 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
307 	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
308 });
309 
310 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
311 #undef encode_threshold_config
312 
313 	return n;
314 }
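
/*
 * For reference: on a freshly reset VF config (no GGTT node, no LMEM
 * object) the stream encoded with details == false reduces to the
 * always-emitted KLVs - NUM_CONTEXTS, NUM_DOORBELLS, EXEC_QUANTUM,
 * PREEMPT_TIMEOUT and the generated threshold set - each carrying a zero
 * value; GGTT and LMEM entries appear only once those resources are
 * actually provisioned.
 */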
315 
316 static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
317 {
318 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
319 	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
320 	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
321 	u32 num_dwords;
322 	int num_klvs;
323 	u32 *cfg;
324 	int err;
325 
326 	if (!xe_guc_buf_is_valid(buf))
327 		return -ENOBUFS;
328 
329 	cfg = xe_guc_buf_cpu_ptr(buf);
330 	num_dwords = encode_config(cfg, config, true);
331 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
332 
333 	if (xe_gt_is_media_type(gt)) {
334 		struct xe_gt *primary = gt->tile->primary_gt;
335 		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
336 
337 		/* media-GT will never include a GGTT config */
338 		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
339 
340 		/* the GGTT config must be taken from the primary-GT instead */
341 		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
342 	}
343 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
344 
345 	if (vfid == PFID) {
346 		u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
347 		u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;
348 
349 		/* plain PF config data will never include a real GGTT region */
350 		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
351 
352 		/* fake PF GGTT config covers full GGTT range except reserved WOPCM */
353 		num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
354 	}
355 
356 	num_klvs = xe_guc_klv_count(cfg, num_dwords);
357 	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
358 
359 	return err;
360 }
361 
362 static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
363 {
364 	int err = 0;
365 
366 	xe_gt_assert(gt, vfid);
367 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
368 
369 	if (reset)
370 		err = pf_send_vf_cfg_reset(gt, vfid);
371 	if (!err)
372 		err = pf_push_full_vf_config(gt, vfid);
373 
374 	return err;
375 }
376 
377 static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
378 {
379 	return pf_push_vf_cfg(gt, vfid, true);
380 }
381 
382 static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
383 {
384 	struct xe_device *xe = gt_to_xe(gt);
385 
386 	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
387 }
388 
389 static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
390 {
391 	/* XXX: preliminary */
392 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
393 		pf_get_ggtt_alignment(gt) : SZ_64M;
394 }
395 
396 static u64 pf_get_spare_ggtt(struct xe_gt *gt)
397 {
398 	u64 spare;
399 
400 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
401 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
402 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
403 
404 	spare = gt->sriov.pf.spare.ggtt_size;
405 	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));
406 
407 	return spare;
408 }
409 
410 static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
411 {
412 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
413 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
414 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
415 
416 	if (size && size < pf_get_min_spare_ggtt(gt))
417 		return -EINVAL;
418 
419 	size = round_up(size, pf_get_ggtt_alignment(gt));
420 	gt->sriov.pf.spare.ggtt_size = size;
421 
422 	return 0;
423 }
424 
425 static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
426 {
427 	int err, err2 = 0;
428 
429 	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);
430 
431 	if (tile->media_gt && !err)
432 		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);
433 
434 	return err ?: err2;
435 }
436 
437 static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
438 {
439 	if (xe_ggtt_node_allocated(node)) {
440 		/*
441 		 * Explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
442 		 * is redundant, as the PTEs will be implicitly re-assigned to the
443 		 * PF by the xe_ggtt_clear() called from xe_ggtt_node_remove() below.
444 		 */
445 		xe_ggtt_node_remove(node, false);
446 	} else {
447 		xe_ggtt_node_fini(node);
448 	}
449 }
450 
451 static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
452 {
453 	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
454 	config->ggtt_region = NULL;
455 }
456 
457 static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
458 {
459 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
460 	struct xe_ggtt_node *node;
461 	struct xe_tile *tile = gt_to_tile(gt);
462 	struct xe_ggtt *ggtt = tile->mem.ggtt;
463 	u64 alignment = pf_get_ggtt_alignment(gt);
464 	int err;
465 
466 	xe_gt_assert(gt, vfid);
467 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
468 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
469 
470 	size = round_up(size, alignment);
471 
472 	if (xe_ggtt_node_allocated(config->ggtt_region)) {
473 		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
474 		if (unlikely(err))
475 			return err;
476 
477 		pf_release_vf_config_ggtt(gt, config);
478 
479 		err = pf_refresh_vf_cfg(gt, vfid);
480 		if (unlikely(err))
481 			return err;
482 	}
483 	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
484 
485 	if (!size)
486 		return 0;
487 
488 	node = xe_ggtt_node_init(ggtt);
489 	if (IS_ERR(node))
490 		return PTR_ERR(node);
491 
492 	err = xe_ggtt_node_insert(node, size, alignment);
493 	if (unlikely(err))
494 		goto err;
495 
496 	xe_ggtt_assign(node, vfid);
497 	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
498 				vfid, node->base.start, node->base.start + node->base.size - 1);
499 
500 	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
501 	if (unlikely(err))
502 		goto err;
503 
504 	config->ggtt_region = node;
505 	return 0;
506 err:
507 	pf_release_ggtt(tile, node);
508 	return err;
509 }
510 
511 static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
512 {
513 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
514 	struct xe_ggtt_node *node = config->ggtt_region;
515 
516 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
517 	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
518 }
519 
520 /**
521  * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
522  * @gt: the &xe_gt
523  * @vfid: the VF identifier
524  *
525  * This function can only be called on PF.
526  *
527  * Return: size of the VF's assigned (or PF's spare) GGTT address space.
528  */
529 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
530 {
531 	u64 size;
532 
533 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
534 	if (vfid)
535 		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
536 	else
537 		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
538 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
539 
540 	return size;
541 }
542 
543 static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
544 				  u64 actual, const char *what, int err)
545 {
546 	char size[10];
547 	char name[8];
548 
549 	xe_sriov_function_name(vfid, name, sizeof(name));
550 
551 	if (unlikely(err)) {
552 		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
553 		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
554 				   name, value, size, what, ERR_PTR(err));
555 		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
556 		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
557 				 name, actual, size, what);
558 		return err;
559 	}
560 
561 	/* the actual value may have changed during provisioning */
562 	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
563 	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
564 			 name, actual, size, what);
565 	return 0;
566 }
567 
568 /**
569  * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
570  * @gt: the &xe_gt (can't be media)
571  * @vfid: the VF identifier
572  * @size: requested GGTT size
573  *
574  * If &vfid represents the PF, then the function will change the PF's spare GGTT config.
575  *
576  * This function can only be called on PF.
577  *
578  * Return: 0 on success or a negative error code on failure.
579  */
580 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
581 {
582 	int err;
583 
584 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
585 
586 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
587 	if (vfid)
588 		err = pf_provision_vf_ggtt(gt, vfid, size);
589 	else
590 		err = pf_set_spare_ggtt(gt, size);
591 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
592 
593 	return pf_config_set_u64_done(gt, vfid, size,
594 				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
595 				      vfid ? "GGTT" : "spare GGTT", err);
596 }
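
/*
 * Illustrative usage sketch (not built): a PF-side caller provisioning
 * VF1 with 512M of GGTT, assuming @gt is the primary GT of a device
 * running in PF mode; passing size == 0 would release the allocation.
 */
#if 0
static int example_provision_vf1_ggtt(struct xe_gt *gt)
{
	return xe_gt_sriov_pf_config_set_ggtt(gt, 1, SZ_512M);
}
#endif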
597 
598 static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
599 				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
600 				       const char *what, unsigned int last, int err)
601 {
602 	char size[10];
603 
604 	xe_gt_assert(gt, first);
605 	xe_gt_assert(gt, num_vfs);
606 	xe_gt_assert(gt, first <= last);
607 
608 	if (num_vfs == 1)
609 		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);
610 
611 	if (unlikely(err)) {
612 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
613 				   first, first + num_vfs - 1, what);
614 		if (last > first)
615 			pf_config_bulk_set_u64_done(gt, first, last - first, value,
616 						    get, what, last, 0);
617 		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
618 	}
619 
620 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
621 	value = get(gt, first);
622 	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
623 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
624 			 first, first + num_vfs - 1, value, size, what);
625 	return 0;
626 }
627 
628 /**
629  * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
630  * @gt: the &xe_gt (can't be media)
631  * @vfid: starting VF identifier (can't be 0)
632  * @num_vfs: number of VFs to provision
633  * @size: requested GGTT size
634  *
635  * This function can only be called on PF.
636  *
637  * Return: 0 on success or a negative error code on failure.
638  */
639 int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
640 					unsigned int num_vfs, u64 size)
641 {
642 	unsigned int n;
643 	int err = 0;
644 
645 	xe_gt_assert(gt, vfid);
646 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
647 
648 	if (!num_vfs)
649 		return 0;
650 
651 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
652 	for (n = vfid; n < vfid + num_vfs; n++) {
653 		err = pf_provision_vf_ggtt(gt, n, size);
654 		if (err)
655 			break;
656 	}
657 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
658 
659 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
660 					   xe_gt_sriov_pf_config_get_ggtt,
661 					   "GGTT", n, err);
662 }
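
/*
 * Illustrative sketch (not built): giving the same 256M GGTT quota to
 * VF1..VF4 in a single call; on a mid-loop failure the completion helper
 * still logs which VFs were provisioned before the loop stopped.
 * Assumes @gt is the primary GT.
 */
#if 0
static int example_bulk_ggtt(struct xe_gt *gt)
{
	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, 1, 4, SZ_256M);
}
#endif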
663 
664 /* Return: size of the largest contiguous GGTT region */
665 static u64 pf_get_max_ggtt(struct xe_gt *gt)
666 {
667 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
668 	u64 alignment = pf_get_ggtt_alignment(gt);
669 	u64 spare = pf_get_spare_ggtt(gt);
670 	u64 max_hole;
671 
672 	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);
673 
674 	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
675 				max_hole / SZ_1K, spare / SZ_1K);
676 	return max_hole > spare ? max_hole - spare : 0;
677 }
678 
679 static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
680 {
681 	u64 available = pf_get_max_ggtt(gt);
682 	u64 alignment = pf_get_ggtt_alignment(gt);
683 	u64 fair;
684 
685 	/*
686 	 * To simplify the logic we only look at the single largest GGTT region,
687 	 * as that will always be the best fit for the 1 VF case, and will most
688 	 * likely also nicely cover other cases where VFs are provisioned on a
689 	 * fresh and idle PF driver, without any stale GGTT allocations spread
690 	 * across the middle of the full GGTT range.
691 	 */
692 
693 	fair = div_u64(available, num_vfs);
694 	fair = ALIGN_DOWN(fair, alignment);
695 	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
696 				available / SZ_1K, num_vfs, fair / SZ_1K);
697 	return fair;
698 }
699 
700 /**
701  * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
702  * @gt: the &xe_gt (can't be media)
703  * @vfid: starting VF identifier (can't be 0)
704  * @num_vfs: number of VFs to provision
705  *
706  * This function can only be called on PF.
707  *
708  * Return: 0 on success or a negative error code on failure.
709  */
710 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
711 					unsigned int num_vfs)
712 {
713 	u64 fair;
714 
715 	xe_gt_assert(gt, vfid);
716 	xe_gt_assert(gt, num_vfs);
717 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
718 
719 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
720 	fair = pf_estimate_fair_ggtt(gt, num_vfs);
721 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
722 
723 	if (!fair)
724 		return -ENOSPC;
725 
726 	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
727 }
728 
729 static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
730 {
731 	/* XXX: preliminary */
732 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
733 		hweight64(gt->info.engine_mask) : SZ_256;
734 }
735 
736 static u32 pf_get_spare_ctxs(struct xe_gt *gt)
737 {
738 	u32 spare;
739 
740 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
741 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
742 
743 	spare = gt->sriov.pf.spare.num_ctxs;
744 	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));
745 
746 	return spare;
747 }
748 
749 static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
750 {
751 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
752 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
753 
754 	if (spare > GUC_ID_MAX)
755 		return -EINVAL;
756 
757 	if (spare && spare < pf_get_min_spare_ctxs(gt))
758 		return -EINVAL;
759 
760 	gt->sriov.pf.spare.num_ctxs = spare;
761 
762 	return 0;
763 }
764 
765 /* Return: start ID or negative error code on failure */
766 static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
767 {
768 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
769 	unsigned int spare = pf_get_spare_ctxs(gt);
770 
771 	return xe_guc_id_mgr_reserve(idm, num, spare);
772 }
773 
774 static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
775 {
776 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
777 
778 	if (num)
779 		xe_guc_id_mgr_release(idm, start, num);
780 }
781 
782 static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
783 {
784 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
785 
786 	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
787 	config->begin_ctx = 0;
788 	config->num_ctxs = 0;
789 }
790 
791 static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
792 {
793 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
794 	int ret;
795 
796 	xe_gt_assert(gt, vfid);
797 
798 	if (num_ctxs > GUC_ID_MAX)
799 		return -EINVAL;
800 
801 	if (config->num_ctxs) {
802 		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
803 		if (unlikely(ret))
804 			return ret;
805 
806 		pf_release_config_ctxs(gt, config);
807 
808 		ret = pf_refresh_vf_cfg(gt, vfid);
809 		if (unlikely(ret))
810 			return ret;
811 	}
812 
813 	if (!num_ctxs)
814 		return 0;
815 
816 	ret = pf_reserve_ctxs(gt, num_ctxs);
817 	if (unlikely(ret < 0))
818 		return ret;
819 
820 	config->begin_ctx = ret;
821 	config->num_ctxs = num_ctxs;
822 
823 	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
824 	if (unlikely(ret)) {
825 		pf_release_config_ctxs(gt, config);
826 		return ret;
827 	}
828 
829 	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
830 				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
831 	return 0;
832 }
833 
834 static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
835 {
836 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
837 
838 	return config->num_ctxs;
839 }
840 
841 /**
842  * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
843  * @gt: the &xe_gt
844  * @vfid: the VF identifier
845  *
846  * This function can only be called on PF.
847  * If &vfid represents a PF, then the number of the PF's spare GuC context IDs is returned.
848  *
849  * Return: VF's quota (or PF's spare).
850  */
851 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
852 {
853 	u32 num_ctxs;
854 
855 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
856 	if (vfid)
857 		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
858 	else
859 		num_ctxs = pf_get_spare_ctxs(gt);
860 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
861 
862 	return num_ctxs;
863 }
864 
865 static const char *no_unit(u32 unused)
866 {
867 	return "";
868 }
869 
870 static const char *spare_unit(u32 unused)
871 {
872 	return " spare";
873 }
874 
875 static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
876 				  const char *what, const char *(*unit)(u32), int err)
877 {
878 	char name[8];
879 
880 	xe_sriov_function_name(vfid, name, sizeof(name));
881 
882 	if (unlikely(err)) {
883 		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
884 				   name, value, unit(value), what, ERR_PTR(err));
885 		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
886 				 name, actual, unit(actual), what);
887 		return err;
888 	}
889 
890 	/* the actual value may have changed during provisioning */
891 	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
892 			 name, actual, unit(actual), what);
893 	return 0;
894 }
895 
896 /**
897  * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
898  * @gt: the &xe_gt
899  * @vfid: the VF identifier
900  * @num_ctxs: requested number of GuC context IDs (0 to release)
901  *
902  * This function can only be called on PF.
903  *
904  * Return: 0 on success or a negative error code on failure.
905  */
906 int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
907 {
908 	int err;
909 
910 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
911 	if (vfid)
912 		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
913 	else
914 		err = pf_set_spare_ctxs(gt, num_ctxs);
915 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
916 
917 	return pf_config_set_u32_done(gt, vfid, num_ctxs,
918 				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
919 				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
920 }
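
/*
 * Illustrative sketch (not built): reserving 64 GuC context IDs for VF2,
 * then bumping the PF's own spare (vfid == 0) to 128. Assumes PF mode.
 */
#if 0
static int example_set_ctxs(struct xe_gt *gt)
{
	int err;

	err = xe_gt_sriov_pf_config_set_ctxs(gt, 2, 64);	/* VF2 quota */
	if (err)
		return err;
	return xe_gt_sriov_pf_config_set_ctxs(gt, 0, 128);	/* PF spare */
}
#endif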
921 
922 static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
923 				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
924 				       const char *what, const char *(*unit)(u32),
925 				       unsigned int last, int err)
926 {
927 	xe_gt_assert(gt, first);
928 	xe_gt_assert(gt, num_vfs);
929 	xe_gt_assert(gt, first <= last);
930 
931 	if (num_vfs == 1)
932 		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
933 
934 	if (unlikely(err)) {
935 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
936 				   first, first + num_vfs - 1, what);
937 		if (last > first)
938 			pf_config_bulk_set_u32_done(gt, first, last - first, value,
939 						    get, what, unit, last, 0);
940 		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
941 	}
942 
943 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
944 	value = get(gt, first);
945 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
946 			 first, first + num_vfs - 1, value, unit(value), what);
947 	return 0;
948 }
949 
950 /**
951  * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
952  * @gt: the &xe_gt
953  * @vfid: starting VF identifier (can't be 0)
954  * @num_vfs: number of VFs to provision
955  * @num_ctxs: requested number of GuC context IDs (0 to release)
956  *
957  * This function can only be called on PF.
958  *
959  * Return: 0 on success or a negative error code on failure.
960  */
961 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
962 					unsigned int num_vfs, u32 num_ctxs)
963 {
964 	unsigned int n;
965 	int err = 0;
966 
967 	xe_gt_assert(gt, vfid);
968 
969 	if (!num_vfs)
970 		return 0;
971 
972 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
973 	for (n = vfid; n < vfid + num_vfs; n++) {
974 		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
975 		if (err)
976 			break;
977 	}
978 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
979 
980 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
981 					   xe_gt_sriov_pf_config_get_ctxs,
982 					   "GuC context IDs", no_unit, n, err);
983 }
984 
985 static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
986 {
987 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
988 	u32 spare = pf_get_spare_ctxs(gt);
989 	u32 fair = (idm->total - spare) / num_vfs;
990 	int ret;
991 
992 	for (; fair; --fair) {
993 		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
994 		if (ret < 0)
995 			continue;
996 		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
997 		break;
998 	}
999 
1000 	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
1001 	return fair;
1002 }
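
/*
 * Worked example for the search above: with a hypothetical idm->total of
 * 65536, a spare of 256 and num_vfs == 7, the first candidate is
 * (65536 - 256) / 7 = 9325 IDs per VF; if a contiguous block of 7 * 9325
 * IDs cannot be reserved (e.g. due to fragmentation), the candidate is
 * decremented until a trial reservation succeeds or reaches zero.
 */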
1003 
1004 /**
1005  * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
1006  * @gt: the &xe_gt
1007  * @vfid: starting VF identifier (can't be 0)
1008  * @num_vfs: number of VFs to provision (can't be 0)
1009  *
1010  * This function can only be called on PF.
1011  *
1012  * Return: 0 on success or a negative error code on failure.
1013  */
1014 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
1015 					unsigned int num_vfs)
1016 {
1017 	u32 fair;
1018 
1019 	xe_gt_assert(gt, vfid);
1020 	xe_gt_assert(gt, num_vfs);
1021 
1022 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1023 	fair = pf_estimate_fair_ctxs(gt, num_vfs);
1024 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1025 
1026 	if (!fair)
1027 		return -ENOSPC;
1028 
1029 	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
1030 }
1031 
1032 static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
1033 {
1034 	/* XXX: preliminary, we don't use doorbells yet! */
1035 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
1036 }
1037 
1038 static u32 pf_get_spare_dbs(struct xe_gt *gt)
1039 {
1040 	u32 spare;
1041 
1042 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1043 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1044 
1045 	spare = gt->sriov.pf.spare.num_dbs;
1046 	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));
1047 
1048 	return spare;
1049 }
1050 
1051 static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
1052 {
1053 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1054 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1055 
1056 	if (spare > GUC_NUM_DOORBELLS)
1057 		return -EINVAL;
1058 
1059 	if (spare && spare < pf_get_min_spare_dbs(gt))
1060 		return -EINVAL;
1061 
1062 	gt->sriov.pf.spare.num_dbs = spare;
1063 	return 0;
1064 }
1065 
1066 /* Return: start ID or negative error code on failure */
1067 static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
1068 {
1069 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1070 	unsigned int spare = pf_get_spare_dbs(gt);
1071 
1072 	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
1073 }
1074 
1075 static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
1076 {
1077 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1078 
1079 	if (num)
1080 		xe_guc_db_mgr_release_range(dbm, start, num);
1081 }
1082 
1083 static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1084 {
1085 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1086 
1087 	pf_release_dbs(gt, config->begin_db, config->num_dbs);
1088 	config->begin_db = 0;
1089 	config->num_dbs = 0;
1090 }
1091 
1092 static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1093 {
1094 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1095 	int ret;
1096 
1097 	xe_gt_assert(gt, vfid);
1098 
1099 	if (num_dbs > GUC_NUM_DOORBELLS)
1100 		return -EINVAL;
1101 
1102 	if (config->num_dbs) {
1103 		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
1104 		if (unlikely(ret))
1105 			return ret;
1106 
1107 		pf_release_config_dbs(gt, config);
1108 
1109 		ret = pf_refresh_vf_cfg(gt, vfid);
1110 		if (unlikely(ret))
1111 			return ret;
1112 	}
1113 
1114 	if (!num_dbs)
1115 		return 0;
1116 
1117 	ret = pf_reserve_dbs(gt, num_dbs);
1118 	if (unlikely(ret < 0))
1119 		return ret;
1120 
1121 	config->begin_db = ret;
1122 	config->num_dbs = num_dbs;
1123 
1124 	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
1125 	if (unlikely(ret)) {
1126 		pf_release_config_dbs(gt, config);
1127 		return ret;
1128 	}
1129 
1130 	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
1131 				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
1132 	return 0;
1133 }
1134 
1135 static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
1136 {
1137 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1138 
1139 	return config->num_dbs;
1140 }
1141 
1142 /**
1143  * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
1144  * @gt: the &xe_gt
1145  * @vfid: the VF identifier
1146  *
1147  * This function can only be called on PF.
1148  * If &vfid represents a PF, then the number of the PF's spare GuC doorbell IDs is returned.
1149  *
1150  * Return: VF's quota (or PF's spare).
1151  */
1152 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
1153 {
1154 	u32 num_dbs;
1155 
1156 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1157 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1158 
1159 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1160 	if (vfid)
1161 		num_dbs = pf_get_vf_config_dbs(gt, vfid);
1162 	else
1163 		num_dbs = pf_get_spare_dbs(gt);
1164 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1165 
1166 	return num_dbs;
1167 }
1168 
1169 /**
1170  * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
1171  * @gt: the &xe_gt
1172  * @vfid: the VF identifier
1173  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1174  *
1175  * This function can only be called on PF.
1176  *
1177  * Return: 0 on success or a negative error code on failure.
1178  */
1179 int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1180 {
1181 	int err;
1182 
1183 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1184 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1185 
1186 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1187 	if (vfid)
1188 		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
1189 	else
1190 		err = pf_set_spare_dbs(gt, num_dbs);
1191 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1192 
1193 	return pf_config_set_u32_done(gt, vfid, num_dbs,
1194 				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
1195 				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
1196 }
1197 
1198 /**
1199  * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
1200  * @gt: the &xe_gt
1201  * @vfid: starting VF identifier (can't be 0)
1202  * @num_vfs: number of VFs to provision
1203  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1204  *
1205  * This function can only be called on PF.
1206  *
1207  * Return: 0 on success or a negative error code on failure.
1208  */
1209 int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
1210 				       unsigned int num_vfs, u32 num_dbs)
1211 {
1212 	unsigned int n;
1213 	int err = 0;
1214 
1215 	xe_gt_assert(gt, vfid);
1216 
1217 	if (!num_vfs)
1218 		return 0;
1219 
1220 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1221 	for (n = vfid; n < vfid + num_vfs; n++) {
1222 		err = pf_provision_vf_dbs(gt, n, num_dbs);
1223 		if (err)
1224 			break;
1225 	}
1226 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1227 
1228 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
1229 					   xe_gt_sriov_pf_config_get_dbs,
1230 					   "GuC doorbell IDs", no_unit, n, err);
1231 }
1232 
1233 static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
1234 {
1235 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1236 	u32 spare = pf_get_spare_dbs(gt);
1237 	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
1238 	int ret;
1239 
1240 	for (; fair; --fair) {
1241 		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
1242 		if (ret < 0)
1243 			continue;
1244 		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
1245 		break;
1246 	}
1247 
1248 	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
1249 	return fair;
1250 }
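
/*
 * Worked example: assuming GUC_NUM_DOORBELLS is 256 and the spare is 1
 * (the debug minimum above), num_vfs == 7 starts the search at
 * (256 - 1) / 7 = 36 doorbells per VF, stepping down until a contiguous
 * trial reservation succeeds.
 */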
1251 
1252 /**
1253  * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
1254  * @gt: the &xe_gt
1255  * @vfid: starting VF identifier (can't be 0)
1256  * @num_vfs: number of VFs to provision (can't be 0)
1257  *
1258  * This function can only be called on PF.
1259  *
1260  * Return: 0 on success or a negative error code on failure.
1261  */
1262 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
1263 				       unsigned int num_vfs)
1264 {
1265 	u32 fair;
1266 
1267 	xe_gt_assert(gt, vfid);
1268 	xe_gt_assert(gt, num_vfs);
1269 
1270 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1271 	fair = pf_estimate_fair_dbs(gt, num_vfs);
1272 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1273 
1274 	if (!fair)
1275 		return -ENOSPC;
1276 
1277 	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
1278 }
1279 
1280 static u64 pf_get_lmem_alignment(struct xe_gt *gt)
1281 {
1282 	/* this might be platform dependent */
1283 	return SZ_2M;
1284 }
1285 
1286 static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
1287 {
1288 	/* this might be platform dependent */
1289 	return SZ_128M; /* XXX: preliminary */
1290 }
1291 
1292 static u64 pf_get_spare_lmem(struct xe_gt *gt)
1293 {
1294 	u64 spare;
1295 
1296 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1297 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1298 
1299 	spare = gt->sriov.pf.spare.lmem_size;
1300 	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));
1301 
1302 	return spare;
1303 }
1304 
1305 static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
1306 {
1307 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1308 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1309 
1310 	if (size && size < pf_get_min_spare_lmem(gt))
1311 		return -EINVAL;
1312 
1313 	gt->sriov.pf.spare.lmem_size = size;
1314 	return 0;
1315 }
1316 
1317 static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
1318 {
1319 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1320 	struct xe_bo *bo;
1321 
1322 	bo = config->lmem_obj;
1323 	return bo ? xe_bo_size(bo) : 0;
1324 }
1325 
1326 static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1327 {
1328 	struct xe_device *xe = gt_to_xe(gt);
1329 	struct xe_tile *tile;
1330 	unsigned int tid;
1331 	int err;
1332 
1333 	for_each_tile(tile, xe, tid) {
1334 		if (tile->primary_gt == gt) {
1335 			err = pf_push_vf_cfg_lmem(gt, vfid, size);
1336 		} else {
1337 			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);
1338 
1339 			if (!lmem)
1340 				continue;
1341 			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
1342 		}
1343 		if (unlikely(err))
1344 			return err;
1345 	}
1346 	return 0;
1347 }
1348 
1349 static void pf_force_lmtt_invalidate(struct xe_device *xe)
1350 {
1351 	struct xe_lmtt *lmtt;
1352 	struct xe_tile *tile;
1353 	unsigned int tid;
1354 
1355 	xe_assert(xe, xe_device_has_lmtt(xe));
1356 	xe_assert(xe, IS_SRIOV_PF(xe));
1357 
1358 	for_each_tile(tile, xe, tid) {
1359 		lmtt = &tile->sriov.pf.lmtt;
1360 		xe_lmtt_invalidate_hw(lmtt);
1361 	}
1362 }
1363 
1364 static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1365 {
1366 	struct xe_lmtt *lmtt;
1367 	struct xe_tile *tile;
1368 	unsigned int tid;
1369 
1370 	xe_assert(xe, xe_device_has_lmtt(xe));
1371 	xe_assert(xe, IS_SRIOV_PF(xe));
1372 
1373 	for_each_tile(tile, xe, tid) {
1374 		lmtt = &tile->sriov.pf.lmtt;
1375 		xe_lmtt_drop_pages(lmtt, vfid);
1376 	}
1377 }
1378 
1379 static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1380 {
1381 	struct xe_gt_sriov_config *config;
1382 	struct xe_tile *tile;
1383 	struct xe_lmtt *lmtt;
1384 	struct xe_bo *bo;
1385 	struct xe_gt *gt;
1386 	u64 total, offset;
1387 	unsigned int gtid;
1388 	unsigned int tid;
1389 	int err;
1390 
1391 	xe_assert(xe, xe_device_has_lmtt(xe));
1392 	xe_assert(xe, IS_SRIOV_PF(xe));
1393 
1394 	total = 0;
1395 	for_each_tile(tile, xe, tid)
1396 		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
1397 
1398 	for_each_tile(tile, xe, tid) {
1399 		lmtt = &tile->sriov.pf.lmtt;
1400 
1401 		xe_lmtt_drop_pages(lmtt, vfid);
1402 		if (!total)
1403 			continue;
1404 
1405 		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
1406 		if (err)
1407 			goto fail;
1408 
1409 		offset = 0;
1410 		for_each_gt(gt, xe, gtid) {
1411 			if (xe_gt_is_media_type(gt))
1412 				continue;
1413 
1414 			config = pf_pick_vf_config(gt, vfid);
1415 			bo = config->lmem_obj;
1416 			if (!bo)
1417 				continue;
1418 
1419 			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
1420 			if (err)
1421 				goto fail;
1422 			offset += xe_bo_size(bo);
1423 		}
1424 	}
1425 
1426 	pf_force_lmtt_invalidate(xe);
1427 	return 0;
1428 
1429 fail:
1430 	for_each_tile(tile, xe, tid) {
1431 		lmtt = &tile->sriov.pf.lmtt;
1432 		xe_lmtt_drop_pages(lmtt, vfid);
1433 	}
1434 	return err;
1435 }
1436 
1437 /* Return: %true if LMEM was provisioned, %false otherwise */
1438 static bool pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1439 {
1440 	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
1441 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1442 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1443 
1444 	if (config->lmem_obj) {
1445 		xe_bo_unpin_map_no_vm(config->lmem_obj);
1446 		config->lmem_obj = NULL;
1447 		return true;
1448 	}
1449 	return false;
1450 }
1451 
1452 static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1453 {
1454 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1455 	struct xe_device *xe = gt_to_xe(gt);
1456 	struct xe_tile *tile = gt_to_tile(gt);
1457 	struct xe_bo *bo;
1458 	int err;
1459 
1460 	xe_gt_assert(gt, vfid);
1461 	xe_gt_assert(gt, IS_DGFX(xe));
1462 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1463 
1464 	size = round_up(size, pf_get_lmem_alignment(gt));
1465 
1466 	if (config->lmem_obj) {
1467 		err = pf_distribute_config_lmem(gt, vfid, 0);
1468 		if (unlikely(err))
1469 			return err;
1470 
1471 		if (xe_device_has_lmtt(xe))
1472 			pf_reset_vf_lmtt(xe, vfid);
1473 		pf_release_vf_config_lmem(gt, config);
1474 	}
1475 	xe_gt_assert(gt, !config->lmem_obj);
1476 
1477 	if (!size)
1478 		return 0;
1479 
1480 	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
1481 	bo = xe_bo_create_locked(xe, tile, NULL,
1482 				 ALIGN(size, PAGE_SIZE),
1483 				 ttm_bo_type_kernel,
1484 				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1485 				 XE_BO_FLAG_NEEDS_2M |
1486 				 XE_BO_FLAG_PINNED |
1487 				 XE_BO_FLAG_PINNED_LATE_RESTORE);
1488 	if (IS_ERR(bo))
1489 		return PTR_ERR(bo);
1490 
1491 	err = xe_bo_pin(bo);
1492 	xe_bo_unlock(bo);
1493 	if (unlikely(err)) {
1494 		xe_bo_put(bo);
1495 		return err;
1496 	}
1497 
1498 	config->lmem_obj = bo;
1499 
1500 	if (xe_device_has_lmtt(xe)) {
1501 		err = pf_update_vf_lmtt(xe, vfid);
1502 		if (unlikely(err))
1503 			goto release;
1504 	}
1505 
1506 	err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
1507 	if (unlikely(err))
1508 		goto reset_lmtt;
1509 
1510 	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
1511 				vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
1512 	return 0;
1513 
1514 reset_lmtt:
1515 	if (xe_device_has_lmtt(xe))
1516 		pf_reset_vf_lmtt(xe, vfid);
1517 release:
1518 	pf_release_vf_config_lmem(gt, config);
1519 	return err;
1520 }
1521 
1522 /**
1523  * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
1524  * @gt: the &xe_gt
1525  * @vfid: the VF identifier
1526  *
1527  * This function can only be called on PF.
1528  *
1529  * Return: VF's (or PF's spare) LMEM quota.
1530  */
1531 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
1532 {
1533 	u64 size;
1534 
1535 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1536 	if (vfid)
1537 		size = pf_get_vf_config_lmem(gt, vfid);
1538 	else
1539 		size = pf_get_spare_lmem(gt);
1540 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1541 
1542 	return size;
1543 }
1544 
1545 /**
1546  * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
1547  * @gt: the &xe_gt (can't be media)
1548  * @vfid: the VF identifier
1549  * @size: requested LMEM size
1550  *
1551  * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
1552  */
1553 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1554 {
1555 	int err;
1556 
1557 	xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
1558 
1559 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1560 	if (vfid)
1561 		err = pf_provision_vf_lmem(gt, vfid, size);
1562 	else
1563 		err = pf_set_spare_lmem(gt, size);
1564 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1565 
1566 	return pf_config_set_u64_done(gt, vfid, size,
1567 				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
1568 				      vfid ? "LMEM" : "spare LMEM", err);
1569 }
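
/*
 * Illustrative sketch (not built): provisioning VF1 with 2G of LMEM,
 * assuming @gt is the primary GT of a dGPU running in PF mode.
 */
#if 0
static int example_set_lmem(struct xe_gt *gt)
{
	return xe_gt_sriov_pf_config_set_lmem(gt, 1, SZ_2G);
}
#endif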
1570 
1571 /**
1572  * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
1573  * @gt: the &xe_gt (can't be media)
1574  * @vfid: starting VF identifier (can't be 0)
1575  * @num_vfs: number of VFs to provision
1576  * @size: requested LMEM size
1577  *
1578  * This function can only be called on PF.
1579  *
1580  * Return: 0 on success or a negative error code on failure.
1581  */
1582 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
1583 					unsigned int num_vfs, u64 size)
1584 {
1585 	unsigned int n;
1586 	int err = 0;
1587 
1588 	xe_gt_assert(gt, vfid);
1589 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1590 
1591 	if (!num_vfs)
1592 		return 0;
1593 
1594 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1595 	for (n = vfid; n < vfid + num_vfs; n++) {
1596 		err = pf_provision_vf_lmem(gt, n, size);
1597 		if (err)
1598 			break;
1599 	}
1600 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1601 
1602 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
1603 					   xe_gt_sriov_pf_config_get_lmem,
1604 					   "LMEM", n, err);
1605 }
1606 
1607 static u64 pf_query_free_lmem(struct xe_gt *gt)
1608 {
1609 	struct xe_tile *tile = gt->tile;
1610 
1611 	return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
1612 }
1613 
1614 static u64 pf_query_max_lmem(struct xe_gt *gt)
1615 {
1616 	u64 alignment = pf_get_lmem_alignment(gt);
1617 	u64 spare = pf_get_spare_lmem(gt);
1618 	u64 free = pf_query_free_lmem(gt);
1619 	u64 avail;
1620 
1621 	/* XXX: need to account for 2MB blocks only */
1622 	avail = free > spare ? free - spare : 0;
1623 	avail = round_down(avail, alignment);
1624 
1625 	return avail;
1626 }
1627 
1628 #ifdef CONFIG_DRM_XE_DEBUG_SRIOV
1629 #define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
1630 #endif
1631 
1632 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
1633 {
1634 	u64 available = pf_query_max_lmem(gt);
1635 	u64 alignment = pf_get_lmem_alignment(gt);
1636 	u64 fair;
1637 
1638 	fair = div_u64(available, num_vfs);
1639 	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
1640 	fair = ALIGN_DOWN(fair, alignment);
1641 #ifdef MAX_FAIR_LMEM
1642 	fair = min_t(u64, MAX_FAIR_LMEM, fair);
1643 #endif
1644 	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
1645 				available / SZ_1M, num_vfs, fair / SZ_1M);
1646 	return fair;
1647 }
1648 
1649 /**
1650  * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
1651  * @gt: the &xe_gt (can't be media)
1652  * @vfid: starting VF identifier (can't be 0)
1653  * @num_vfs: number of VFs to provision (can't be 0)
1654  *
1655  * This function can only be called on PF.
1656  *
1657  * Return: 0 on success or a negative error code on failure.
1658  */
1659 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
1660 					unsigned int num_vfs)
1661 {
1662 	u64 fair;
1663 
1664 	xe_gt_assert(gt, vfid);
1665 	xe_gt_assert(gt, num_vfs);
1666 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1667 
1668 	if (!xe_device_has_lmtt(gt_to_xe(gt)))
1669 		return 0;
1670 
1671 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1672 	fair = pf_estimate_fair_lmem(gt, num_vfs);
1673 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1674 
1675 	if (!fair)
1676 		return -ENOSPC;
1677 
1678 	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
1679 }
1680 
1681 /**
1682  * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
1683  * @gt: the &xe_gt
1684  * @vfid: starting VF identifier (can't be 0)
1685  * @num_vfs: number of VFs to provision (can't be 0)
1686  *
1687  * This function can only be called on PF.
1688  *
1689  * Return: 0 on success or a negative error code on failure.
1690  */
1691 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
1692 				   unsigned int num_vfs)
1693 {
1694 	int result = 0;
1695 	int err;
1696 
1697 	xe_gt_assert(gt, vfid);
1698 	xe_gt_assert(gt, num_vfs);
1699 
1700 	if (xe_gt_is_main_type(gt)) {
1701 		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
1702 		result = result ?: err;
1703 		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
1704 		result = result ?: err;
1705 	}
1706 	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
1707 	result = result ?: err;
1708 	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
1709 	result = result ?: err;
1710 
1711 	return result;
1712 }
1713 
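
/*
 * Illustrative sketch (not built): fair-splitting this GT's resources
 * across VF1..VF4; as implemented above, remaining resources are still
 * attempted after a failure and the first error is returned.
 */
#if 0
static int example_fair_split(struct xe_gt *gt)
{
	return xe_gt_sriov_pf_config_set_fair(gt, 1, 4);
}
#endif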
1714 static const char *exec_quantum_unit(u32 exec_quantum)
1715 {
1716 	return exec_quantum ? "ms" : "(infinity)";
1717 }
1718 
1719 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1720 				     u32 exec_quantum)
1721 {
1722 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1723 	int err;
1724 
1725 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1726 	if (unlikely(err))
1727 		return err;
1728 
1729 	config->exec_quantum = exec_quantum;
1730 	return 0;
1731 }
1732 
1733 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1734 {
1735 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1736 
1737 	return config->exec_quantum;
1738 }
1739 
1740 /**
1741  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1742  * @gt: the &xe_gt
1743  * @vfid: the VF identifier
1744  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1745  *
1746  * This function can only be called on PF.
1747  *
1748  * Return: 0 on success or a negative error code on failure.
1749  */
1750 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1751 					   u32 exec_quantum)
1752 {
1753 	int err;
1754 
1755 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1756 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1757 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1758 
1759 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1760 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1761 				      "execution quantum", exec_quantum_unit, err);
1762 }
1763 
1764 /**
1765  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1766  * @gt: the &xe_gt
1767  * @vfid: the VF identifier
1768  *
1769  * This function can only be called on PF.
1770  *
1771  * Return: VF's (or PF's) execution quantum in milliseconds.
1772  */
1773 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1774 {
1775 	u32 exec_quantum;
1776 
1777 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1778 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1779 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1780 
1781 	return exec_quantum;
1782 }
1783 
1784 static const char *preempt_timeout_unit(u32 preempt_timeout)
1785 {
1786 	return preempt_timeout ? "us" : "(infinity)";
1787 }
1788 
1789 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1790 					u32 preempt_timeout)
1791 {
1792 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1793 	int err;
1794 
1795 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1796 	if (unlikely(err))
1797 		return err;
1798 
1799 	config->preempt_timeout = preempt_timeout;
1800 
1801 	return 0;
1802 }
1803 
1804 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1805 {
1806 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1807 
1808 	return config->preempt_timeout;
1809 }
1810 
1811 /**
1812  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1813  * @gt: the &xe_gt
1814  * @vfid: the VF identifier
1815  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1816  *
1817  * This function can only be called on PF.
1818  *
1819  * Return: 0 on success or a negative error code on failure.
1820  */
1821 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1822 					      u32 preempt_timeout)
1823 {
1824 	int err;
1825 
1826 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1827 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1828 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1829 
1830 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1831 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1832 				      "preemption timeout", preempt_timeout_unit, err);
1833 }
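
/*
 * Illustrative sketch (not built): pairing a 20 ms execution quantum
 * with a 50 us preemption timeout for VF1; a zero value would make
 * either setting infinite.
 */
#if 0
static int example_sched_tuning(struct xe_gt *gt)
{
	int err;

	err = xe_gt_sriov_pf_config_set_exec_quantum(gt, 1, 20);
	if (err)
		return err;
	return xe_gt_sriov_pf_config_set_preempt_timeout(gt, 1, 50);
}
#endif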
1834 
1835 /**
1836  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1837  * @gt: the &xe_gt
1838  * @vfid: the VF identifier
1839  *
1840  * This function can only be called on PF.
1841  *
1842  * Return: VF's (or PF's) preemption timeout in microseconds.
1843  */
1844 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1845 {
1846 	u32 preempt_timeout;
1847 
1848 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1849 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1850 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1851 
1852 	return preempt_timeout;
1853 }
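
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * execution quantum is expressed in milliseconds while the preemption
 * timeout is in microseconds, so a caller configuring both must mind the
 * unit mismatch. The helper name is an assumption for this example only.
 */
static int __maybe_unused example_provision_sched_params(struct xe_gt *gt,
							 unsigned int vfid,
							 u32 quantum_ms,
							 u32 timeout_us)
{
	int err;

	err = xe_gt_sriov_pf_config_set_exec_quantum(gt, vfid, quantum_ms);
	if (err)
		return err;

	return xe_gt_sriov_pf_config_set_preempt_timeout(gt, vfid, timeout_us);
}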
1854 
1855 static const char *sched_priority_unit(u32 priority)
1856 {
1857 	return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
1858 		priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
1859 		priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
1860 		"(?)";
1861 }
1862 
1863 static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1864 {
1865 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1866 	int err;
1867 
1868 	err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
1869 	if (unlikely(err))
1870 		return err;
1871 
1872 	config->sched_priority = priority;
1873 	return 0;
1874 }
1875 
1876 static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1877 {
1878 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1879 
1880 	return config->sched_priority;
1881 }
1882 
1883 /**
1884  * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
1885  * @gt: the &xe_gt
1886  * @vfid: the VF identifier
1887  * @priority: requested scheduling priority
1888  *
1889  * This function can only be called on PF.
1890  *
1891  * Return: 0 on success or a negative error code on failure.
1892  */
1893 int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1894 {
1895 	int err;
1896 
1897 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1898 	err = pf_provision_sched_priority(gt, vfid, priority);
1899 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1900 
1901 	return pf_config_set_u32_done(gt, vfid, priority,
1902 				      xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
1903 				      "scheduling priority", sched_priority_unit, err);
1904 }
1905 
1906 /**
1907  * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
1908  * @gt: the &xe_gt
1909  * @vfid: the VF identifier
1910  *
1911  * This function can only be called on PF.
1912  *
1913  * Return: VF's (or PF's) scheduling priority.
1914  */
1915 u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1916 {
1917 	u32 priority;
1918 
1919 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1920 	priority = pf_get_sched_priority(gt, vfid);
1921 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1922 
1923 	return priority;
1924 }
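
/*
 * Illustrative sketch (editor's example, not part of the driver): raising a
 * VF to the highest GuC scheduling priority unless it is already there. The
 * priority values are the same GUC_SCHED_PRIORITY_* levels decoded by
 * sched_priority_unit() above.
 */
static int __maybe_unused example_boost_sched_priority(struct xe_gt *gt,
						       unsigned int vfid)
{
	if (xe_gt_sriov_pf_config_get_sched_priority(gt, vfid) ==
	    GUC_SCHED_PRIORITY_HIGH)
		return 0;	/* nothing to do */

	return xe_gt_sriov_pf_config_set_sched_priority(gt, vfid,
							GUC_SCHED_PRIORITY_HIGH);
}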
1925 
1926 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1927 {
1928 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1929 
1930 	config->exec_quantum = 0;
1931 	config->preempt_timeout = 0;
1932 }
1933 
1934 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1935 				  enum xe_guc_klv_threshold_index index, u32 value)
1936 {
1937 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1938 	int err;
1939 
1940 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1941 	if (unlikely(err))
1942 		return err;
1943 
1944 	config->thresholds[index] = value;
1945 
1946 	return 0;
1947 }
1948 
1949 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1950 			    enum xe_guc_klv_threshold_index index)
1951 {
1952 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1953 
1954 	return config->thresholds[index];
1955 }
1956 
1957 static const char *threshold_unit(u32 threshold)
1958 {
1959 	return threshold ? "" : "(disabled)";
1960 }
1961 
1962 /**
1963  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1964  * @gt: the &xe_gt
1965  * @vfid: the VF identifier
1966  * @index: the threshold index
1967  * @value: requested value (0 means disabled)
1968  *
1969  * This function can only be called on PF.
1970  *
1971  * Return: 0 on success or a negative error code on failure.
1972  */
1973 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1974 					enum xe_guc_klv_threshold_index index, u32 value)
1975 {
1976 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1977 	const char *name = xe_guc_klv_key_to_string(key);
1978 	int err;
1979 
1980 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1981 	err = pf_provision_threshold(gt, vfid, index, value);
1982 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1983 
1984 	return pf_config_set_u32_done(gt, vfid, value,
1985 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1986 				      name, threshold_unit, err);
1987 }
1988 
1989 /**
1990  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1991  * @gt: the &xe_gt
1992  * @vfid: the VF identifier
1993  * @index: the threshold index
1994  *
1995  * This function can only be called on PF.
1996  *
1997  * Return: value of VF's (or PF's) threshold.
1998  */
1999 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
2000 					enum xe_guc_klv_threshold_index index)
2001 {
2002 	u32 value;
2003 
2004 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2005 	value = pf_get_threshold(gt, vfid, index);
2006 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2007 
2008 	return value;
2009 }
2010 
2011 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
2012 {
2013 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
2014 
2015 #define reset_threshold_config(TAG, ...) ({				\
2016 	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
2017 });
2018 
2019 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
2020 #undef reset_threshold_config
2021 }
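
/*
 * Editor's note: MAKE_XE_GUC_KLV_THRESHOLDS_SET() is an x-macro that invokes
 * the given callback once per threshold defined in the set, so for a
 * hypothetical threshold tagged FOO the helper above expands roughly to:
 *
 *	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(FOO)] = 0;
 *
 * with one such statement emitted per threshold.
 */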
2022 
2023 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
2024 {
2025 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2026 	struct xe_device *xe = gt_to_xe(gt);
2027 	bool released;
2028 
2029 	if (xe_gt_is_main_type(gt)) {
2030 		pf_release_vf_config_ggtt(gt, config);
2031 		if (IS_DGFX(xe)) {
2032 			released = pf_release_vf_config_lmem(gt, config);
2033 			if (released && xe_device_has_lmtt(xe))
2034 				pf_update_vf_lmtt(xe, vfid);
2035 		}
2036 	}
2037 	pf_release_config_ctxs(gt, config);
2038 	pf_release_config_dbs(gt, config);
2039 	pf_reset_config_sched(gt, config);
2040 	pf_reset_config_thresholds(gt, config);
2041 }
2042 
2043 /**
2044  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
2045  * @gt: the &xe_gt
2046  * @vfid: the VF identifier (can't be PF)
2047  * @force: force configuration release
2048  *
2049  * This function can only be called on PF.
2050  *
2051  * Return: 0 on success or a negative error code on failure.
2052  */
2053 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
2054 {
2055 	int err;
2056 
2057 	xe_gt_assert(gt, vfid);
2058 
2059 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2060 	err = pf_send_vf_cfg_reset(gt, vfid);
2061 	if (!err || force)
2062 		pf_release_vf_config(gt, vfid);
2063 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2064 
2065 	if (unlikely(err)) {
2066 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
2067 				   vfid, ERR_PTR(err),
2068 				   force ? " but all resources were released anyway!" : "");
2069 	}
2070 
2071 	return force ? 0 : err;
2072 }
2073 
2074 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
2075 {
2076 	if (xe_ggtt_node_allocated(ggtt_region))
2077 		xe_ggtt_assign(ggtt_region, vfid);
2078 }
2079 
2080 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
2081 {
2082 	struct xe_migrate *m = tile->migrate;
2083 	struct dma_fence *fence;
2084 	int err;
2085 
2086 	if (!bo)
2087 		return 0;
2088 
2089 	xe_bo_lock(bo, false);
2090 	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
2091 	if (IS_ERR(fence)) {
2092 		err = PTR_ERR(fence);
2093 	} else if (!fence) {
2094 		err = -ENOMEM;
2095 	} else {
2096 		long ret = dma_fence_wait_timeout(fence, false, timeout);
2097 
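		/*
		 * dma_fence_wait_timeout() returns the remaining timeout in
		 * jiffies (> 0) on success, 0 on timeout, or a negative error
		 * code on failure; fold these into 0, -ETIMEDOUT or the
		 * error, respectively.
		 */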
2098 		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
2099 		dma_fence_put(fence);
2100 		if (!err)
2101 			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
2102 						jiffies_to_msecs(timeout - ret));
2103 	}
2104 	xe_bo_unlock(bo);
2105 
2106 	return err;
2107 }
2108 
2109 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
2110 {
2111 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2112 	struct xe_tile *tile = gt_to_tile(gt);
2113 	struct xe_device *xe = gt_to_xe(gt);
2114 	int err = 0;
2115 
2116 	/*
2117 	 * Only GGTT and LMEM need to be cleared by the PF.
2118 	 * GuC doorbell IDs and context IDs do not need any clearing.
2119 	 */
2120 	if (xe_gt_is_main_type(gt)) {
2121 		pf_sanitize_ggtt(config->ggtt_region, vfid);
2122 		if (IS_DGFX(xe))
2123 			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
2124 	}
2125 
2126 	return err;
2127 }
2128 
2129 /**
2130  * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
2131  * @gt: the &xe_gt
2132  * @vfid: the VF identifier (can't be PF)
2133  * @timeout: maximum timeout to wait for completion in jiffies
2134  *
2135  * This function can only be called on PF.
2136  *
2137  * Return: 0 on success or a negative error code on failure.
2138  */
2139 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
2140 {
2141 	int err;
2142 
2143 	xe_gt_assert(gt, vfid != PFID);
2144 
2145 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2146 	err = pf_sanitize_vf_resources(gt, vfid, timeout);
2147 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2148 
2149 	if (unlikely(err))
2150 		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
2151 				   vfid, ERR_PTR(err));
2152 	return err;
2153 }
2154 
2155 /**
2156  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
2157  * @gt: the &xe_gt
2158  * @vfid: the VF identifier (can't be PF)
2159  * @refresh: explicit refresh
2160  *
2161  * This function can only be called on PF.
2162  *
2163  * Return: 0 on success or a negative error code on failure.
2164  */
2165 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2166 {
2167 	int err = 0;
2168 
2169 	xe_gt_assert(gt, vfid);
2170 
2171 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2172 	err = pf_push_vf_cfg(gt, vfid, refresh);
2173 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2174 
2175 	if (unlikely(err)) {
2176 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2177 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2178 	}
2179 
2180 	return err;
2181 }
2182 
2183 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2184 {
2185 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2186 	struct xe_device *xe = gt_to_xe(gt);
2187 	bool is_primary = xe_gt_is_main_type(gt);
2188 	bool valid_ggtt, valid_ctxs, valid_dbs;
2189 	bool valid_any, valid_all;
2190 
2191 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2192 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2193 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2194 
2195 	/* note that GuC doorbells are optional */
2196 	valid_any = valid_ctxs || valid_dbs;
2197 	valid_all = valid_ctxs;
2198 
2199 	/* and GGTT/LMEM is configured on primary GT only */
2200 	valid_all = valid_all && valid_ggtt;
2201 	valid_any = valid_any || (valid_ggtt && is_primary);
2202 
2203 	if (xe_device_has_lmtt(xe)) {
2204 		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2205 
2206 		valid_any = valid_any || (valid_lmem && is_primary);
2207 		valid_all = valid_all && valid_lmem;
2208 	}
2209 
2210 	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
2211 }
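
/*
 * Editor's summary of pf_validate_vf_config(), derived from the code above:
 *
 *	0        - all mandatory resources are provisioned
 *	-ENOKEY  - only some of the resources are provisioned (partial config)
 *	-ENODATA - none of the resources are provisioned (empty config)
 */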
2212 
2213 /**
2214  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2215  * @gt: the &xe_gt
2216  * @vfid: the VF identifier (can't be PF)
2217  *
2218  * This function can only be called on PF.
2219  *
2220  * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2221  */
2222 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2223 {
2224 	bool empty;
2225 
2226 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2227 	xe_gt_assert(gt, vfid);
2228 
2229 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2230 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2231 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2232 
2233 	return empty;
2234 }
2235 
2236 /**
2237  * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2238  * @gt: the &xe_gt
2239  * @vfid: the VF identifier (can't be PF)
2240  * @buf: the buffer to save the config to (or NULL to query the buffer size)
2241  * @size: the size of the buffer (or 0 to query the buffer size)
2242  *
2243  * This function can only be called on PF.
2244  *
2245  * Return: minimum size of the buffer or the number of bytes saved,
2246  *         or a negative error code on failure.
2247  */
2248 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2249 {
2250 	struct xe_gt_sriov_config *config;
2251 	ssize_t ret;
2252 
2253 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2254 	xe_gt_assert(gt, vfid);
2255 	xe_gt_assert(gt, !(!buf ^ !size));
2256 
2257 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2258 	ret = pf_validate_vf_config(gt, vfid);
2259 	if (!size) {
2260 		ret = ret ? 0 : SZ_4K;
2261 	} else if (!ret) {
2262 		if (size < SZ_4K) {
2263 			ret = -ENOBUFS;
2264 		} else {
2265 			config = pf_pick_vf_config(gt, vfid);
2266 			ret = encode_config(buf, config, false) * sizeof(u32);
2267 		}
2268 	}
2269 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2270 
2271 	return ret;
2272 }
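
/*
 * Illustrative sketch (editor's example, not part of the driver): the usual
 * two-step pattern for xe_gt_sriov_pf_config_save() - query the required
 * buffer size first, then save into a freshly allocated buffer. The helper
 * name is an assumption for this example only; error unwinding is simplified
 * and <linux/slab.h> is assumed to be available.
 */
static ssize_t __maybe_unused example_save_vf_config(struct xe_gt *gt,
						     unsigned int vfid,
						     void **out)
{
	ssize_t size, saved;
	void *buf;

	size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
	if (size <= 0)
		return size;	/* error, or nothing to save */

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	saved = xe_gt_sriov_pf_config_save(gt, vfid, buf, size);
	if (saved < 0)
		kfree(buf);
	else
		*out = buf;
	return saved;
}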
2273 
2274 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2275 				    u32 key, u32 len, const u32 *value)
2276 {
2277 	switch (key) {
2278 	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2279 		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2280 			return -EBADMSG;
2281 		return pf_provision_vf_ctxs(gt, vfid, value[0]);
2282 
2283 	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2284 		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2285 			return -EBADMSG;
2286 		return pf_provision_vf_dbs(gt, vfid, value[0]);
2287 
2288 	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2289 		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2290 			return -EBADMSG;
2291 		return pf_provision_exec_quantum(gt, vfid, value[0]);
2292 
2293 	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2294 		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2295 			return -EBADMSG;
2296 		return pf_provision_preempt_timeout(gt, vfid, value[0]);
2297 
2298 	/* auto-generate case statements */
2299 #define define_threshold_key_to_provision_case(TAG, ...)				\
2300 	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
2301 		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
2302 		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
2303 			return -EBADMSG;						\
2304 		return pf_provision_threshold(gt, vfid,					\
2305 					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
2306 					      value[0]);
2307 
2308 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2309 #undef define_threshold_key_to_provision_case
2310 	}
2311 
2312 	if (xe_gt_is_media_type(gt))
2313 		return -EKEYREJECTED;
2314 
2315 	switch (key) {
2316 	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2317 		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2318 			return -EBADMSG;
2319 		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2320 
2321 	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2322 		if (!IS_DGFX(gt_to_xe(gt)))
2323 			return -EKEYREJECTED;
2324 		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2325 			return -EBADMSG;
2326 		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2327 	}
2328 
2329 	return -EKEYREJECTED;
2330 }
2331 
2332 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2333 				const u32 *klvs, size_t num_dwords)
2334 {
2335 	int err;
2336 
2337 	while (num_dwords >= GUC_KLV_LEN_MIN) {
2338 		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2339 		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2340 
2341 		klvs += GUC_KLV_LEN_MIN;
2342 		num_dwords -= GUC_KLV_LEN_MIN;
2343 
2344 		if (num_dwords < len)
2345 			err = -EBADMSG;
2346 		else
2347 			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2348 
2349 		if (err) {
2350 			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2351 			return err;
2352 		}
2353 
2354 		klvs += len;
2355 		num_dwords -= len;
2356 	}
2357 
2358 	return pf_validate_vf_config(gt, vfid);
2359 }
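
/*
 * Editor's note: the config blob parsed above is a stream of GuC KLVs, each
 * consisting of one 32-bit header dword with KEY and LEN fields (decoded via
 * GUC_KLV_0_KEY and GUC_KLV_0_LEN) followed by LEN value dwords. A 64-bit
 * value such as the GGTT size thus takes one header plus two value dwords,
 * low dword first, as reassembled by make_u64_from_u32(value[1], value[0]).
 */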
2360 
2361 /**
2362  * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2363  * @gt: the &xe_gt
2364  * @vfid: the VF identifier (can't be PF)
2365  * @buf: the buffer with config data
2366  * @size: the size of the config data
2367  *
2368  * This function can only be called on PF.
2369  *
2370  * Return: 0 on success or a negative error code on failure.
2371  */
2372 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2373 				  const void *buf, size_t size)
2374 {
2375 	int err;
2376 
2377 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2378 	xe_gt_assert(gt, vfid);
2379 
2380 	if (!size)
2381 		return -ENODATA;
2382 
2383 	if (size % sizeof(u32))
2384 		return -EINVAL;
2385 
2386 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2387 		struct drm_printer p = xe_gt_dbg_printer(gt);
2388 
2389 		drm_printf(&p, "restoring VF%u config:\n", vfid);
2390 		xe_guc_klv_print(buf, size / sizeof(u32), &p);
2391 	}
2392 
2393 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2394 	err = pf_send_vf_cfg_reset(gt, vfid);
2395 	if (!err) {
2396 		pf_release_vf_config(gt, vfid);
2397 		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2398 	}
2399 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2400 
2401 	return err;
2402 }
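
/*
 * Illustrative sketch (editor's example, not part of the driver): pairing
 * save and restore to clone a provisioning config from one VF to another,
 * reusing the hypothetical example_save_vf_config() helper sketched earlier.
 */
static int __maybe_unused example_clone_vf_config(struct xe_gt *gt,
						  unsigned int src_vfid,
						  unsigned int dst_vfid)
{
	void *buf = NULL;
	ssize_t size;
	int err;

	size = example_save_vf_config(gt, src_vfid, &buf);
	if (size <= 0)
		return size ? size : -ENODATA;

	err = xe_gt_sriov_pf_config_restore(gt, dst_vfid, buf, size);
	kfree(buf);
	return err;
}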
2403 
2404 static void pf_prepare_self_config(struct xe_gt *gt)
2405 {
2406 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);
2407 
2408 	/*
2409 	 * We want the PF to be allowed to use all context IDs, all doorbell IDs,
2410 	 * and the whole usable GGTT area. While we can store the ctxs/dbs counts
2411 	 * directly in the config structure, we can't do the same with the GGTT
2412 	 * configuration, so let it be prepared on demand while pushing the KLVs.
2413 	 */
2414 	config->num_ctxs = GUC_ID_MAX;
2415 	config->num_dbs = GUC_NUM_DOORBELLS;
2416 }
2417 
2418 static int pf_push_self_config(struct xe_gt *gt)
2419 {
2420 	int err;
2421 
2422 	err = pf_push_full_vf_config(gt, PFID);
2423 	if (err) {
2424 		xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
2425 				ERR_PTR(err));
2426 		return err;
2427 	}
2428 
2429 	xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
2430 	return 0;
2431 }
2432 
2433 static void fini_config(void *arg)
2434 {
2435 	struct xe_gt *gt = arg;
2436 	struct xe_device *xe = gt_to_xe(gt);
2437 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
2438 
2439 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2440 	for (n = 1; n <= total_vfs; n++)
2441 		pf_release_vf_config(gt, n);
2442 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2443 }
2444 
2445 /**
2446  * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
2447  * @gt: the &xe_gt
2448  *
2449  * This function can only be called on PF.
2450  *
2451  * Return: 0 on success or a negative error code on failure.
2452  */
2453 int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
2454 {
2455 	struct xe_device *xe = gt_to_xe(gt);
2456 	int err;
2457 
2458 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
2459 
2460 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2461 	pf_prepare_self_config(gt);
2462 	err = pf_push_self_config(gt);
2463 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2464 
2465 	if (err)
2466 		return err;
2467 
2468 	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
2469 }
2470 
2471 /**
2472  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2473  * @gt: the &xe_gt
2474  *
2475  * Any prior configurations pushed to GuC are lost when the GT is reset.
2476  * Push all non-empty VF configurations to the GuC again.
2477  *
2478  * This function can only be called on PF.
2479  */
2480 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2481 {
2482 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2483 	unsigned int fail = 0, skip = 0;
2484 
2485 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2486 	pf_push_self_config(gt);
2487 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2488 
2489 	for (n = 1; n <= total_vfs; n++) {
2490 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
2491 			skip++;
2492 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
2493 			fail++;
2494 	}
2495 
2496 	if (fail)
2497 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2498 				   fail, total_vfs - skip, str_plural(total_vfs - skip));
2499 
2500 	if (fail != total_vfs)
2501 		xe_gt_sriov_dbg(gt, "pushed %u, skipped %u of %u VF%s configurations\n",
2502 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2503 }
2504 
2505 /**
2506  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2507  * @gt: the &xe_gt
2508  * @p: the &drm_printer
2509  *
2510  * Print GGTT configuration data for all VFs.
2511  * VFs without provisioned GGTT are ignored.
2512  *
2513  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2514  */
2515 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2516 {
2517 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2518 	const struct xe_gt_sriov_config *config;
2519 	char buf[10];
2520 
2521 	for (n = 1; n <= total_vfs; n++) {
2522 		config = &gt->sriov.pf.vfs[n].config;
2523 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2524 			continue;
2525 
2526 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2527 				buf, sizeof(buf));
2528 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2529 			   n, config->ggtt_region->base.start,
2530 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2531 			   buf);
2532 	}
2533 
2534 	return 0;
2535 }
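
/*
 * Editor's note: with the format string above, the debugfs output looks
 * roughly like this (illustrative values only):
 *
 *	VF1:	0x8000000-0x47ffffff	(1.00 GiB)
 *	VF2:	0x48000000-0x87ffffff	(1.00 GiB)
 */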
2536 
2537 /**
2538  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2539  * @gt: the &xe_gt
2540  * @p: the &drm_printer
2541  *
2542  * Print GuC context ID allocations across all VFs.
2543  * VFs without GuC context IDs are skipped.
2544  *
2545  * This function can only be called on PF.
2546  * Return: 0 on success or a negative error code on failure.
2547  */
2548 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2549 {
2550 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2551 	const struct xe_gt_sriov_config *config;
2552 
2553 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2554 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2555 
2556 	for (n = 1; n <= total_vfs; n++) {
2557 		config = &gt->sriov.pf.vfs[n].config;
2558 		if (!config->num_ctxs)
2559 			continue;
2560 
2561 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2562 			   n,
2563 			   config->begin_ctx,
2564 			   config->begin_ctx + config->num_ctxs - 1,
2565 			   config->num_ctxs);
2566 	}
2567 
2568 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2569 	return 0;
2570 }
2571 
2572 /**
2573  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2574  * @gt: the &xe_gt
2575  * @p: the &drm_printer
2576  *
2577  * Print GuC doorbell ID allocations across all VFs.
2578  * VFs without GuC doorbell IDs are skipped.
2579  *
2580  * This function can only be called on PF.
2581  * Return: 0 on success or a negative error code on failure.
2582  */
2583 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2584 {
2585 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2586 	const struct xe_gt_sriov_config *config;
2587 
2588 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2589 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2590 
2591 	for (n = 1; n <= total_vfs; n++) {
2592 		config = &gt->sriov.pf.vfs[n].config;
2593 		if (!config->num_dbs)
2594 			continue;
2595 
2596 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2597 			   n,
2598 			   config->begin_db,
2599 			   config->begin_db + config->num_dbs - 1,
2600 			   config->num_dbs);
2601 	}
2602 
2603 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2604 	return 0;
2605 }
2606 
2607 /**
2608  * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2609  * @gt: the &xe_gt
2610  * @p: the &drm_printer
2611  *
2612  * Print LMEM allocations across all VFs.
2613  * VFs without LMEM allocation are skipped.
2614  *
2615  * This function can only be called on PF.
2616  * Return: 0 on success or a negative error code on failure.
2617  */
2618 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2619 {
2620 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2621 	const struct xe_gt_sriov_config *config;
2622 	char buf[10];
2623 
2624 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2625 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2626 
2627 	for (n = 1; n <= total_vfs; n++) {
2628 		config = &gt->sriov.pf.vfs[n].config;
2629 		if (!config->lmem_obj)
2630 			continue;
2631 
2632 		string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
2633 				buf, sizeof(buf));
2634 		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2635 			   n, xe_bo_size(config->lmem_obj), buf);
2636 	}
2637 
2638 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2639 	return 0;
2640 }
2641 
2642 /**
2643  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2644  * @gt: the &xe_gt
2645  * @p: the &drm_printer
2646  *
2647  * Print GGTT ranges that are available for provisioning.
2648  *
2649  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2650  */
2651 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2652 {
2653 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2654 	u64 alignment = pf_get_ggtt_alignment(gt);
2655 	u64 spare, avail, total;
2656 	char buf[10];
2657 
2658 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2659 
2660 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2661 
2662 	spare = pf_get_spare_ggtt(gt);
2663 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2664 
2665 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2666 
2667 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2668 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2669 
2670 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2671 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2672 
2673 	avail = total > spare ? total - spare : 0;
2674 
2675 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2676 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2677 
2678 	return 0;
2679 }
2680