// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
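/* e.g. make_u64_from_u32(0x00000001, 0x00000002) == 0x0000000100000002 */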

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
	struct xe_guc *guc = &gt->uc.guc;

	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       struct xe_guc_buf buf, u32 num_dwords)
{
	int ret;

	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		char name[8];

		xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
				 xe_sriov_function_name(vfid, name, sizeof(name)),
				 num_klvs, str_plural(num_klvs));
		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

/*
 * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
}

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}
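
/*
 * Illustration (values are made up): pushing a 64-bit KLV such as
 * LMEM_SIZE = 0x1_8000_0000 with pf_push_vf_cfg_u64() produces the
 * 3-dword stream:
 *
 *	dword[0] = FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2)
 *	dword[1] = 0x80000000	(lower 32 bits)
 *	dword[2] = 0x00000001	(upper 32 bits)
 */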

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		if (details) {
			cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
			cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
			cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
		}

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details && config->num_ctxs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details && config->num_dbs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}
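
/*
 * Example (illustrative): for a VF with a GGTT region, contexts and
 * doorbells but no LMEM, encode_config(cfg, config, true) emits, in this
 * order: GGTT_START, GGTT_SIZE, BEGIN_CONTEXT_ID, NUM_CONTEXTS,
 * BEGIN_DOORBELL_ID, NUM_DOORBELLS, EXEC_QUANTUM, PREEMPT_TIMEOUT and one
 * KLV per defined threshold.
 */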

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	cfg = xe_guc_buf_cpu_ptr(buf);
	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);

	return err;
}

static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
{
	int err = 0;

	xe_gt_assert(gt, vfid);
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (reset)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);

	return err;
}

static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
{
	return pf_push_vf_cfg(gt, vfid, true);
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}
static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * An explicit GGTT PTE assignment back to the PF using
		 * xe_ggtt_assign() is redundant, as the PTEs will be
		 * implicitly re-assigned to the PF by xe_ggtt_clear(),
		 * called from xe_ggtt_node_remove() below.
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

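/*
 * Reprovisioning flow below: any existing GGTT region is first zeroed in
 * GuC and released locally, before a new node is inserted and pushed to
 * both the primary and the media GT.
 */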
static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);

		err = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(err))
			return err;
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If @vfid represents the PF, then this function changes the PF's spare
 * GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}
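
/*
 * Example usage (illustrative only, assuming a primary &xe_gt is at hand):
 *
 *	err = xe_gt_sriov_pf_config_set_ggtt(gt, 1, SZ_1G);
 *	if (!err)
 *		size = xe_gt_sriov_pf_config_get_ggtt(gt, 1);
 */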

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
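		/* recursively report the VFs that were provisioned before the failure */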
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at the single largest GGTT
	 * region, as that will always be the best fit for the 1 VF case,
	 * and will most likely also nicely cover the other cases where VFs
	 * are provisioned on a fresh and idle PF driver, without any stale
	 * GGTT allocations spread in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}
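
/* e.g. a 4032M hole shared by 7 VFs: fair = ALIGN_DOWN(4032M / 7, 64K) = 576M */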

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If @vfid represents the PF, then the number of PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

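/*
 * Probe downward from the arithmetic fair share until a contiguous range of
 * fair * num_vfs IDs can actually be reserved; fragmentation may make the
 * ideal share unattainable.
 */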
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If @vfid represents the PF, then the number of PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

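/*
 * Rebuild the LMTT for a VF on every tile: the VF's per-tile LMEM objects
 * are mapped back-to-back into a single VF-visible address range.
 */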
static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		if (xe_device_has_lmtt(xe))
			pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_locked(xe, tile, NULL,
				 ALIGN(size, PAGE_SIZE),
				 ttm_bo_type_kernel,
				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				 XE_BO_FLAG_NEEDS_2M |
				 XE_BO_FLAG_PINNED |
				 XE_BO_FLAG_PINNED_LATE_RESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = xe_bo_pin(bo);
	xe_bo_unlock(bo);
	if (unlikely(err)) {
		xe_bo_put(bo);
		return err;
	}

	config->lmem_obj = bo;

	if (xe_device_has_lmtt(xe)) {
		err = pf_update_vf_lmtt(xe, vfid);
		if (unlikely(err))
			goto release;
	}

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	if (xe_device_has_lmtt(xe))
		pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}
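
/*
 * e.g. (illustrative numbers): 14000M available and 7 VFs gives 2000M each,
 * clamped by rounddown_pow_of_two() to 1024M; with CONFIG_DRM_XE_DEBUG_SRIOV
 * the share is further capped at MAX_FAIR_LMEM (128M).
 */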

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!IS_DGFX(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}
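
/*
 * Note: "result = result ?: err" above keeps the first error while still
 * attempting to provision the remaining resource types.
 */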
1677 
1678 static const char *exec_quantum_unit(u32 exec_quantum)
1679 {
1680 	return exec_quantum ? "ms" : "(infinity)";
1681 }
1682 
1683 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1684 				     u32 exec_quantum)
1685 {
1686 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1687 	int err;
1688 
1689 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1690 	if (unlikely(err))
1691 		return err;
1692 
1693 	config->exec_quantum = exec_quantum;
1694 	return 0;
1695 }
1696 
1697 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1698 {
1699 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1700 
1701 	return config->exec_quantum;
1702 }
1703 
1704 /**
1705  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1706  * @gt: the &xe_gt
1707  * @vfid: the VF identifier
1708  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1709  *
1710  * This function can only be called on PF.
1711  *
1712  * Return: 0 on success or a negative error code on failure.
1713  */
1714 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1715 					   u32 exec_quantum)
1716 {
1717 	int err;
1718 
1719 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1720 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1721 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1722 
1723 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1724 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1725 				      "execution quantum", exec_quantum_unit, err);
1726 }
1727 
1728 /**
1729  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1730  * @gt: the &xe_gt
1731  * @vfid: the VF identifier
1732  *
1733  * This function can only be called on PF.
1734  *
1735  * Return: VF's (or PF's) execution quantum in milliseconds.
1736  */
1737 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1738 {
1739 	u32 exec_quantum;
1740 
1741 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1742 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1743 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1744 
1745 	return exec_quantum;
1746 }
1747 
1748 static const char *preempt_timeout_unit(u32 preempt_timeout)
1749 {
1750 	return preempt_timeout ? "us" : "(infinity)";
1751 }
1752 
1753 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1754 					u32 preempt_timeout)
1755 {
1756 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1757 	int err;
1758 
1759 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1760 	if (unlikely(err))
1761 		return err;
1762 
1763 	config->preempt_timeout = preempt_timeout;
1764 
1765 	return 0;
1766 }
1767 
1768 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1769 {
1770 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1771 
1772 	return config->preempt_timeout;
1773 }
1774 
1775 /**
1776  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1777  * @gt: the &xe_gt
1778  * @vfid: the VF identifier
1779  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1780  *
1781  * This function can only be called on PF.
1782  *
1783  * Return: 0 on success or a negative error code on failure.
1784  */
1785 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1786 					      u32 preempt_timeout)
1787 {
1788 	int err;
1789 
1790 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1791 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1792 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1793 
1794 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1795 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1796 				      "preemption timeout", preempt_timeout_unit, err);
1797 }
1798 
1799 /**
1800  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1801  * @gt: the &xe_gt
1802  * @vfid: the VF identifier
1803  *
1804  * This function can only be called on PF.
1805  *
1806  * Return: VF's (or PF's) preemption timeout in microseconds.
1807  */
1808 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1809 {
1810 	u32 preempt_timeout;
1811 
1812 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1813 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1814 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1815 
1816 	return preempt_timeout;
1817 }
1818 
1819 static const char *sched_priority_unit(u32 priority)
1820 {
1821 	return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
1822 		priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
1823 		priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
1824 		"(?)";
1825 }
1826 
1827 static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1828 {
1829 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1830 	int err;
1831 
1832 	err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
1833 	if (unlikely(err))
1834 		return err;
1835 
1836 	config->sched_priority = priority;
1837 	return 0;
1838 }
1839 
1840 static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1841 {
1842 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1843 
1844 	return config->sched_priority;
1845 }
1846 
1847 /**
1848  * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
1849  * @gt: the &xe_gt
1850  * @vfid: the VF identifier
1851  * @priority: requested scheduling priority
1852  *
1853  * This function can only be called on PF.
1854  *
1855  * Return: 0 on success or a negative error code on failure.
1856  */
1857 int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1858 {
1859 	int err;
1860 
1861 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1862 	err = pf_provision_sched_priority(gt, vfid, priority);
1863 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1864 
1865 	return pf_config_set_u32_done(gt, vfid, priority,
1866 				      xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
1867 				      "scheduling priority", sched_priority_unit, err);
1868 }
1869 
1870 /**
1871  * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
1872  * @gt: the &xe_gt
1873  * @vfid: the VF identifier
1874  *
1875  * This function can only be called on PF.
1876  *
1877  * Return: VF's (or PF's) scheduling priority.
1878  */
1879 u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1880 {
1881 	u32 priority;
1882 
1883 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1884 	priority = pf_get_sched_priority(gt, vfid);
1885 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1886 
1887 	return priority;
1888 }
1889 
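/*
 * Example (illustrative, hypothetical caller): promoting VF2 to the high
 * GuC scheduling priority using one of the GUC_SCHED_PRIORITY_* levels:
 *
 *	err = xe_gt_sriov_pf_config_set_sched_priority(gt, 2, GUC_SCHED_PRIORITY_HIGH);
 */
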
1890 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1891 {
1892 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1893 
1894 	config->exec_quantum = 0;
1895 	config->preempt_timeout = 0;
1896 }
1897 
1898 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1899 				  enum xe_guc_klv_threshold_index index, u32 value)
1900 {
1901 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1902 	int err;
1903 
1904 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1905 	if (unlikely(err))
1906 		return err;
1907 
1908 	config->thresholds[index] = value;
1909 
1910 	return 0;
1911 }
1912 
1913 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1914 			    enum xe_guc_klv_threshold_index index)
1915 {
1916 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1917 
1918 	return config->thresholds[index];
1919 }
1920 
1921 static const char *threshold_unit(u32 threshold)
1922 {
1923 	return threshold ? "" : "(disabled)";
1924 }
1925 
1926 /**
1927  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1928  * @gt: the &xe_gt
1929  * @vfid: the VF identifier
1930  * @index: the threshold index
1931  * @value: requested value (0 means disabled)
1932  *
1933  * This function can only be called on PF.
1934  *
1935  * Return: 0 on success or a negative error code on failure.
1936  */
1937 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1938 					enum xe_guc_klv_threshold_index index, u32 value)
1939 {
1940 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1941 	const char *name = xe_guc_klv_key_to_string(key);
1942 	int err;
1943 
1944 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1945 	err = pf_provision_threshold(gt, vfid, index, value);
1946 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1947 
1948 	return pf_config_set_u32_done(gt, vfid, value,
1949 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1950 				      name, threshold_unit, err);
1951 }
1952 
1953 /**
1954  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1955  * @gt: the &xe_gt
1956  * @vfid: the VF identifier
1957  * @index: the threshold index
1958  *
1959  * This function can only be called on PF.
1960  *
1961  * Return: value of VF's (or PF's) threshold.
1962  */
1963 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1964 					enum xe_guc_klv_threshold_index index)
1965 {
1966 	u32 value;
1967 
1968 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1969 	value = pf_get_threshold(gt, vfid, index);
1970 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1971 
1972 	return value;
1973 }
1974 
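/*
 * Example (illustrative, hypothetical index): arming an adverse-event
 * threshold for VF1 and disarming it again with 0. Valid indexes are
 * generated from the MAKE_XE_GUC_KLV_THRESHOLDS_SET() definitions:
 *
 *	err = xe_gt_sriov_pf_config_set_threshold(gt, 1, index, 100);
 *	...
 *	err = xe_gt_sriov_pf_config_set_threshold(gt, 1, index, 0);
 */
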
1975 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1976 {
1977 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1978 
1979 #define reset_threshold_config(TAG, ...) ({				\
1980 	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
1981 });
1982 
1983 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
1984 #undef reset_threshold_config
1985 }
1986 
1987 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
1988 {
1989 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1990 	struct xe_device *xe = gt_to_xe(gt);
1991 
1992 	if (!xe_gt_is_media_type(gt)) {
1993 		pf_release_vf_config_ggtt(gt, config);
1994 		if (IS_DGFX(xe)) {
1995 			pf_release_vf_config_lmem(gt, config);
1996 			if (xe_device_has_lmtt(xe))
1997 				pf_update_vf_lmtt(xe, vfid);
1998 		}
1999 	}
2000 	pf_release_config_ctxs(gt, config);
2001 	pf_release_config_dbs(gt, config);
2002 	pf_reset_config_sched(gt, config);
2003 	pf_reset_config_thresholds(gt, config);
2004 }
2005 
2006 /**
2007  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
2008  * @gt: the &xe_gt
2009  * @vfid: the VF identifier (can't be PF)
2010  * @force: force configuration release
2011  *
2012  * This function can only be called on PF.
2013  *
2014  * Return: 0 on success or a negative error code on failure.
2015  */
2016 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
2017 {
2018 	int err;
2019 
2020 	xe_gt_assert(gt, vfid);
2021 
2022 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2023 	err = pf_send_vf_cfg_reset(gt, vfid);
2024 	if (!err || force)
2025 		pf_release_vf_config(gt, vfid);
2026 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2027 
2028 	if (unlikely(err)) {
2029 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
2030 				   vfid, ERR_PTR(err),
2031 				   force ? " but all resources were released anyway!" : "");
2032 	}
2033 
2034 	return force ? 0 : err;
2035 }
2036 
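/*
 * Example (illustrative, hypothetical caller): unprovisioning VF3, with a
 * forced fallback that releases the resources even if the GuC request
 * fails (the GuC error is then only reported, not returned):
 *
 *	err = xe_gt_sriov_pf_config_release(gt, 3, false);
 *	if (err)
 *		err = xe_gt_sriov_pf_config_release(gt, 3, true);
 */
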
2037 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
2038 {
2039 	if (xe_ggtt_node_allocated(ggtt_region))
2040 		xe_ggtt_assign(ggtt_region, vfid);
2041 }
2042 
2043 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
2044 {
2045 	struct xe_migrate *m = tile->migrate;
2046 	struct dma_fence *fence;
2047 	int err;
2048 
2049 	if (!bo)
2050 		return 0;
2051 
2052 	xe_bo_lock(bo, false);
2053 	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
2054 	if (IS_ERR(fence)) {
2055 		err = PTR_ERR(fence);
2056 	} else if (!fence) {
2057 		err = -ENOMEM;
2058 	} else {
2059 		long ret = dma_fence_wait_timeout(fence, false, timeout);
2060 
2061 		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
2062 		dma_fence_put(fence);
2063 		if (!err)
2064 			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
2065 						jiffies_to_msecs(timeout - ret));
2066 	}
2067 	xe_bo_unlock(bo);
2068 
2069 	return err;
2070 }
2071 
2072 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
2073 {
2074 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2075 	struct xe_tile *tile = gt_to_tile(gt);
2076 	struct xe_device *xe = gt_to_xe(gt);
2077 	int err = 0;
2078 
2079 	/*
2080 	 * Only GGTT and LMEM need to be cleared by the PF.
2081 	 * GuC doorbell IDs and context IDs do not need any clearing.
2082 	 */
2083 	if (!xe_gt_is_media_type(gt)) {
2084 		pf_sanitize_ggtt(config->ggtt_region, vfid);
2085 		if (IS_DGFX(xe))
2086 			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
2087 	}
2088 
2089 	return err;
2090 }
2091 
2092 /**
2093  * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
2094  * @gt: the &xe_gt
2095  * @vfid: the VF identifier (can't be PF)
2096  * @timeout: maximum timeout to wait for completion in jiffies
2097  *
2098  * This function can only be called on PF.
2099  *
2100  * Return: 0 on success or a negative error code on failure.
2101  */
2102 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
2103 {
2104 	int err;
2105 
2106 	xe_gt_assert(gt, vfid != PFID);
2107 
2108 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2109 	err = pf_sanitize_vf_resources(gt, vfid, timeout);
2110 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2111 
2112 	if (unlikely(err))
2113 		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
2114 				   vfid, ERR_PTR(err));
2115 	return err;
2116 }
2117 
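/*
 * Example (illustrative, hypothetical caller): scrubbing VF1 resources
 * before handing them over to another VF, allowing up to 5 seconds for
 * the LMEM clear to complete:
 *
 *	err = xe_gt_sriov_pf_config_sanitize(gt, 1, 5 * HZ);
 */
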
2118 /**
2119  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
2120  * @gt: the &xe_gt
2121  * @vfid: the VF identifier (can't be PF)
2122  * @refresh: explicit refresh
2123  *
2124  * This function can only be called on PF.
2125  *
2126  * Return: 0 on success or a negative error code on failure.
2127  */
2128 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2129 {
2130 	int err = 0;
2131 
2132 	xe_gt_assert(gt, vfid);
2133 
2134 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2135 	err = pf_push_vf_cfg(gt, vfid, refresh);
2136 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2137 
2138 	if (unlikely(err)) {
2139 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2140 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2141 	}
2142 
2143 	return err;
2144 }
2145 
2146 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2147 {
2148 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2149 	struct xe_device *xe = gt_to_xe(gt);
2150 	bool is_primary = !xe_gt_is_media_type(gt);
2151 	bool valid_ggtt, valid_ctxs, valid_dbs;
2152 	bool valid_any, valid_all;
2153 
2154 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2155 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2156 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2157 
2158 	/* note that GuC doorbells are optional */
2159 	valid_any = valid_ctxs || valid_dbs;
2160 	valid_all = valid_ctxs;
2161 
2162 	/* and GGTT/LMEM is configured on primary GT only */
2163 	valid_all = valid_all && valid_ggtt;
2164 	valid_any = valid_any || (valid_ggtt && is_primary);
2165 
2166 	if (IS_DGFX(xe)) {
2167 		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2168 
2169 		valid_any = valid_any || (valid_lmem && is_primary);
2170 		valid_all = valid_all && valid_lmem;
2171 	}
2172 
2173 	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
2174 }
2175 
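/*
 * Summary of the validation results above (illustrative):
 *
 *	0        all mandatory resources are provisioned (complete config)
 *	-ENOKEY  only some resources are provisioned (incomplete config)
 *	-ENODATA no resources are provisioned at all (empty config)
 */
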
2176 /**
2177  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2178  * @gt: the &xe_gt
2179  * @vfid: the VF identifier (can't be PF)
2180  *
2181  * This function can only be called on PF.
2182  *
2183  * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2184  */
2185 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2186 {
2187 	bool empty;
2188 
2189 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2190 	xe_gt_assert(gt, vfid);
2191 
2192 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2193 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2194 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2195 
2196 	return empty;
2197 }
2198 
2199 /**
2200  * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2201  * @gt: the &xe_gt
2202  * @vfid: the VF identifier (can't be PF)
2203  * @buf: the buffer to save a config to (or NULL to query the buf size)
2204  * @size: the size of the buffer (or 0 to query the buf size)
2205  *
2206  * This function can only be called on PF.
2207  *
2208  * Return: minimum size of the buffer or the number of bytes saved,
2209  *         or a negative error code on failure.
2210  */
2211 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2212 {
2213 	struct xe_gt_sriov_config *config;
2214 	ssize_t ret;
2215 
2216 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2217 	xe_gt_assert(gt, vfid);
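	/* buf and size must be provided together, or both omitted (size query) */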
2218 	xe_gt_assert(gt, !(!buf ^ !size));
2219 
2220 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2221 	ret = pf_validate_vf_config(gt, vfid);
2222 	if (!size) {
2223 		ret = ret ? 0 : SZ_4K;
2224 	} else if (!ret) {
2225 		if (size < SZ_4K) {
2226 			ret = -ENOBUFS;
2227 		} else {
2228 			config = pf_pick_vf_config(gt, vfid);
2229 			ret = encode_config(buf, config, false) * sizeof(u32);
2230 		}
2231 	}
2232 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2233 
2234 	return ret;
2235 }
2236 
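/*
 * Example (illustrative, hypothetical caller): the intended two-step usage
 * of the save function, querying the required buffer size first by passing
 * (NULL, 0), then saving the config blob:
 *
 *	ssize_t size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
 *	if (size > 0) {
 *		void *blob = kmalloc(size, GFP_KERNEL);
 *
 *		if (blob)
 *			size = xe_gt_sriov_pf_config_save(gt, vfid, blob, size);
 *	}
 */
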
2237 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2238 				    u32 key, u32 len, const u32 *value)
2239 {
2240 	switch (key) {
2241 	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2242 		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2243 			return -EBADMSG;
2244 		return pf_provision_vf_ctxs(gt, vfid, value[0]);
2245 
2246 	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2247 		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2248 			return -EBADMSG;
2249 		return pf_provision_vf_dbs(gt, vfid, value[0]);
2250 
2251 	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2252 		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2253 			return -EBADMSG;
2254 		return pf_provision_exec_quantum(gt, vfid, value[0]);
2255 
2256 	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2257 		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2258 			return -EBADMSG;
2259 		return pf_provision_preempt_timeout(gt, vfid, value[0]);
2260 
2261 	/* auto-generate case statements */
2262 #define define_threshold_key_to_provision_case(TAG, ...)				\
2263 	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
2264 		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
2265 		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
2266 			return -EBADMSG;						\
2267 		return pf_provision_threshold(gt, vfid,					\
2268 					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
2269 					      value[0]);
2270 
2271 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2272 #undef define_threshold_key_to_provision_case
2273 	}
2274 
2275 	if (xe_gt_is_media_type(gt))
2276 		return -EKEYREJECTED;
2277 
2278 	switch (key) {
2279 	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2280 		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2281 			return -EBADMSG;
2282 		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2283 
2284 	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2285 		if (!IS_DGFX(gt_to_xe(gt)))
2286 			return -EKEYREJECTED;
2287 		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2288 			return -EBADMSG;
2289 		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2290 	}
2291 
2292 	return -EKEYREJECTED;
2293 }
2294 
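/*
 * Illustrative layout of the restored blob: a stream of KLVs, each made of
 * a single key/length header dword followed by `len` value dwords, with
 * 64-bit values stored low dword first, e.g. for the GGTT size:
 *
 *	klvs[0] = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_GGTT_SIZE_KEY) |
 *		  FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_GGTT_SIZE_LEN);
 *	klvs[1] = lower_32_bits(ggtt_size);
 *	klvs[2] = upper_32_bits(ggtt_size);
 */
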
2295 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2296 				const u32 *klvs, size_t num_dwords)
2297 {
2298 	int err;
2299 
2300 	while (num_dwords >= GUC_KLV_LEN_MIN) {
2301 		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2302 		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2303 
2304 		klvs += GUC_KLV_LEN_MIN;
2305 		num_dwords -= GUC_KLV_LEN_MIN;
2306 
2307 		if (num_dwords < len)
2308 			err = -EBADMSG;
2309 		else
2310 			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2311 
2312 		if (err) {
2313 			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2314 			return err;
2315 		}
2316 
2317 		klvs += len;
2318 		num_dwords -= len;
2319 	}
2320 
2321 	return pf_validate_vf_config(gt, vfid);
2322 }
2323 
2324 /**
2325  * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2326  * @gt: the &xe_gt
2327  * @vfid: the VF identifier (can't be PF)
2328  * @buf: the buffer with config data
2329  * @size: the size of the config data
2330  *
2331  * This function can only be called on PF.
2332  *
2333  * Return: 0 on success or a negative error code on failure.
2334  */
2335 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2336 				  const void *buf, size_t size)
2337 {
2338 	int err;
2339 
2340 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2341 	xe_gt_assert(gt, vfid);
2342 
2343 	if (!size)
2344 		return -ENODATA;
2345 
2346 	if (size % sizeof(u32))
2347 		return -EINVAL;
2348 
2349 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2350 		struct drm_printer p = xe_gt_info_printer(gt);
2351 
2352 		drm_printf(&p, "restoring VF%u config:\n", vfid);
2353 		xe_guc_klv_print(buf, size / sizeof(u32), &p);
2354 	}
2355 
2356 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2357 	err = pf_send_vf_cfg_reset(gt, vfid);
2358 	if (!err) {
2359 		pf_release_vf_config(gt, vfid);
2360 		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2361 	}
2362 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2363 
2364 	return err;
2365 }
2366 
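/*
 * Example (illustrative, hypothetical caller): replicating VF provisioning
 * by feeding a previously saved blob back through the restore function:
 *
 *	ssize_t size = xe_gt_sriov_pf_config_save(gt, src_vfid, blob, blob_size);
 *	if (size > 0)
 *		err = xe_gt_sriov_pf_config_restore(gt, dst_vfid, blob, size);
 */
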
2367 static int pf_push_self_config(struct xe_gt *gt)
2368 {
2369 	int err;
2370 
2371 	err = pf_push_full_vf_config(gt, PFID);
2372 	if (err) {
2373 		xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
2374 				ERR_PTR(err));
2375 		return err;
2376 	}
2377 
2378 	xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
2379 	return 0;
2380 }
2381 
2382 static void fini_config(void *arg)
2383 {
2384 	struct xe_gt *gt = arg;
2385 	struct xe_device *xe = gt_to_xe(gt);
2386 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
2387 
2388 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2389 	for (n = 1; n <= total_vfs; n++)
2390 		pf_release_vf_config(gt, n);
2391 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2392 }
2393 
2394 /**
2395  * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
2396  * @gt: the &xe_gt
2397  *
2398  * This function can only be called on PF.
2399  *
2400  * Return: 0 on success or a negative error code on failure.
2401  */
2402 int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
2403 {
2404 	struct xe_device *xe = gt_to_xe(gt);
2405 	int err;
2406 
2407 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
2408 
2409 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2410 	err = pf_push_self_config(gt);
2411 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2412 
2413 	if (err)
2414 		return err;
2415 
2416 	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
2417 }
2418 
2419 /**
2420  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2421  * @gt: the &xe_gt
2422  *
2423  * Any prior configurations pushed to GuC are lost when the GT is reset.
2424  * Push all non-empty VF configurations back to the GuC.
2425  *
2426  * This function can only be called on PF.
2427  */
2428 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2429 {
2430 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2431 	unsigned int fail = 0, skip = 0;
2432 
2433 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2434 	pf_push_self_config(gt);
2435 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2436 
2437 	for (n = 1; n <= total_vfs; n++) {
2438 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
2439 			skip++;
2440 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
2441 			fail++;
2442 	}
2443 
2444 	if (fail)
2445 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2446 				   fail, total_vfs - skip, str_plural(total_vfs - skip));
2447 
2448 	if (fail != total_vfs)
2449 		xe_gt_sriov_dbg(gt, "pushed %u, skipped %u of %u VF%s configurations\n",
2450 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2451 }
2452 
2453 /**
2454  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2455  * @gt: the &xe_gt
2456  * @p: the &drm_printer
2457  *
2458  * Print GGTT configuration data for all VFs.
2459  * VFs without provisioned GGTT are ignored.
2460  *
2461  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2462  */
2463 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2464 {
2465 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2466 	const struct xe_gt_sriov_config *config;
2467 	char buf[10];
2468 
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

2469 	for (n = 1; n <= total_vfs; n++) {
2470 		config = &gt->sriov.pf.vfs[n].config;
2471 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2472 			continue;
2473 
2474 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2475 				buf, sizeof(buf));
2476 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2477 			   n, config->ggtt_region->base.start,
2478 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2479 			   buf);
2480 	}
2481 
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2482 	return 0;
2483 }
2484 
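/*
 * Illustrative output of the GGTT print function above (hypothetical values):
 *
 *	VF1:	0x1000000-0x1ffffff	(16.0 MiB)
 *	VF2:	0x2000000-0x2ffffff	(16.0 MiB)
 */
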
2485 /**
2486  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2487  * @gt: the &xe_gt
2488  * @p: the &drm_printer
2489  *
2490  * Print GuC context ID allocations across all VFs.
2491  * VFs without GuC context IDs are skipped.
2492  *
2493  * This function can only be called on PF.
2494  * Return: 0 on success or a negative error code on failure.
2495  */
2496 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2497 {
2498 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2499 	const struct xe_gt_sriov_config *config;
2500 
2501 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2502 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2503 
2504 	for (n = 1; n <= total_vfs; n++) {
2505 		config = &gt->sriov.pf.vfs[n].config;
2506 		if (!config->num_ctxs)
2507 			continue;
2508 
2509 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2510 			   n,
2511 			   config->begin_ctx,
2512 			   config->begin_ctx + config->num_ctxs - 1,
2513 			   config->num_ctxs);
2514 	}
2515 
2516 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2517 	return 0;
2518 }
2519 
2520 /**
2521  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2522  * @gt: the &xe_gt
2523  * @p: the &drm_printer
2524  *
2525  * Print GuC doorbell IDs allocations across all VFs.
2526  * VFs without GuC doorbell IDs are skipped.
2527  *
2528  * This function can only be called on PF.
2529  * Return: 0 on success or a negative error code on failure.
2530  */
2531 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2532 {
2533 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2534 	const struct xe_gt_sriov_config *config;
2535 
2536 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2537 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2538 
2539 	for (n = 1; n <= total_vfs; n++) {
2540 		config = &gt->sriov.pf.vfs[n].config;
2541 		if (!config->num_dbs)
2542 			continue;
2543 
2544 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2545 			   n,
2546 			   config->begin_db,
2547 			   config->begin_db + config->num_dbs - 1,
2548 			   config->num_dbs);
2549 	}
2550 
2551 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2552 	return 0;
2553 }
2554 
2555 /**
2556  * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2557  * @gt: the &xe_gt
2558  * @p: the &drm_printer
2559  *
2560  * Print LMEM allocations across all VFs.
2561  * VFs without LMEM allocation are skipped.
2562  *
2563  * This function can only be called on PF.
2564  * Return: 0 on success or a negative error code on failure.
2565  */
2566 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2567 {
2568 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2569 	const struct xe_gt_sriov_config *config;
2570 	char buf[10];
2571 
2572 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2573 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2574 
2575 	for (n = 1; n <= total_vfs; n++) {
2576 		config = &gt->sriov.pf.vfs[n].config;
2577 		if (!config->lmem_obj)
2578 			continue;
2579 
2580 		string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
2581 				buf, sizeof(buf));
2582 		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2583 			   n, config->lmem_obj->size, buf);
2584 	}
2585 
2586 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2587 	return 0;
2588 }
2589 
2590 /**
2591  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2592  * @gt: the &xe_gt
2593  * @p: the &drm_printer
2594  *
2595  * Print GGTT ranges that are available for the provisioning.
2596  *
2597  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2598  */
2599 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2600 {
2601 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2602 	u64 alignment = pf_get_ggtt_alignment(gt);
2603 	u64 spare, avail, total;
2604 	char buf[10];
2605 
2606 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2607 
2608 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2609 
2610 	spare = pf_get_spare_ggtt(gt);
2611 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2612 
2613 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2614 
2615 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2616 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2617 
2618 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2619 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2620 
2621 	avail = total > spare ? total - spare : 0;
2622 
2623 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2624 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2625 
2626 	return 0;
2627 }
2628