xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c (revision 917b10d90990fd2138b5dbc2d22cfa428c070ade)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}
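
/*
 * For illustration only: the PF2GUC_UPDATE_VF_CFG request built above lays
 * out as five dwords on the CT buffer (the exact encoding of the action
 * header is defined in abi/guc_actions_sriov_abi.h):
 *
 *	dw0: GUC_ACTION_PF2GUC_UPDATE_VF_CFG
 *	dw1: VF identifier
 *	dw2: lower 32 bits of the GGTT address of the KLV buffer
 *	dw3: upper 32 bits of the GGTT address of the KLV buffer
 *	dw4: size of the KLV buffer in dwords
 *
 * An addr/size of 0/0 is used by the reset path below to clear the config.
 */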

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
	struct xe_guc *guc = &gt->uc.guc;

	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       struct xe_guc_buf buf, u32 num_dwords)
{
	int ret;

	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		char name[8];

		xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
				 xe_sriov_function_name(vfid, name, sizeof(name)),
				 num_klvs, str_plural(num_klvs));
		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

/*
 * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
}
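
/*
 * Note: CLASS(xe_guc_buf_from_data, buf)(...) above is the scope-based
 * resource pattern from <linux/cleanup.h>: it declares a local variable
 * whose constructor grabs a GuC buffer holding a copy of the KLV data and
 * whose destructor releases that buffer automatically when the variable
 * goes out of scope, so no explicit cleanup is needed on the error path.
 * A rough hand-written equivalent (assuming the constructor/destructor
 * pair declared in xe_guc_buf.h) might look like:
 *
 *	struct xe_guc_buf buf = xe_guc_buf_from_data(&gt->uc.guc.buf, klvs,
 *						     num_dwords * sizeof(u32));
 *	...
 *	xe_guc_buf_release(buf);
 */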

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}
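
/*
 * For reference: a single KLV is one header dword followed by the value
 * dwords; the header packs the 16-bit key and the value length in dwords
 * (the exact bitfield split is given by GUC_KLV_0_KEY/GUC_KLV_0_LEN in
 * abi/guc_klvs_abi.h). A u64 KLV like the one above thus spans three dwords:
 *
 *	dw0: FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2)
 *	dw1: lower_32_bits(value)
 *	dw2: upper_32_bits(value)
 */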

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		if (details) {
			cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
			cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
			cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
		}

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details && config->num_ctxs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details && config->num_dbs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}
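
/*
 * As an example (sizes made up for illustration), encode_config() with
 * details=true for a VF owning some GGTT, 512 contexts starting at ID 1024
 * and no doorbells would emit a KLV stream along the lines of:
 *
 *	GGTT_START = <u64 start>, GGTT_SIZE = <u64 size>,
 *	BEGIN_CONTEXT_ID = 1024, NUM_CONTEXTS = 512,
 *	NUM_DOORBELLS = 0, LMEM_SIZE (only if lmem_obj is set),
 *	EXEC_QUANTUM, PREEMPT_TIMEOUT, plus one KLV per defined threshold
 *	(expanded by MAKE_XE_GUC_KLV_THRESHOLDS_SET above).
 */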

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	cfg = xe_guc_buf_cpu_ptr(buf);
	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);

	return err;
}

static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
{
	int err = 0;

	xe_gt_assert(gt, vfid);
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (reset)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);

	return err;
}

static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
{
	return pf_push_vf_cfg(gt, vfid, true);
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * Explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as the PTEs will be implicitly re-assigned to
		 * the PF by the xe_ggtt_clear() called by xe_ggtt_node_remove()
		 * below.
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);

		err = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(err))
			return err;
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents the PF, then the function will change the PF's spare
 * GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}
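
/*
 * Worked example of the partial-failure reporting above (illustrative
 * values): a bulk request for first=1, num_vfs=4 that fails while
 * provisioning VF3 is reported with last=3, so the recursive call covers
 * the successfully provisioned VF1..VF2 as a smaller "bulk done" range,
 * and the final pf_config_set_u64_done() reports the error against VF3.
 */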

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at the single largest GGTT
	 * region, as that will always be the best fit for the 1 VF case,
	 * and will most likely also nicely cover other cases where VFs are
	 * provisioned on a fresh and idle PF driver, without any stale GGTT
	 * allocations spread in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}
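
/*
 * Numeric example (values are illustrative only): with a largest hole of
 * 4 GiB, a 64 MiB spare and 3 VFs, available = 4096 MiB - 64 MiB = 4032 MiB,
 * so fair = div_u64(4032M, 3) = 1344 MiB, which is already a multiple of a
 * 64 KiB alignment, giving each VF a 1344 MiB GGTT slice.
 */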

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF, then the number of PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}
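
/*
 * The loop above probes downwards from the arithmetic upper bound: a trial
 * reservation of fair * num_vfs IDs (keeping the spare pool intact) is made
 * and immediately released, and the first fair value for which it succeeds
 * wins. Illustrative numbers only: with idm->total = 65536 and spare = 256,
 * a request for 7 VFs starts at fair = (65536 - 256) / 7 = 9325 and only
 * drops lower if fragmentation defeats the trial reservation.
 */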

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF, then the number of PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}
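
/*
 * Same probe-and-release pattern as pf_estimate_fair_ctxs(), only against
 * the doorbell manager. Illustrative numbers only: assuming GUC_NUM_DOORBELLS
 * is 256 and spare is 1, a request for 7 VFs starts probing at
 * fair = (256 - 1) / 7 = 36 doorbells per VF.
 */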

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		if (xe_device_has_lmtt(xe))
			pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_locked(xe, tile, NULL,
				 ALIGN(size, PAGE_SIZE),
				 ttm_bo_type_kernel,
				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				 XE_BO_FLAG_NEEDS_2M |
				 XE_BO_FLAG_PINNED |
				 XE_BO_FLAG_PINNED_LATE_RESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = xe_bo_pin(bo);
	xe_bo_unlock(bo);
	if (unlikely(err)) {
		xe_bo_put(bo);
		return err;
	}

	config->lmem_obj = bo;

	if (xe_device_has_lmtt(xe)) {
		err = pf_update_vf_lmtt(xe, vfid);
		if (unlikely(err))
			goto release;
	}

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	if (xe_device_has_lmtt(xe))
		pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}
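
/*
 * Numeric example (illustrative values only): with 10 GiB of available LMEM
 * and 3 VFs, div_u64() gives roughly 3.33 GiB, rounddown_pow_of_two()
 * reduces that to 2 GiB, which is already 2 MiB aligned; each VF would then
 * be offered a 2 GiB LMEM quota (capped at MAX_FAIR_LMEM on debug builds).
 */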

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!xe_device_has_lmtt(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}
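
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * when the PF enables num_vfs VFs, it could fairly provision them on every
 * GT in one pass, e.g.:
 *
 *	for_each_gt(gt, xe, gtid) {
 *		err = xe_gt_sriov_pf_config_set_fair(gt, 1, num_vfs);
 *		if (err)
 *			break;
 *	}
 *
 * Note that the first error is remembered but does not abort the remaining
 * fair_* steps above, so a GGTT failure still lets context and doorbell
 * quotas be provisioned.
 */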
1679 
1680 static const char *exec_quantum_unit(u32 exec_quantum)
1681 {
1682 	return exec_quantum ? "ms" : "(infinity)";
1683 }
1684 
1685 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1686 				     u32 exec_quantum)
1687 {
1688 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1689 	int err;
1690 
1691 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1692 	if (unlikely(err))
1693 		return err;
1694 
1695 	config->exec_quantum = exec_quantum;
1696 	return 0;
1697 }
1698 
1699 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1700 {
1701 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1702 
1703 	return config->exec_quantum;
1704 }
1705 
1706 /**
1707  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1708  * @gt: the &xe_gt
1709  * @vfid: the VF identifier
1710  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1711  *
1712  * This function can only be called on PF.
1713  *
1714  * Return: 0 on success or a negative error code on failure.
1715  */
1716 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1717 					   u32 exec_quantum)
1718 {
1719 	int err;
1720 
1721 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1722 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1723 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1724 
1725 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1726 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1727 				      "execution quantum", exec_quantum_unit, err);
1728 }
1729 
1730 /**
1731  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1732  * @gt: the &xe_gt
1733  * @vfid: the VF identifier
1734  *
1735  * This function can only be called on PF.
1736  *
1737  * Return: VF's (or PF's) execution quantum in milliseconds.
1738  */
1739 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1740 {
1741 	u32 exec_quantum;
1742 
1743 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1744 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1745 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1746 
1747 	return exec_quantum;
1748 }
1749 
1750 static const char *preempt_timeout_unit(u32 preempt_timeout)
1751 {
1752 	return preempt_timeout ? "us" : "(infinity)";
1753 }
1754 
1755 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1756 					u32 preempt_timeout)
1757 {
1758 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1759 	int err;
1760 
1761 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1762 	if (unlikely(err))
1763 		return err;
1764 
1765 	config->preempt_timeout = preempt_timeout;
1766 
1767 	return 0;
1768 }
1769 
1770 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1771 {
1772 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1773 
1774 	return config->preempt_timeout;
1775 }
1776 
1777 /**
1778  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1779  * @gt: the &xe_gt
1780  * @vfid: the VF identifier
1781  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1782  *
1783  * This function can only be called on PF.
1784  *
1785  * Return: 0 on success or a negative error code on failure.
1786  */
1787 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1788 					      u32 preempt_timeout)
1789 {
1790 	int err;
1791 
1792 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1793 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1794 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1795 
1796 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1797 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1798 				      "preemption timeout", preempt_timeout_unit, err);
1799 }
1800 
1801 /**
1802  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1803  * @gt: the &xe_gt
1804  * @vfid: the VF identifier
1805  *
1806  * This function can only be called on PF.
1807  *
1808  * Return: VF's (or PF's) preemption timeout in microseconds.
1809  */
1810 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1811 {
1812 	u32 preempt_timeout;
1813 
1814 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1815 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1816 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1817 
1818 	return preempt_timeout;
1819 }
1820 
1821 static const char *sched_priority_unit(u32 priority)
1822 {
1823 	return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
1824 		priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
1825 		priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
1826 		"(?)";
1827 }
1828 
1829 static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1830 {
1831 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1832 	int err;
1833 
1834 	err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
1835 	if (unlikely(err))
1836 		return err;
1837 
1838 	config->sched_priority = priority;
1839 	return 0;
1840 }
1841 
1842 static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1843 {
1844 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1845 
1846 	return config->sched_priority;
1847 }
1848 
1849 /**
1850  * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
1851  * @gt: the &xe_gt
1852  * @vfid: the VF identifier
1853  * @priority: requested scheduling priority
1854  *
1855  * This function can only be called on PF.
1856  *
1857  * Return: 0 on success or a negative error code on failure.
1858  */
1859 int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1860 {
1861 	int err;
1862 
1863 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1864 	err = pf_provision_sched_priority(gt, vfid, priority);
1865 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1866 
1867 	return pf_config_set_u32_done(gt, vfid, priority,
1868 				      xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
1869 				      "scheduling priority", sched_priority_unit, err);
1870 }
1871 
1872 /**
1873  * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
1874  * @gt: the &xe_gt
1875  * @vfid: the VF identifier
1876  *
1877  * This function can only be called on PF.
1878  *
1879  * Return: VF's (or PF's) scheduling priority.
1880  */
1881 u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1882 {
1883 	u32 priority;
1884 
1885 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1886 	priority = pf_get_sched_priority(gt, vfid);
1887 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1888 
1889 	return priority;
1890 }
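
/*
 * Example (illustrative sketch): promoting VF2 to high scheduling priority
 * using one of the GUC_SCHED_PRIORITY_* levels decoded by
 * sched_priority_unit() above; the VF number is arbitrary.
 *
 *	err = xe_gt_sriov_pf_config_set_sched_priority(gt, 2,
 *						       GUC_SCHED_PRIORITY_HIGH);
 *	if (err)
 *		return err;
 */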
1891 
1892 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1893 {
1894 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1895 
1896 	config->exec_quantum = 0;
1897 	config->preempt_timeout = 0;
1898 }
1899 
1900 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1901 				  enum xe_guc_klv_threshold_index index, u32 value)
1902 {
1903 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1904 	int err;
1905 
1906 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1907 	if (unlikely(err))
1908 		return err;
1909 
1910 	config->thresholds[index] = value;
1911 
1912 	return 0;
1913 }
1914 
1915 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1916 			    enum xe_guc_klv_threshold_index index)
1917 {
1918 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1919 
1920 	return config->thresholds[index];
1921 }
1922 
1923 static const char *threshold_unit(u32 threshold)
1924 {
1925 	return threshold ? "" : "(disabled)";
1926 }
1927 
1928 /**
1929  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1930  * @gt: the &xe_gt
1931  * @vfid: the VF identifier
1932  * @index: the threshold index
1933  * @value: requested value (0 means disabled)
1934  *
1935  * This function can only be called on PF.
1936  *
1937  * Return: 0 on success or a negative error code on failure.
1938  */
1939 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1940 					enum xe_guc_klv_threshold_index index, u32 value)
1941 {
1942 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1943 	const char *name = xe_guc_klv_key_to_string(key);
1944 	int err;
1945 
1946 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1947 	err = pf_provision_threshold(gt, vfid, index, value);
1948 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1949 
1950 	return pf_config_set_u32_done(gt, vfid, value,
1951 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1952 				      name, threshold_unit, err);
1953 }
1954 
1955 /**
1956  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1957  * @gt: the &xe_gt
1958  * @vfid: the VF identifier
1959  * @index: the threshold index
1960  *
1961  * This function can only be called on PF.
1962  *
1963  * Return: value of VF's (or PF's) threshold.
1964  */
1965 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1966 					enum xe_guc_klv_threshold_index index)
1967 {
1968 	u32 value;
1969 
1970 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1971 	value = pf_get_threshold(gt, vfid, index);
1972 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1973 
1974 	return value;
1975 }
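
/*
 * Example (illustrative sketch): arming one adverse-event threshold for VF1
 * and then disabling it again (value 0 disables); FOO is a placeholder for
 * one of the tags defined by MAKE_XE_GUC_KLV_THRESHOLDS_SET() and the
 * values are arbitrary.
 *
 *	err = xe_gt_sriov_pf_config_set_threshold(gt, 1,
 *						  MAKE_XE_GUC_KLV_THRESHOLD_INDEX(FOO),
 *						  100);
 *	...
 *	err = xe_gt_sriov_pf_config_set_threshold(gt, 1,
 *						  MAKE_XE_GUC_KLV_THRESHOLD_INDEX(FOO),
 *						  0);
 */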
1976 
1977 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1978 {
1979 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1980 
1981 #define reset_threshold_config(TAG, ...) ({				\
1982 	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
1983 });
1984 
1985 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
1986 #undef reset_threshold_config
1987 }
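
/*
 * Rough expansion sketch of the X-macro above, assuming the thresholds set
 * provides hypothetical tags FOO and BAR (the real tags are defined in
 * xe_guc_klv_thresholds_set.h):
 *
 *	({ config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(FOO)] = 0; });
 *	({ config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(BAR)] = 0; });
 */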
1988 
1989 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
1990 {
1991 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1992 	struct xe_device *xe = gt_to_xe(gt);
1993 
1994 	if (!xe_gt_is_media_type(gt)) {
1995 		pf_release_vf_config_ggtt(gt, config);
1996 		if (IS_DGFX(xe)) {
1997 			pf_release_vf_config_lmem(gt, config);
1998 			if (xe_device_has_lmtt(xe))
1999 				pf_update_vf_lmtt(xe, vfid);
2000 		}
2001 	}
2002 	pf_release_config_ctxs(gt, config);
2003 	pf_release_config_dbs(gt, config);
2004 	pf_reset_config_sched(gt, config);
2005 	pf_reset_config_thresholds(gt, config);
2006 }
2007 
2008 /**
2009  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
2010  * @gt: the &xe_gt
2011  * @vfid: the VF identifier (can't be PF)
2012  * @force: force configuration release
2013  *
2014  * This function can only be called on PF.
2015  *
2016  * Return: 0 on success or a negative error code on failure.
2017  */
2018 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
2019 {
2020 	int err;
2021 
2022 	xe_gt_assert(gt, vfid);
2023 
2024 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2025 	err = pf_send_vf_cfg_reset(gt, vfid);
2026 	if (!err || force)
2027 		pf_release_vf_config(gt, vfid);
2028 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2029 
2030 	if (unlikely(err)) {
2031 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
2032 				   vfid, ERR_PTR(err),
2033 				   force ? " but all resources were released anyway!" : "");
2034 	}
2035 
2036 	return force ? 0 : err;
2037 }
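
/*
 * Example (illustrative sketch): unprovisioning VF1, falling back to a
 * forced release (which always returns 0) if the GuC rejects the reset:
 *
 *	if (xe_gt_sriov_pf_config_release(gt, 1, false))
 *		xe_gt_sriov_pf_config_release(gt, 1, true);
 */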
2038 
2039 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
2040 {
2041 	if (xe_ggtt_node_allocated(ggtt_region))
2042 		xe_ggtt_assign(ggtt_region, vfid);
2043 }
2044 
2045 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
2046 {
2047 	struct xe_migrate *m = tile->migrate;
2048 	struct dma_fence *fence;
2049 	int err;
2050 
2051 	if (!bo)
2052 		return 0;
2053 
2054 	xe_bo_lock(bo, false);
2055 	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
2056 	if (IS_ERR(fence)) {
2057 		err = PTR_ERR(fence);
2058 	} else if (!fence) {
2059 		err = -ENOMEM;
2060 	} else {
2061 		long ret = dma_fence_wait_timeout(fence, false, timeout);
2062 
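		/*
		 * dma_fence_wait_timeout() returns the remaining timeout in
		 * jiffies (> 0) on success, 0 on timeout, or a negative
		 * errno; fold that into 0 / -ETIMEDOUT / error below.
		 */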
2063 		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
2064 		dma_fence_put(fence);
2065 		if (!err)
2066 			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
2067 						jiffies_to_msecs(timeout - ret));
2068 	}
2069 	xe_bo_unlock(bo);
2070 
2071 	return err;
2072 }
2073 
2074 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
2075 {
2076 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2077 	struct xe_tile *tile = gt_to_tile(gt);
2078 	struct xe_device *xe = gt_to_xe(gt);
2079 	int err = 0;
2080 
2081 	/*
2082 	 * Only GGTT and LMEM require clearing by the PF.
2083 	 * GuC doorbell IDs and context IDs do not need any clearing.
2084 	 */
2085 	if (!xe_gt_is_media_type(gt)) {
2086 		pf_sanitize_ggtt(config->ggtt_region, vfid);
2087 		if (IS_DGFX(xe))
2088 			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
2089 	}
2090 
2091 	return err;
2092 }
2093 
2094 /**
2095  * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
2096  * @gt: the &xe_gt
2097  * @vfid: the VF identifier (can't be PF)
2098  * @timeout: maximum time to wait for completion, in jiffies
2099  *
2100  * This function can only be called on PF.
2101  *
2102  * Return: 0 on success or a negative error code on failure.
2103  */
2104 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
2105 {
2106 	int err;
2107 
2108 	xe_gt_assert(gt, vfid != PFID);
2109 
2110 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2111 	err = pf_sanitize_vf_resources(gt, vfid, timeout);
2112 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2113 
2114 	if (unlikely(err))
2115 		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
2116 				   vfid, ERR_PTR(err));
2117 	return err;
2118 }
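
/*
 * Example (illustrative sketch): sanitizing VF1 resources while allowing
 * up to 5 seconds for the LMEM clear to complete:
 *
 *	err = xe_gt_sriov_pf_config_sanitize(gt, 1, msecs_to_jiffies(5000));
 */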
2119 
2120 /**
2121  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
2122  * @gt: the &xe_gt
2123  * @vfid: the VF identifier (can't be PF)
2124  * @refresh: true to reset the configuration in GuC before pushing it again
2125  *
2126  * This function can only be called on PF.
2127  *
2128  * Return: 0 on success or a negative error code on failure.
2129  */
2130 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2131 {
2132 	int err = 0;
2133 
2134 	xe_gt_assert(gt, vfid);
2135 
2136 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2137 	err = pf_push_vf_cfg(gt, vfid, refresh);
2138 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2139 
2140 	if (unlikely(err)) {
2141 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2142 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2143 	}
2144 
2145 	return err;
2146 }
2147 
2148 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2149 {
2150 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2151 	struct xe_device *xe = gt_to_xe(gt);
2152 	bool is_primary = !xe_gt_is_media_type(gt);
2153 	bool valid_ggtt, valid_ctxs, valid_dbs;
2154 	bool valid_any, valid_all;
2155 
2156 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2157 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2158 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2159 
2160 	/* note that GuC doorbells are optional */
2161 	valid_any = valid_ctxs || valid_dbs;
2162 	valid_all = valid_ctxs;
2163 
2164 	/* and GGTT/LMEM is configured on primary GT only */
2165 	valid_all = valid_all && valid_ggtt;
2166 	valid_any = valid_any || (valid_ggtt && is_primary);
2167 
2168 	if (xe_device_has_lmtt(xe)) {
2169 		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2170 
2171 		valid_any = valid_any || (valid_lmem && is_primary);
2172 		valid_all = valid_all && valid_lmem;
2173 	}
2174 
2175 	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
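	/*
	 * Map the validation outcome onto distinct errors: 0 when all
	 * mandatory resources are provisioned, -ENOKEY when the config is
	 * only partially provisioned, -ENODATA when it is empty.
	 */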
2176 }
2177 
2178 /**
2179  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2180  * @gt: the &xe_gt
2181  * @vfid: the VF identifier (can't be PF)
2182  *
2183  * This function can only be called on PF.
2184  *
2185  * Return: true if the VF's mandatory configuration (GGTT, LMEM, ...) is empty.
2186  */
2187 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2188 {
2189 	bool empty;
2190 
2191 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2192 	xe_gt_assert(gt, vfid);
2193 
2194 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2195 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2196 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2197 
2198 	return empty;
2199 }
2200 
2201 /**
2202  * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2203  * @gt: the &xe_gt
2204  * @vfid: the VF identifier (can't be PF)
2205  * @buf: the buffer to save a config to (or NULL to query the buf size)
2206  * @size: the size of the buffer (or 0 to query the buf size)
2207  *
2208  * This function can only be called on PF.
2209  *
2210  * Return: minimum size of the buffer or the number of bytes saved,
2211  *         or a negative error code on failure.
2212  */
2213 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2214 {
2215 	struct xe_gt_sriov_config *config;
2216 	ssize_t ret;
2217 
2218 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2219 	xe_gt_assert(gt, vfid);
2220 	xe_gt_assert(gt, !(!buf ^ !size));
2221 
2222 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2223 	ret = pf_validate_vf_config(gt, vfid);
2224 	if (!size) {
2225 		ret = ret ? 0 : SZ_4K;
2226 	} else if (!ret) {
2227 		if (size < SZ_4K) {
2228 			ret = -ENOBUFS;
2229 		} else {
2230 			config = pf_pick_vf_config(gt, vfid);
2231 			ret = encode_config(buf, config, false) * sizeof(u32);
2232 		}
2233 	}
2234 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2235 
2236 	return ret;
2237 }
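
/*
 * Example (illustrative sketch) of the expected two-call pattern: query the
 * minimum buffer size first, then save the config blob (error handling
 * trimmed):
 *
 *	ssize_t size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
 *	void *blob = size > 0 ? kzalloc(size, GFP_KERNEL) : NULL;
 *
 *	if (blob)
 *		size = xe_gt_sriov_pf_config_save(gt, vfid, blob, size);
 */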
2238 
2239 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2240 				    u32 key, u32 len, const u32 *value)
2241 {
2242 	switch (key) {
2243 	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2244 		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2245 			return -EBADMSG;
2246 		return pf_provision_vf_ctxs(gt, vfid, value[0]);
2247 
2248 	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2249 		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2250 			return -EBADMSG;
2251 		return pf_provision_vf_dbs(gt, vfid, value[0]);
2252 
2253 	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2254 		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2255 			return -EBADMSG;
2256 		return pf_provision_exec_quantum(gt, vfid, value[0]);
2257 
2258 	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2259 		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2260 			return -EBADMSG;
2261 		return pf_provision_preempt_timeout(gt, vfid, value[0]);
2262 
2263 	/* auto-generate case statements */
2264 #define define_threshold_key_to_provision_case(TAG, ...)				\
2265 	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
2266 		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
2267 		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
2268 			return -EBADMSG;						\
2269 		return pf_provision_threshold(gt, vfid,					\
2270 					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
2271 					      value[0]);
2272 
2273 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2274 #undef define_threshold_key_to_provision_case
2275 	}
2276 
2277 	if (xe_gt_is_media_type(gt))
2278 		return -EKEYREJECTED;
2279 
2280 	switch (key) {
2281 	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2282 		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2283 			return -EBADMSG;
2284 		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2285 
2286 	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2287 		if (!IS_DGFX(gt_to_xe(gt)))
2288 			return -EKEYREJECTED;
2289 		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2290 			return -EBADMSG;
2291 		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2292 	}
2293 
2294 	return -EKEYREJECTED;
2295 }
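
/*
 * Layout of the 32-bit KLV stream parsed below, per the GuC KLV ABI: each
 * KLV starts with a single header dword (GUC_KLV_LEN_MIN) that packs the
 * key and the number of value dwords that follow:
 *
 *	dword 0:	[ GUC_KLV_0_KEY | GUC_KLV_0_LEN ]
 *	dword 1:	value[0]
 *	 ...
 *	dword len:	value[len - 1], next KLV header follows
 */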
2296 
2297 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2298 				const u32 *klvs, size_t num_dwords)
2299 {
2300 	int err;
2301 
2302 	while (num_dwords >= GUC_KLV_LEN_MIN) {
2303 		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2304 		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2305 
2306 		klvs += GUC_KLV_LEN_MIN;
2307 		num_dwords -= GUC_KLV_LEN_MIN;
2308 
2309 		if (num_dwords < len)
2310 			err = -EBADMSG;
2311 		else
2312 			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2313 
2314 		if (err) {
2315 			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2316 			return err;
2317 		}
2318 
2319 		klvs += len;
2320 		num_dwords -= len;
2321 	}
2322 
2323 	return pf_validate_vf_config(gt, vfid);
2324 }
2325 
2326 /**
2327  * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2328  * @gt: the &xe_gt
2329  * @vfid: the VF identifier (can't be PF)
2330  * @buf: the buffer with config data
2331  * @size: the size of the config data
2332  *
2333  * This function can only be called on PF.
2334  *
2335  * Return: 0 on success or a negative error code on failure.
2336  */
2337 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2338 				  const void *buf, size_t size)
2339 {
2340 	int err;
2341 
2342 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2343 	xe_gt_assert(gt, vfid);
2344 
2345 	if (!size)
2346 		return -ENODATA;
2347 
2348 	if (size % sizeof(u32))
2349 		return -EINVAL;
2350 
2351 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2352 		struct drm_printer p = xe_gt_info_printer(gt);
2353 
2354 		drm_printf(&p, "restoring VF%u config:\n", vfid);
2355 		xe_guc_klv_print(buf, size / sizeof(u32), &p);
2356 	}
2357 
2358 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2359 	err = pf_send_vf_cfg_reset(gt, vfid);
2360 	if (!err) {
2361 		pf_release_vf_config(gt, vfid);
2362 		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2363 	}
2364 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2365 
2366 	return err;
2367 }
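
/*
 * Example (illustrative sketch): replaying a config blob saved from VF1
 * onto VF2, e.g. as part of a migration flow (error handling trimmed):
 *
 *	ssize_t size = xe_gt_sriov_pf_config_save(gt, 1, blob, blob_size);
 *
 *	if (size > 0)
 *		err = xe_gt_sriov_pf_config_restore(gt, 2, blob, size);
 */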
2368 
2369 static void fini_config(void *arg)
2370 {
2371 	struct xe_gt *gt = arg;
2372 	struct xe_device *xe = gt_to_xe(gt);
2373 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
2374 
2375 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2376 	for (n = 1; n <= total_vfs; n++)
2377 		pf_release_vf_config(gt, n);
2378 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2379 }
2380 
2381 /**
2382  * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
2383  * @gt: the &xe_gt
2384  *
2385  * This function can only be called on PF.
2386  *
2387  * Return: 0 on success or a negative error code on failure.
2388  */
2389 int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
2390 {
2391 	struct xe_device *xe = gt_to_xe(gt);
2392 
2393 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
2394 
2395 	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
2396 }
2397 
2398 /**
2399  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2400  * @gt: the &xe_gt
2401  *
2402  * Any configurations previously pushed to GuC are lost when the GT is reset.
2403  * Push all non-empty VF configurations to the GuC again.
2404  *
2405  * This function can only be called on PF.
2406  */
2407 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2408 {
2409 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2410 	unsigned int fail = 0, skip = 0;
2411 
2412 	for (n = 1; n <= total_vfs; n++) {
2413 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
2414 			skip++;
2415 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
2416 			fail++;
2417 	}
2418 
2419 	if (fail)
2420 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2421 				   fail, total_vfs - skip, str_plural(total_vfs));
2422 
2423 	if (fail != total_vfs)
2424 		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
2425 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2426 }
2427 
2428 /**
2429  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2430  * @gt: the &xe_gt
2431  * @p: the &drm_printer
2432  *
2433  * Print GGTT configuration data for all VFs.
2434  * VFs without provisioned GGTT are ignored.
2435  *
2436  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2437  */
2438 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2439 {
2440 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2441 	const struct xe_gt_sriov_config *config;
2442 	char buf[10];
2443 
2444 	for (n = 1; n <= total_vfs; n++) {
2445 		config = &gt->sriov.pf.vfs[n].config;
2446 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2447 			continue;
2448 
2449 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2450 				buf, sizeof(buf));
2451 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2452 			   n, config->ggtt_region->base.start,
2453 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2454 			   buf);
2455 	}
2456 
2457 	return 0;
2458 }
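
/*
 * Hypothetical output of xe_gt_sriov_pf_config_print_ggtt() above; the
 * addresses and sizes are made up, the format follows the drm_printf():
 *
 *	VF1:	0x1000000-0x1ffffff	(16.0 MiB)
 *	VF2:	0x2000000-0x3ffffff	(32.0 MiB)
 */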
2459 
2460 /**
2461  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2462  * @gt: the &xe_gt
2463  * @p: the &drm_printer
2464  *
2465  * Print GuC context ID allocations across all VFs.
2466  * VFs without GuC context IDs are skipped.
2467  *
2468  * This function can only be called on PF.
2469  * Return: 0 on success or a negative error code on failure.
2470  */
2471 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2472 {
2473 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2474 	const struct xe_gt_sriov_config *config;
2475 
2476 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2477 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2478 
2479 	for (n = 1; n <= total_vfs; n++) {
2480 		config = &gt->sriov.pf.vfs[n].config;
2481 		if (!config->num_ctxs)
2482 			continue;
2483 
2484 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2485 			   n,
2486 			   config->begin_ctx,
2487 			   config->begin_ctx + config->num_ctxs - 1,
2488 			   config->num_ctxs);
2489 	}
2490 
2491 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2492 	return 0;
2493 }
2494 
2495 /**
2496  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2497  * @gt: the &xe_gt
2498  * @p: the &drm_printer
2499  *
2500  * Print GuC doorbell ID allocations across all VFs.
2501  * VFs without GuC doorbell IDs are skipped.
2502  *
2503  * This function can only be called on PF.
2504  * Return: 0 on success or a negative error code on failure.
2505  */
2506 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2507 {
2508 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2509 	const struct xe_gt_sriov_config *config;
2510 
2511 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2512 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2513 
2514 	for (n = 1; n <= total_vfs; n++) {
2515 		config = &gt->sriov.pf.vfs[n].config;
2516 		if (!config->num_dbs)
2517 			continue;
2518 
2519 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2520 			   n,
2521 			   config->begin_db,
2522 			   config->begin_db + config->num_dbs - 1,
2523 			   config->num_dbs);
2524 	}
2525 
2526 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2527 	return 0;
2528 }
2529 
2530 /**
2531  * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2532  * @gt: the &xe_gt
2533  * @p: the &drm_printer
2534  *
2535  * Print LMEM allocations across all VFs.
2536  * VFs without LMEM allocation are skipped.
2537  *
2538  * This function can only be called on PF.
2539  * Return: 0 on success or a negative error code on failure.
2540  */
2541 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2542 {
2543 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2544 	const struct xe_gt_sriov_config *config;
2545 	char buf[10];
2546 
2547 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2548 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2549 
2550 	for (n = 1; n <= total_vfs; n++) {
2551 		config = &gt->sriov.pf.vfs[n].config;
2552 		if (!config->lmem_obj)
2553 			continue;
2554 
2555 		string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
2556 				buf, sizeof(buf));
2557 		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2558 			   n, config->lmem_obj->size, buf);
2559 	}
2560 
2561 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2562 	return 0;
2563 }
2564 
2565 /**
2566  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2567  * @gt: the &xe_gt
2568  * @p: the &drm_printer
2569  *
2570  * Print GGTT ranges that are available for provisioning.
2571  *
2572  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2573  */
2574 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2575 {
2576 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2577 	u64 alignment = pf_get_ggtt_alignment(gt);
2578 	u64 spare, avail, total;
2579 	char buf[10];
2580 
2581 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2582 
2583 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2584 
2585 	spare = pf_get_spare_ggtt(gt);
2586 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2587 
2588 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2589 
2590 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2591 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2592 
2593 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2594 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2595 
2596 	avail = total > spare ? total - spare : 0;
2597 
2598 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2599 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2600 
2601 	return 0;
2602 }
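
/*
 * Hypothetical tail of xe_gt_sriov_pf_config_print_available_ggtt() output;
 * the hole ranges printed by xe_ggtt_print_holes() come first and the
 * values below are made up:
 *
 *	total:	268435456	(256 MiB)
 *	spare:	134217728	(128 MiB)
 *	avail:	134217728	(128 MiB)
 */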
2603