// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

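/*
 * Note: unlike a plain make_u64(), the helper below truncates both
 * arguments to 32 bits before combining them, so a wider or signed @lo
 * cannot leak into the upper half; e.g. make_u64_from_u32(0x1, 0x2)
 * yields 0x100000002.
 */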
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}
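
/*
 * Per the PF2GUC_UPDATE_VF_CFG action described in
 * abi/guc_actions_sriov_abi.h, a successful reply carries the number of
 * KLVs that GuC consumed, which is why callers compare the return value
 * against the number of KLVs pushed; pf_send_vf_cfg_reset() below relies
 * on a zero buffer address and size meaning "reset the VF configuration".
 */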

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
{
	const u32 bytes = num_dwords * sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(bytes, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);

	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);

	xe_bo_unpin_map_no_vm(bo);

	return ret;
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	int ret;

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}
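
/*
 * Illustrative KLV layout (example values only): pushing a 64-bit LMEM
 * size of 4G via pf_push_vf_cfg_u64() yields one 3-dword KLV:
 *
 *   klv[0] = FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2)
 *   klv[1] = 0x00000000  (lower 32 bits of 0x100000000)
 *   klv[2] = 0x00000001  (upper 32 bits of 0x100000000)
 *
 * so num_klvs is 1 while num_dwords is 3.
 */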

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		if (details) {
			cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
			cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
			cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
		}

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);

	kfree(cfg);
	return err;
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}
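
/*
 * The 64K case above matches discrete platforms that require 64K VRAM
 * pages (XE_VRAM_FLAGS_NEED64K); VF GGTT ranges are assumed to need the
 * same granularity there, while the regular 4K page size applies
 * everywhere else.
 */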

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * Explicit GGTT PTE assignment back to the PF using
		 * xe_ggtt_assign() is redundant, as the PTEs will be
		 * implicitly re-assigned to the PF by the xe_ggtt_clear()
		 * invoked from xe_ggtt_node_remove() below.
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}
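
/*
 * Provisioning flow above, in short: any existing assignment is first
 * unprovisioned in GuC (zero-sized GGTT push) and its node released; then
 * a fresh node is inserted, its PTEs are assigned to the VF, and the node
 * is recorded in the VF's config only once the primary GT (and the media
 * GT, where present) accepted the new range.
 */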

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If @vfid represents the PF, then this function will change the PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}
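
/*
 * Reporting example for the recursion above (hypothetical numbers): a bulk
 * request for VF1..VF4 that fails at VF3 arrives here with first = 1,
 * num_vfs = 4 and last = 3; the recursive call logs VF1..VF2 as
 * provisioned, then the final pf_config_set_u64_done() reports the error
 * against VF3 itself.
 */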

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at the single largest GGTT
	 * region, as that will always be the best fit for the 1 VF case,
	 * and will most likely also nicely cover other cases where VFs are
	 * provisioned on a fresh and idle PF driver, without any stale GGTT
	 * allocations spread in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}
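
/*
 * Worked example (made-up sizes): with a 960M largest hole left after the
 * spare is deducted and num_vfs = 4, each VF is offered 240M; any
 * remainder below the GGTT alignment stays with the PF.
 */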

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If @vfid represents the PF, then the number of the PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}
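
/*
 * The loop above is effectively a dry run: it probes downwards from the
 * theoretical maximum, reserving fair * num_vfs context IDs and releasing
 * them again on success, so the largest per-VF quota that the ID manager
 * can actually satisfy is found without keeping any reservation.
 */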

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If @vfid represents the PF, then the number of the PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}
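
/*
 * Note on the LMTT layout built above: every tile's LMTT maps the VF's
 * LMEM objects from all tiles, concatenated at increasing offsets in
 * for_each_gt() order, which is why the total size is summed across the
 * tiles before any pages are prepared.
 */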

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_NEEDS_2M |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	err = pf_update_vf_lmtt(xe, vfid);
	if (unlikely(err))
		goto release;

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!IS_DGFX(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}
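
/*
 * The "result = result ?: err" pattern above records only the first
 * failure while still attempting to provision every remaining resource,
 * so e.g. a GGTT shortfall does not prevent fair context or doorbell
 * provisioning from being applied.
 */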
1631 
exec_quantum_unit(u32 exec_quantum)1632 static const char *exec_quantum_unit(u32 exec_quantum)
1633 {
1634 	return exec_quantum ? "ms" : "(infinity)";
1635 }
1636 
pf_provision_exec_quantum(struct xe_gt * gt,unsigned int vfid,u32 exec_quantum)1637 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1638 				     u32 exec_quantum)
1639 {
1640 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1641 	int err;
1642 
1643 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1644 	if (unlikely(err))
1645 		return err;
1646 
1647 	config->exec_quantum = exec_quantum;
1648 	return 0;
1649 }
1650 
pf_get_exec_quantum(struct xe_gt * gt,unsigned int vfid)1651 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1652 {
1653 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1654 
1655 	return config->exec_quantum;
1656 }
1657 
1658 /**
1659  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1660  * @gt: the &xe_gt
1661  * @vfid: the VF identifier
1662  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1663  *
1664  * This function can only be called on PF.
1665  *
1666  * Return: 0 on success or a negative error code on failure.
1667  */
xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt * gt,unsigned int vfid,u32 exec_quantum)1668 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1669 					   u32 exec_quantum)
1670 {
1671 	int err;
1672 
1673 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1674 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1675 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1676 
1677 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1678 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1679 				      "execution quantum", exec_quantum_unit, err);
1680 }
1681 
1682 /**
1683  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1684  * @gt: the &xe_gt
1685  * @vfid: the VF identifier
1686  *
1687  * This function can only be called on PF.
1688  *
1689  * Return: VF's (or PF's) execution quantum in milliseconds.
1690  */
1691 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1692 {
1693 	u32 exec_quantum;
1694 
1695 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1696 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1697 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1698 
1699 	return exec_quantum;
1700 }
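
/*
 * Illustrative sketch (not part of the original file): granting VF1 a
 * 50 ms execution quantum and verifying the readback. The example_*
 * name and the chosen values are assumptions.
 */
static int example_set_exec_quantum(struct xe_gt *gt)
{
	unsigned int vfid = 1;
	int err;

	err = xe_gt_sriov_pf_config_set_exec_quantum(gt, vfid, 50);
	if (err)
		return err;

	/* a value of 0 would have meant "(infinity)", i.e. no limit */
	return xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid) == 50 ? 0 : -EIO;
}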
1701 
1702 static const char *preempt_timeout_unit(u32 preempt_timeout)
1703 {
1704 	return preempt_timeout ? "us" : "(infinity)";
1705 }
1706 
1707 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1708 					u32 preempt_timeout)
1709 {
1710 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1711 	int err;
1712 
1713 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1714 	if (unlikely(err))
1715 		return err;
1716 
1717 	config->preempt_timeout = preempt_timeout;
1718 
1719 	return 0;
1720 }
1721 
1722 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1723 {
1724 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1725 
1726 	return config->preempt_timeout;
1727 }
1728 
1729 /**
1730  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1731  * @gt: the &xe_gt
1732  * @vfid: the VF identifier
1733  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1734  *
1735  * This function can only be called on PF.
1736  *
1737  * Return: 0 on success or a negative error code on failure.
1738  */
1739 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1740 					      u32 preempt_timeout)
1741 {
1742 	int err;
1743 
1744 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1745 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1746 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1747 
1748 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1749 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1750 				      "preemption timeout", preempt_timeout_unit, err);
1751 }
1752 
1753 /**
1754  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1755  * @gt: the &xe_gt
1756  * @vfid: the VF identifier
1757  *
1758  * This function can only be called on PF.
1759  *
1760  * Return: VF's (or PF's) preemption timeout in microseconds.
1761  */
1762 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1763 {
1764 	u32 preempt_timeout;
1765 
1766 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1767 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1768 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1769 
1770 	return preempt_timeout;
1771 }
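
/*
 * Illustrative sketch (not part of the original file): pairing an
 * execution quantum with a preemption timeout, since both control how
 * long a VF can monopolize the engines. The example_* name and the
 * values are assumptions.
 */
static int example_set_vf_scheduling(struct xe_gt *gt, unsigned int vfid)
{
	/* run up to 100 ms per quantum, then allow 20000 us to preempt */
	int err = xe_gt_sriov_pf_config_set_exec_quantum(gt, vfid, 100);

	return err ?: xe_gt_sriov_pf_config_set_preempt_timeout(gt, vfid, 20000);
}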
1772 
1773 static const char *sched_priority_unit(u32 priority)
1774 {
1775 	return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
1776 		priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
1777 		priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
1778 		"(?)";
1779 }
1780 
1781 static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1782 {
1783 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1784 	int err;
1785 
1786 	err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
1787 	if (unlikely(err))
1788 		return err;
1789 
1790 	config->sched_priority = priority;
1791 	return 0;
1792 }
1793 
1794 static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1795 {
1796 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1797 
1798 	return config->sched_priority;
1799 }
1800 
1801 /**
1802  * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
1803  * @gt: the &xe_gt
1804  * @vfid: the VF identifier
1805  * @priority: requested scheduling priority
1806  *
1807  * This function can only be called on PF.
1808  *
1809  * Return: 0 on success or a negative error code on failure.
1810  */
1811 int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1812 {
1813 	int err;
1814 
1815 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1816 	err = pf_provision_sched_priority(gt, vfid, priority);
1817 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1818 
1819 	return pf_config_set_u32_done(gt, vfid, priority,
1820 				      xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
1821 				      "scheduling priority", sched_priority_unit, err);
1822 }
1823 
1824 /**
1825  * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
1826  * @gt: the &xe_gt
1827  * @vfid: the VF identifier
1828  *
1829  * This function can only be called on PF.
1830  *
1831  * Return: VF's (or PF's) scheduling priority.
1832  */
1833 u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1834 {
1835 	u32 priority;
1836 
1837 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1838 	priority = pf_get_sched_priority(gt, vfid);
1839 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1840 
1841 	return priority;
1842 }
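
/*
 * Illustrative sketch (not part of the original file): raising a VF to
 * the high scheduling priority using the same GUC_SCHED_PRIORITY_*
 * levels that sched_priority_unit() decodes. The example_* name is an
 * assumption.
 */
static int example_boost_vf_priority(struct xe_gt *gt, unsigned int vfid)
{
	if (xe_gt_sriov_pf_config_get_sched_priority(gt, vfid) == GUC_SCHED_PRIORITY_HIGH)
		return 0;

	return xe_gt_sriov_pf_config_set_sched_priority(gt, vfid,
							GUC_SCHED_PRIORITY_HIGH);
}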
1843 
1844 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1845 {
1846 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1847 
1848 	config->exec_quantum = 0;
1849 	config->preempt_timeout = 0;
1850 }
1851 
1852 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1853 				  enum xe_guc_klv_threshold_index index, u32 value)
1854 {
1855 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1856 	int err;
1857 
1858 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1859 	if (unlikely(err))
1860 		return err;
1861 
1862 	config->thresholds[index] = value;
1863 
1864 	return 0;
1865 }
1866 
1867 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1868 			    enum xe_guc_klv_threshold_index index)
1869 {
1870 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1871 
1872 	return config->thresholds[index];
1873 }
1874 
1875 static const char *threshold_unit(u32 threshold)
1876 {
1877 	return threshold ? "" : "(disabled)";
1878 }
1879 
1880 /**
1881  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1882  * @gt: the &xe_gt
1883  * @vfid: the VF identifier
1884  * @index: the threshold index
1885  * @value: requested value (0 means disabled)
1886  *
1887  * This function can only be called on PF.
1888  *
1889  * Return: 0 on success or a negative error code on failure.
1890  */
1891 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1892 					enum xe_guc_klv_threshold_index index, u32 value)
1893 {
1894 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1895 	const char *name = xe_guc_klv_key_to_string(key);
1896 	int err;
1897 
1898 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1899 	err = pf_provision_threshold(gt, vfid, index, value);
1900 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1901 
1902 	return pf_config_set_u32_done(gt, vfid, value,
1903 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1904 				      name, threshold_unit, err);
1905 }
1906 
1907 /**
1908  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1909  * @gt: the &xe_gt
1910  * @vfid: the VF identifier
1911  * @index: the threshold index
1912  *
1913  * This function can only be called on PF.
1914  *
1915  * Return: value of VF's (or PF's) threshold.
1916  */
1917 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1918 					enum xe_guc_klv_threshold_index index)
1919 {
1920 	u32 value;
1921 
1922 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1923 	value = pf_get_threshold(gt, vfid, index);
1924 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1925 
1926 	return value;
1927 }
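
/*
 * Illustrative sketch (not part of the original file): disabling one
 * adverse-event threshold and confirming it reads back as 0, which
 * threshold_unit() reports as "(disabled)". The example_* name is an
 * assumption; @index is any enum xe_guc_klv_threshold_index value.
 */
static int example_disable_threshold(struct xe_gt *gt, unsigned int vfid,
				     enum xe_guc_klv_threshold_index index)
{
	int err = xe_gt_sriov_pf_config_set_threshold(gt, vfid, index, 0);

	return err ?: (xe_gt_sriov_pf_config_get_threshold(gt, vfid, index) ? -EIO : 0);
}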
1928 
1929 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1930 {
1931 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1932 
1933 #define reset_threshold_config(TAG, ...) ({				\
1934 	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
1935 });
1936 
1937 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
1938 #undef reset_threshold_config
1939 }
1940 
1941 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
1942 {
1943 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1944 	struct xe_device *xe = gt_to_xe(gt);
1945 
1946 	if (!xe_gt_is_media_type(gt)) {
1947 		pf_release_vf_config_ggtt(gt, config);
1948 		if (IS_DGFX(xe)) {
1949 			pf_release_vf_config_lmem(gt, config);
1950 			pf_update_vf_lmtt(xe, vfid);
1951 		}
1952 	}
1953 	pf_release_config_ctxs(gt, config);
1954 	pf_release_config_dbs(gt, config);
1955 	pf_reset_config_sched(gt, config);
1956 	pf_reset_config_thresholds(gt, config);
1957 }
1958 
1959 /**
1960  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
1961  * @gt: the &xe_gt
1962  * @vfid: the VF identifier (can't be PF)
1963  * @force: force configuration release
1964  *
1965  * This function can only be called on PF.
1966  *
1967  * Return: 0 on success or a negative error code on failure.
1968  */
1969 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
1970 {
1971 	int err;
1972 
1973 	xe_gt_assert(gt, vfid);
1974 
1975 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1976 	err = pf_send_vf_cfg_reset(gt, vfid);
1977 	if (!err || force)
1978 		pf_release_vf_config(gt, vfid);
1979 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1980 
1981 	if (unlikely(err)) {
1982 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
1983 				   vfid, ERR_PTR(err),
1984 				   force ? " but all resources were released anyway!" : "");
1985 	}
1986 
1987 	return force ? 0 : err;
1988 }
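
/*
 * Illustrative sketch (not part of the original file): unprovisioning a
 * VF when VFs are being disabled. With force=true the local resources
 * are reclaimed even if the GuC reset request failed, matching the
 * "released anyway" notice above. The example_* name is an assumption.
 */
static void example_unprovision_vf(struct xe_gt *gt, unsigned int vfid)
{
	/* force=true guarantees cleanup, so the return value is always 0 */
	xe_gt_sriov_pf_config_release(gt, vfid, true);
}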
1989 
1990 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
1991 {
1992 	if (xe_ggtt_node_allocated(ggtt_region))
1993 		xe_ggtt_assign(ggtt_region, vfid);
1994 }
1995 
1996 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
1997 {
1998 	struct xe_migrate *m = tile->migrate;
1999 	struct dma_fence *fence;
2000 	int err;
2001 
2002 	if (!bo)
2003 		return 0;
2004 
2005 	xe_bo_lock(bo, false);
2006 	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
2007 	if (IS_ERR(fence)) {
2008 		err = PTR_ERR(fence);
2009 	} else if (!fence) {
2010 		err = -ENOMEM;
2011 	} else {
2012 		long ret = dma_fence_wait_timeout(fence, false, timeout);
2013 
2014 		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
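		/* ret > 0: jiffies left (done), ret == 0: timed out, ret < 0: error */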
2015 		dma_fence_put(fence);
2016 		if (!err)
2017 			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
2018 						jiffies_to_msecs(timeout - ret));
2019 	}
2020 	xe_bo_unlock(bo);
2021 
2022 	return err;
2023 }
2024 
2025 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
2026 {
2027 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2028 	struct xe_tile *tile = gt_to_tile(gt);
2029 	struct xe_device *xe = gt_to_xe(gt);
2030 	int err = 0;
2031 
2032 	/*
2033 	 * Only GGTT and LMEM need to be cleared by the PF.
2034 	 * GuC doorbell IDs and context IDs do not need any clearing.
2035 	 */
2036 	if (!xe_gt_is_media_type(gt)) {
2037 		pf_sanitize_ggtt(config->ggtt_region, vfid);
2038 		if (IS_DGFX(xe))
2039 			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
2040 	}
2041 
2042 	return err;
2043 }
2044 
2045 /**
2046  * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
2047  * @gt: the &xe_gt
2048  * @vfid: the VF identifier (can't be PF)
2049  * @timeout: maximum timeout to wait for completion in jiffies
2050  *
2051  * This function can only be called on PF.
2052  *
2053  * Return: 0 on success or a negative error code on failure.
2054  */
2055 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
2056 {
2057 	int err;
2058 
2059 	xe_gt_assert(gt, vfid != PFID);
2060 
2061 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2062 	err = pf_sanitize_vf_resources(gt, vfid, timeout);
2063 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2064 
2065 	if (unlikely(err))
2066 		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
2067 				   vfid, ERR_PTR(err));
2068 	return err;
2069 }
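
/*
 * Illustrative sketch (not part of the original file): scrubbing a VF's
 * GGTT and LMEM before the resources change hands, capping the LMEM
 * clear at 5 seconds. The example_* name and timeout are assumptions.
 */
static int example_scrub_vf(struct xe_gt *gt, unsigned int vfid)
{
	return xe_gt_sriov_pf_config_sanitize(gt, vfid, 5 * HZ);
}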
2070 
2071 /**
2072  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
2073  * @gt: the &xe_gt
2074  * @vfid: the VF identifier (can't be PF)
2075  * @refresh: explicit refresh
2076  *
2077  * This function can only be called on PF.
2078  *
2079  * Return: 0 on success or a negative error code on failure.
2080  */
2081 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2082 {
2083 	int err = 0;
2084 
2085 	xe_gt_assert(gt, vfid);
2086 
2087 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2088 	if (refresh)
2089 		err = pf_send_vf_cfg_reset(gt, vfid);
2090 	if (!err)
2091 		err = pf_push_full_vf_config(gt, vfid);
2092 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2093 
2094 	if (unlikely(err)) {
2095 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2096 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2097 	}
2098 
2099 	return err;
2100 }
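
/*
 * Illustrative sketch (not part of the original file): forcing the GuC
 * to drop and re-apply a VF's configuration, e.g. after several
 * attributes changed at once. The example_* name is an assumption.
 */
static int example_refresh_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	/* refresh=true resets the GuC state first, then pushes a full config */
	return xe_gt_sriov_pf_config_push(gt, vfid, true);
}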
2101 
2102 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2103 {
2104 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2105 	struct xe_device *xe = gt_to_xe(gt);
2106 	bool is_primary = !xe_gt_is_media_type(gt);
2107 	bool valid_ggtt, valid_ctxs, valid_dbs;
2108 	bool valid_any, valid_all;
2109 
2110 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2111 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2112 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2113 
2114 	/* note that GuC doorbells are optional */
2115 	valid_any = valid_ctxs || valid_dbs;
2116 	valid_all = valid_ctxs;
2117 
2118 	/* and GGTT/LMEM is configured on primary GT only */
2119 	valid_all = valid_all && valid_ggtt;
2120 	valid_any = valid_any || (valid_ggtt && is_primary);
2121 
2122 	if (IS_DGFX(xe)) {
2123 		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2124 
2125 		valid_any = valid_any || (valid_lmem && is_primary);
2126 		valid_all = valid_all && valid_lmem;
2127 	}
2128 
2129 	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
2130 }
2131 
2132 /**
2133  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2134  * @gt: the &xe_gt
2135  * @vfid: the VF identifier (can't be PF)
2136  *
2137  * This function can only be called on PF.
2138  *
2139  * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2140  */
2141 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2142 {
2143 	bool empty;
2144 
2145 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2146 	xe_gt_assert(gt, vfid);
2147 
2148 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2149 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2150 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2151 
2152 	return empty;
2153 }
2154 
2155 /**
2156  * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2157  * @gt: the &xe_gt
2158  * @vfid: the VF identifier (can't be PF)
2159  * @buf: the buffer to save a config to (or NULL to query the buf size)
2160  * @size: the size of the buffer (or 0 to query the buf size)
2161  *
2162  * This function can only be called on PF.
2163  *
2164  * Return: minimum size of the buffer or the number of bytes saved,
2165  *         or a negative error code on failure.
2166  */
2167 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2168 {
2169 	struct xe_gt_sriov_config *config;
2170 	ssize_t ret;
2171 
2172 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2173 	xe_gt_assert(gt, vfid);
2174 	xe_gt_assert(gt, !(!buf ^ !size));
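	/* @buf and @size must be provided together or both left unset */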
2175 
2176 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2177 	ret = pf_validate_vf_config(gt, vfid);
2178 	if (!size) {
2179 		ret = ret ? 0 : SZ_4K;
2180 	} else if (!ret) {
2181 		if (size < SZ_4K) {
2182 			ret = -ENOBUFS;
2183 		} else {
2184 			config = pf_pick_vf_config(gt, vfid);
2185 			ret = encode_config(buf, config, false) * sizeof(u32);
2186 		}
2187 	}
2188 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2189 
2190 	return ret;
2191 }
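
/*
 * Illustrative sketch (not part of the original file): the two-call
 * pattern implied by the kernel-doc above - query the blob size with
 * (NULL, 0) first, then save into an allocation of at least that size.
 * The example_* name is an assumption; the caller would kfree(*blob).
 */
static ssize_t example_save_vf_config(struct xe_gt *gt, unsigned int vfid,
				      void **blob)
{
	ssize_t size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);

	if (size <= 0)
		return size;

	*blob = kzalloc(size, GFP_KERNEL);
	if (!*blob)
		return -ENOMEM;

	return xe_gt_sriov_pf_config_save(gt, vfid, *blob, size);
}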
2192 
2193 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2194 				    u32 key, u32 len, const u32 *value)
2195 {
2196 	switch (key) {
2197 	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2198 		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2199 			return -EBADMSG;
2200 		return pf_provision_vf_ctxs(gt, vfid, value[0]);
2201 
2202 	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2203 		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2204 			return -EBADMSG;
2205 		return pf_provision_vf_dbs(gt, vfid, value[0]);
2206 
2207 	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2208 		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2209 			return -EBADMSG;
2210 		return pf_provision_exec_quantum(gt, vfid, value[0]);
2211 
2212 	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2213 		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2214 			return -EBADMSG;
2215 		return pf_provision_preempt_timeout(gt, vfid, value[0]);
2216 
2217 	/* auto-generate case statements */
2218 #define define_threshold_key_to_provision_case(TAG, ...)				\
2219 	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
2220 		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
2221 		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
2222 			return -EBADMSG;						\
2223 		return pf_provision_threshold(gt, vfid,					\
2224 					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
2225 					      value[0]);
2226 
2227 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2228 #undef define_threshold_key_to_provision_case
2229 	}
2230 
2231 	if (xe_gt_is_media_type(gt))
2232 		return -EKEYREJECTED;
2233 
2234 	switch (key) {
2235 	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2236 		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2237 			return -EBADMSG;
2238 		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2239 
2240 	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2241 		if (!IS_DGFX(gt_to_xe(gt)))
2242 			return -EKEYREJECTED;
2243 		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2244 			return -EBADMSG;
2245 		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2246 	}
2247 
2248 	return -EKEYREJECTED;
2249 }
2250 
2251 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2252 				const u32 *klvs, size_t num_dwords)
2253 {
2254 	int err;
2255 
2256 	while (num_dwords >= GUC_KLV_LEN_MIN) {
2257 		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2258 		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2259 
2260 		klvs += GUC_KLV_LEN_MIN;
2261 		num_dwords -= GUC_KLV_LEN_MIN;
2262 
2263 		if (num_dwords < len)
2264 			err = -EBADMSG;
2265 		else
2266 			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2267 
2268 		if (err) {
2269 			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2270 			return err;
2271 		}
2272 
2273 		klvs += len;
2274 		num_dwords -= len;
2275 	}
2276 
2277 	return pf_validate_vf_config(gt, vfid);
2278 }
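
/*
 * Illustrative note (not part of the original file): the blob walked
 * above is a packed stream of GuC KLVs, each one dword of header
 * (GUC_KLV_0_KEY | GUC_KLV_0_LEN) followed by @len value dwords:
 *
 *	dword 0:   [ key | len ]
 *	dwords 1+: value[0] .. value[len - 1]
 *
 * which is why the loop consumes GUC_KLV_LEN_MIN header dwords before
 * stepping over the @len payload dwords.
 */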
2279 
2280 /**
2281  * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2282  * @gt: the &xe_gt
2283  * @vfid: the VF identifier (can't be PF)
2284  * @buf: the buffer with config data
2285  * @size: the size of the config data
2286  *
2287  * This function can only be called on PF.
2288  *
2289  * Return: 0 on success or a negative error code on failure.
2290  */
2291 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2292 				  const void *buf, size_t size)
2293 {
2294 	int err;
2295 
2296 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2297 	xe_gt_assert(gt, vfid);
2298 
2299 	if (!size)
2300 		return -ENODATA;
2301 
2302 	if (size % sizeof(u32))
2303 		return -EINVAL;
2304 
2305 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2306 		struct drm_printer p = xe_gt_info_printer(gt);
2307 
2308 		drm_printf(&p, "restoring VF%u config:\n", vfid);
2309 		xe_guc_klv_print(buf, size / sizeof(u32), &p);
2310 	}
2311 
2312 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2313 	err = pf_send_vf_cfg_reset(gt, vfid);
2314 	if (!err) {
2315 		pf_release_vf_config(gt, vfid);
2316 		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2317 	}
2318 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2319 
2320 	return err;
2321 }
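
/*
 * Illustrative sketch (not part of the original file): replaying a blob
 * previously captured with xe_gt_sriov_pf_config_save(), e.g. when
 * migrating a VF's provisioning. The example_* name is an assumption.
 */
static int example_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
				     const void *blob, size_t size)
{
	/* resets the VF's config first, then re-provisions from the KLVs */
	return xe_gt_sriov_pf_config_restore(gt, vfid, blob, size);
}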
2322 
2323 /**
2324  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2325  * @gt: the &xe_gt
2326  *
2327  * Any prior configurations pushed to GuC are lost when the GT is reset.
2328  * Push again all non-empty VF configurations to the GuC.
2329  * Push all non-empty VF configurations to the GuC again.
2330  * This function can only be called on PF.
2331  */
2332 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2333 {
2334 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2335 	unsigned int fail = 0, skip = 0;
2336 
2337 	for (n = 1; n <= total_vfs; n++) {
2338 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
2339 			skip++;
2340 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
2341 			fail++;
2342 	}
2343 
2344 	if (fail)
2345 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2346 				   fail, total_vfs - skip, str_plural(total_vfs - skip));
2347 
2348 	if (fail != total_vfs)
2349 		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
2350 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2351 }
2352 
2353 /**
2354  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2355  * @gt: the &xe_gt
2356  * @p: the &drm_printer
2357  *
2358  * Print GGTT configuration data for all VFs.
2359  * VFs without provisioned GGTT are ignored.
2360  *
2361  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2362  */
2363 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2364 {
2365 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2366 	const struct xe_gt_sriov_config *config;
2367 	char buf[10];
2368 
2369 	for (n = 1; n <= total_vfs; n++) {
2370 		config = &gt->sriov.pf.vfs[n].config;
2371 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2372 			continue;
2373 
2374 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2375 				buf, sizeof(buf));
2376 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2377 			   n, config->ggtt_region->base.start,
2378 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2379 			   buf);
2380 	}
2381 
2382 	return 0;
2383 }
2384 
2385 /**
2386  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2387  * @gt: the &xe_gt
2388  * @p: the &drm_printer
2389  *
2390  * Print GuC context ID allocations across all VFs.
2391  * VFs without GuC context IDs are skipped.
2392  *
2393  * This function can only be called on PF.
2394  * Return: 0 on success or a negative error code on failure.
2395  */
2396 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2397 {
2398 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2399 	const struct xe_gt_sriov_config *config;
2400 
2401 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2402 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2403 
2404 	for (n = 1; n <= total_vfs; n++) {
2405 		config = &gt->sriov.pf.vfs[n].config;
2406 		if (!config->num_ctxs)
2407 			continue;
2408 
2409 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2410 			   n,
2411 			   config->begin_ctx,
2412 			   config->begin_ctx + config->num_ctxs - 1,
2413 			   config->num_ctxs);
2414 	}
2415 
2416 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2417 	return 0;
2418 }
2419 
2420 /**
2421  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2422  * @gt: the &xe_gt
2423  * @p: the &drm_printer
2424  *
2425  * Print GuC doorbell ID allocations across all VFs.
2426  * VFs without GuC doorbell IDs are skipped.
2427  *
2428  * This function can only be called on PF.
2429  * Return: 0 on success or a negative error code on failure.
2430  */
2431 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2432 {
2433 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2434 	const struct xe_gt_sriov_config *config;
2435 
2436 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2437 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2438 
2439 	for (n = 1; n <= total_vfs; n++) {
2440 		config = &gt->sriov.pf.vfs[n].config;
2441 		if (!config->num_dbs)
2442 			continue;
2443 
2444 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2445 			   n,
2446 			   config->begin_db,
2447 			   config->begin_db + config->num_dbs - 1,
2448 			   config->num_dbs);
2449 	}
2450 
2451 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2452 	return 0;
2453 }
2454 
2455 /**
2456  * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2457  * @gt: the &xe_gt
2458  * @p: the &drm_printer
2459  *
2460  * Print LMEM allocations across all VFs.
2461  * VFs without LMEM allocation are skipped.
2462  *
2463  * This function can only be called on PF.
2464  * Return: 0 on success or a negative error code on failure.
2465  */
2466 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2467 {
2468 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2469 	const struct xe_gt_sriov_config *config;
2470 	char buf[10];
2471 
2472 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2473 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2474 
2475 	for (n = 1; n <= total_vfs; n++) {
2476 		config = &gt->sriov.pf.vfs[n].config;
2477 		if (!config->lmem_obj)
2478 			continue;
2479 
2480 		string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
2481 				buf, sizeof(buf));
2482 		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2483 			   n, config->lmem_obj->size, buf);
2484 	}
2485 
2486 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2487 	return 0;
2488 }
2489 
2490 /**
2491  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2492  * @gt: the &xe_gt
2493  * @p: the &drm_printer
2494  *
2495  * Print GGTT ranges that are available for provisioning.
2496  *
2497  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2498  */
2499 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2500 {
2501 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2502 	u64 alignment = pf_get_ggtt_alignment(gt);
2503 	u64 spare, avail, total;
2504 	char buf[10];
2505 
2506 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2507 
2508 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2509 
2510 	spare = pf_get_spare_ggtt(gt);
2511 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2512 
2513 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2514 
2515 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2516 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2517 
2518 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2519 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2520 
2521 	avail = total > spare ? total - spare : 0;
2522 
2523 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2524 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2525 
2526 	return 0;
2527 }
2528