// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
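/*
 * For example, make_u64_from_u32(0x00000001, 0xdeadbeef) evaluates to
 * 0x00000001deadbeefull: the hi dword lands in bits 63:32 and the lo
 * dword in bits 31:0.
 */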

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}
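/*
 * Note: an update request with addr == 0 and size == 0 carries no KLV
 * payload and acts as a configuration reset; pf_send_vf_cfg_reset()
 * below relies on exactly that.
 */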

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
	struct xe_guc *guc = &gt->uc.guc;

	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       struct xe_guc_buf buf, u32 num_dwords)
{
	int ret;

	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_dbg_printer(gt);
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		char name[8];

		xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
				xe_sriov_function_name(vfid, name, sizeof(name)),
				num_klvs, str_plural(num_klvs));
		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

/*
 * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
}

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}
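/*
 * Sketch of the raw KLV streams the two helpers above produce (key 0x123
 * is hypothetical, purely for illustration): a 32-bit KLV takes 2 dwords
 * and a 64-bit KLV takes 3 dwords:
 *
 *	u32 klv32[] = {
 *		FIELD_PREP(GUC_KLV_0_KEY, 0x123) | FIELD_PREP(GUC_KLV_0_LEN, 1),
 *		value,
 *	};
 *	u32 klv64[] = {
 *		FIELD_PREP(GUC_KLV_0_KEY, 0x123) | FIELD_PREP(GUC_KLV_0_LEN, 2),
 *		lower_32_bits(value),
 *		upper_32_bits(value),
 *	};
 *
 * Each is a single KLV, so it is pushed with num_klvs == 1 while
 * num_dwords is 2 or 3 respectively.
 */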

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

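/*
 * Note: vfid == 0 (PFID) is a valid argument here and selects the PF's
 * own config entry, which is why the assert below checks vfid against
 * totalvfs inclusively.
 */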
static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
{
	u32 n = 0;

	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
		cfg[n++] = lower_32_bits(start);
		cfg[n++] = upper_32_bits(start);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
	cfg[n++] = lower_32_bits(size);
	cfg[n++] = upper_32_bits(size);

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	struct xe_ggtt_node *node = config->ggtt_region;

	if (!xe_ggtt_node_allocated(node))
		return 0;

	return encode_ggtt(cfg, node->base.start, node->base.size, details);
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details && config->num_ctxs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details && config->num_dbs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
		cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	cfg = xe_guc_buf_cpu_ptr(buf);
	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (vfid == PFID) {
		u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
		u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;

		/* plain PF config data will never include a real GGTT region */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* fake PF GGTT config covers full GGTT range except reserved WOPCM */
		num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
	}

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);

	return err;
}

static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
{
	int err = 0;

	xe_gt_assert(gt, vfid);
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (reset)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);

	return err;
}

static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
{
	return pf_push_vf_cfg(gt, vfid, true);
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as PTE will be implicitly re-assigned to PF by
		 * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

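/*
 * (Re)provisioning flow below: an already allocated GGTT region is first
 * unprovisioned in GuC (start/size pushed as 0), released locally and the
 * remaining VF config re-pushed, before a fresh node is inserted, its PTEs
 * assigned to the VF and the new start/size distributed to all GTs on the
 * tile.
 */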
static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);

		err = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(err))
			return err;
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents PF, then function will change PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}
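/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * provision VF1 with 256M of GGTT on the primary GT; passing size == 0
 * would release the region again:
 *
 *	int err = xe_gt_sriov_pf_config_set_ggtt(tile->primary_gt, 1, SZ_256M);
 *
 *	if (err)
 *		return err;
 */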

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest continuous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

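/*
 * Worked example for the estimator below (hypothetical numbers): with a
 * single 1G hole available and 64K alignment, num_vfs == 4 gives
 * fair = ALIGN_DOWN(div_u64(SZ_1G, 4), SZ_64K) = SZ_256M per VF.
 */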
static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at single largest GGTT region
	 * as that will be always the best fit for 1 VF case, and most likely
	 * will also nicely cover other cases where VFs are provisioned on the
	 * fresh and idle PF driver, without any stale GGTT allocations spread
	 * in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}

static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
	u64 shareable = ALIGN_DOWN(GUC_GGTT_TOP, SZ_512M);
	u64 alignment = pf_get_ggtt_alignment(gt);

	if (admin_only_pf && num_vfs == 1)
		return ALIGN_DOWN(shareable, alignment);

	/* need to hardcode due to ~512M of GGTT being reserved */
	if (num_vfs > 56)
		return SZ_64M - SZ_8M;

	return rounddown_pow_of_two(div_u64(shareable, num_vfs));
}

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 profile = pf_profile_fair_ggtt(gt, num_vfs);
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	fair = min(fair, profile);
	if (fair < profile)
		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %llu vs %llu)\n",
				 "GGTT", fair, profile);

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_ggtt_save() - Save the VF's provisioned GGTT data into a buffer.
 * @gt: the &xe_gt
 * @vfid: VF identifier (can't be 0)
 * @buf: the GGTT data destination buffer (or NULL to query the buf size)
 * @size: the size of the buffer (or 0 to query the buf size)
 *
 * This function can only be called on PF.
 *
 * Return: size of the buffer needed to save GGTT data if querying,
 *         0 on successful save or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
					void *buf, size_t size)
{
	struct xe_ggtt_node *node;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !(!buf ^ !size));

	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	node = pf_pick_vf_config(gt, vfid)->ggtt_region;

	if (!buf)
		return xe_ggtt_node_pt_size(node);

	return xe_ggtt_node_save(node, buf, size, vfid);
}
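/*
 * Usage sketch (hypothetical migration caller, not part of this file):
 * query the required buffer size first by passing buf == NULL, then save:
 *
 *	ssize_t ret = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0);
 *
 *	if (ret > 0) {
 *		void *buf = kvmalloc(ret, GFP_KERNEL);
 *
 *		if (buf)
 *			ret = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, buf, ret);
 *	}
 */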

/**
 * xe_gt_sriov_pf_config_ggtt_restore() - Restore the VF's provisioned GGTT data from a buffer.
 * @gt: the &xe_gt
 * @vfid: VF identifier (can't be 0)
 * @buf: the GGTT data source buffer
 * @size: the size of the buffer
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
				       const void *buf, size_t size)
{
	struct xe_ggtt_node *node;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	node = pf_pick_vf_config(gt, vfid)->ggtt_region;

	return xe_ggtt_node_load(node, buf, size, vfid);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF then number of PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	char name[8];

	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision %s..VF%u with %s\n",
				   xe_sriov_function_name(first, name, sizeof(name)),
				   first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "%s..VF%u provisioned with %u%s %s\n",
			 xe_sriov_function_name(first, name, sizeof(name)),
			 first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

static u32 pf_profile_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));

	if (admin_only_pf && num_vfs == 1)
		return ALIGN_DOWN(GUC_ID_MAX, SZ_1K);

	return rounddown_pow_of_two(GUC_ID_MAX / num_vfs);
}

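/*
 * Estimate by probing: starting from the plain arithmetic share, try to
 * actually reserve fair * num_vfs IDs and release them again right away;
 * the largest value for which the reservation succeeds (i.e. also fits
 * the current fragmentation) is the fair quota.
 */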
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 profile = pf_profile_fair_ctxs(gt, num_vfs);
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	fair = min(fair, profile);
	if (fair < profile)
		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
				 "GuC context IDs", fair, profile);

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF then number of PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

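/*
 * Note: unlike the context IDs profile above, this one divides by
 * num_vfs + 1, which presumably keeps one extra share of the
 * GUC_NUM_DOORBELLS pool back for the PF itself (preliminary, see below).
 */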
static u32 pf_profile_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));

	/* XXX: preliminary */
	if (admin_only_pf && num_vfs == 1)
		return GUC_NUM_DOORBELLS - SZ_16;

	return rounddown_pow_of_two(GUC_NUM_DOORBELLS / (num_vfs + 1));
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 profile = pf_profile_fair_dbs(gt, num_vfs);
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	fair = min(fair, profile);
	if (fair < profile)
		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
				 "GuC doorbell IDs", fair, profile);

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? xe_bo_size(bo) : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_invalidate_hw(lmtt);
	}
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

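/*
 * The LMTT is rebuilt from scratch on every LMEM size change: all pages
 * of this VF are dropped on each tile, page tables are re-prepared for
 * the new total, then every primary GT's LMEM object is populated back
 * at its running offset before the HW caches are invalidated.
 */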
pf_update_vf_lmtt(struct xe_device * xe,unsigned int vfid)1495 static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1496 {
1497 	struct xe_gt_sriov_config *config;
1498 	struct xe_tile *tile;
1499 	struct xe_lmtt *lmtt;
1500 	struct xe_bo *bo;
1501 	struct xe_gt *gt;
1502 	u64 total, offset;
1503 	unsigned int gtid;
1504 	unsigned int tid;
1505 	int err;
1506 
1507 	xe_assert(xe, xe_device_has_lmtt(xe));
1508 	xe_assert(xe, IS_SRIOV_PF(xe));
1509 
1510 	total = 0;
1511 	for_each_tile(tile, xe, tid)
1512 		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
1513 
1514 	for_each_tile(tile, xe, tid) {
1515 		lmtt = &tile->sriov.pf.lmtt;
1516 
1517 		xe_lmtt_drop_pages(lmtt, vfid);
1518 		if (!total)
1519 			continue;
1520 
1521 		err  = xe_lmtt_prepare_pages(lmtt, vfid, total);
1522 		if (err)
1523 			goto fail;
1524 
1525 		offset = 0;
1526 		for_each_gt(gt, xe, gtid) {
1527 			if (xe_gt_is_media_type(gt))
1528 				continue;
1529 
1530 			config = pf_pick_vf_config(gt, vfid);
1531 			bo = config->lmem_obj;
1532 			if (!bo)
1533 				continue;
1534 
1535 			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
1536 			if (err)
1537 				goto fail;
1538 			offset += xe_bo_size(bo);
1539 		}
1540 	}
1541 
1542 	pf_force_lmtt_invalidate(xe);
1543 	return 0;
1544 
1545 fail:
1546 	for_each_tile(tile, xe, tid) {
1547 		lmtt = &tile->sriov.pf.lmtt;
1548 		xe_lmtt_drop_pages(lmtt, vfid);
1549 	}
1550 	return err;
1551 }
1552 
1553 /* Return: %true if there was an LMEM provisioned, %false otherwise */
pf_release_vf_config_lmem(struct xe_gt * gt,struct xe_gt_sriov_config * config)1554 static bool pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1555 {
1556 	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
1557 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1558 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1559 
1560 	if (config->lmem_obj) {
1561 		xe_bo_unpin_map_no_vm(config->lmem_obj);
1562 		config->lmem_obj = NULL;
1563 		return true;
1564 	}
1565 	return false;
1566 }
1567 
pf_provision_vf_lmem(struct xe_gt * gt,unsigned int vfid,u64 size)1568 static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1569 {
1570 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1571 	struct xe_device *xe = gt_to_xe(gt);
1572 	struct xe_tile *tile = gt_to_tile(gt);
1573 	struct xe_bo *bo;
1574 	int err;
1575 
1576 	xe_gt_assert(gt, vfid);
1577 	xe_gt_assert(gt, IS_DGFX(xe));
1578 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1579 
1580 	size = round_up(size, pf_get_lmem_alignment(gt));
1581 
1582 	if (config->lmem_obj) {
1583 		err = pf_distribute_config_lmem(gt, vfid, 0);
1584 		if (unlikely(err))
1585 			return err;
1586 
1587 		if (xe_device_has_lmtt(xe))
1588 			pf_reset_vf_lmtt(xe, vfid);
1589 		pf_release_vf_config_lmem(gt, config);
1590 	}
1591 	xe_gt_assert(gt, !config->lmem_obj);
1592 
1593 	if (!size)
1594 		return 0;
1595 
1596 	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
1597 	bo = xe_bo_create_pin_range_novm(xe, tile,
1598 					 ALIGN(size, PAGE_SIZE), 0, ~0ull,
1599 					 ttm_bo_type_kernel,
1600 					 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1601 					 XE_BO_FLAG_NEEDS_2M |
1602 					 XE_BO_FLAG_PINNED |
1603 					 XE_BO_FLAG_PINNED_LATE_RESTORE |
1604 					 XE_BO_FLAG_FORCE_USER_VRAM);
1605 	if (IS_ERR(bo))
1606 		return PTR_ERR(bo);
1607 
1608 	config->lmem_obj = bo;
1609 
1610 	if (xe_device_has_lmtt(xe)) {
1611 		err = pf_update_vf_lmtt(xe, vfid);
1612 		if (unlikely(err))
1613 			goto release;
1614 	}
1615 
1616 	err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
1617 	if (unlikely(err))
1618 		goto reset_lmtt;
1619 
1620 	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
1621 				vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
1622 	return 0;
1623 
1624 reset_lmtt:
1625 	if (xe_device_has_lmtt(xe))
1626 		pf_reset_vf_lmtt(xe, vfid);
1627 release:
1628 	pf_release_vf_config_lmem(gt, config);
1629 	return err;
1630 }
1631 
1632 /**
1633  * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
1634  * @gt: the &xe_gt
1635  * @vfid: the VF identifier
1636  *
1637  * This function can only be called on PF.
1638  *
1639  * Return: VF's (or PF's spare) LMEM quota.
1640  */
xe_gt_sriov_pf_config_get_lmem(struct xe_gt * gt,unsigned int vfid)1641 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
1642 {
1643 	u64 size;
1644 
1645 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1646 	if (vfid)
1647 		size = pf_get_vf_config_lmem(gt, vfid);
1648 	else
1649 		size = pf_get_spare_lmem(gt);
1650 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1651 
1652 	return size;
1653 }
1654 
1655 /**
1656  * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
1657  * @gt: the &xe_gt (can't be media)
1658  * @vfid: the VF identifier
1659  * @size: requested LMEM size
1660  *
1661  * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
1662  */
1663 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1664 {
1665 	int err;
1666 
1667 	if (!xe_device_has_lmtt(gt_to_xe(gt)))
1668 		return -EPERM;
1669 
1670 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1671 	if (vfid)
1672 		err = pf_provision_vf_lmem(gt, vfid, size);
1673 	else
1674 		err = pf_set_spare_lmem(gt, size);
1675 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1676 
1677 	return pf_config_set_u64_done(gt, vfid, size,
1678 				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
1679 				      vfid ? "LMEM" : "spare LMEM", err);
1680 }
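
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * caller provisions one VF with a fixed LMEM quota and relies on the
 * logging done by pf_config_set_u64_done(); the helper name and the 1 GiB
 * quota are made up for illustration.
 */
static int __maybe_unused example_provision_vf1_lmem(struct xe_gt *gt)
{
	/* the quota is rounded up internally to the 2M LMEM alignment */
	return xe_gt_sriov_pf_config_set_lmem(gt, 1, SZ_1G);
}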
1681 
1682 /**
1683  * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
1684  * @gt: the &xe_gt (can't be media)
1685  * @vfid: starting VF identifier (can't be 0)
1686  * @num_vfs: number of VFs to provision
1687  * @size: requested LMEM size
1688  *
1689  * This function can only be called on PF.
1690  *
1691  * Return: 0 on success or a negative error code on failure.
1692  */
1693 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
1694 					unsigned int num_vfs, u64 size)
1695 {
1696 	unsigned int n;
1697 	int err = 0;
1698 
1699 	xe_gt_assert(gt, vfid);
1700 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1701 
1702 	if (!num_vfs)
1703 		return 0;
1704 
1705 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1706 	for (n = vfid; n < vfid + num_vfs; n++) {
1707 		err = pf_provision_vf_lmem(gt, n, size);
1708 		if (err)
1709 			break;
1710 	}
1711 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1712 
1713 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
1714 					   xe_gt_sriov_pf_config_get_lmem,
1715 					   "LMEM", n, err);
1716 }
1717 
1718 static struct xe_bo *pf_get_vf_config_lmem_obj(struct xe_gt *gt, unsigned int vfid)
1719 {
1720 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1721 
1722 	return config->lmem_obj;
1723 }
1724 
1725 /**
1726  * xe_gt_sriov_pf_config_get_lmem_obj() - Take a reference to the struct &xe_bo backing VF LMEM.
1727  * @gt: the &xe_gt
1728  * @vfid: the VF identifier (can't be 0)
1729  *
1730  * This function can only be called on PF.
1731  * The caller is responsible for calling xe_bo_put() on the returned object.
1732  *
1733  * Return: pointer to the struct &xe_bo backing VF LMEM, or NULL if none is provisioned.
1734  */
1735 struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid)
1736 {
1737 	xe_gt_assert(gt, vfid);
1738 
1739 	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
1740 
1741 	return xe_bo_get(pf_get_vf_config_lmem_obj(gt, vfid));
1742 }
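
/*
 * Editor's illustrative sketch (not part of the original file): the caller
 * owns the returned reference and must balance it with xe_bo_put(); the
 * helper name is made up for illustration.
 */
static size_t __maybe_unused example_vf_lmem_obj_size(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_bo *bo = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid);
	size_t size = bo ? xe_bo_size(bo) : 0;

	xe_bo_put(bo);	/* drops the reference taken above (NULL-safe) */
	return size;
}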
1743 
1744 static u64 pf_query_free_lmem(struct xe_gt *gt)
1745 {
1746 	struct xe_tile *tile = gt->tile;
1747 
1748 	return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
1749 }
1750 
1751 static u64 pf_query_max_lmem(struct xe_gt *gt)
1752 {
1753 	u64 alignment = pf_get_lmem_alignment(gt);
1754 	u64 spare = pf_get_spare_lmem(gt);
1755 	u64 free = pf_query_free_lmem(gt);
1756 	u64 avail;
1757 
1758 	/* XXX: need to account for 2MB blocks only */
1759 	avail = free > spare ? free - spare : 0;
1760 	avail = round_down(avail, alignment);
1761 
1762 	return avail;
1763 }
1764 
1765 #ifdef CONFIG_DRM_XE_DEBUG_SRIOV
1766 #define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
1767 #endif
1768 
1769 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
1770 {
1771 	u64 available = pf_query_max_lmem(gt);
1772 	u64 alignment = pf_get_lmem_alignment(gt);
1773 	u64 fair;
1774 
1775 	fair = div_u64(available, num_vfs);
1776 	fair = ALIGN_DOWN(fair, alignment);
1777 #ifdef MAX_FAIR_LMEM
1778 	fair = min_t(u64, MAX_FAIR_LMEM, fair);
1779 #endif
1780 	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
1781 				available / SZ_1M, num_vfs, fair / SZ_1M);
1782 	return fair;
1783 }
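
/*
 * Editor's note: a worked example of the split above, assuming 9 GiB of
 * available LMEM, 4 VFs and the 2M alignment: 9216M / 4 = 2304M, which is
 * already 2M-aligned, so each VF is offered 2304M (further capped to 128M
 * when MAX_FAIR_LMEM is defined for debug builds).
 */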
1784 
1785 /**
1786  * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
1787  * @gt: the &xe_gt (can't be media)
1788  * @vfid: starting VF identifier (can't be 0)
1789  * @num_vfs: number of VFs to provision (can't be 0)
1790  *
1791  * This function can only be called on PF.
1792  *
1793  * Return: 0 on success or a negative error code on failure.
1794  */
1795 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
1796 					unsigned int num_vfs)
1797 {
1798 	u64 fair;
1799 
1800 	xe_gt_assert(gt, vfid);
1801 	xe_gt_assert(gt, num_vfs);
1802 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1803 
1804 	if (!xe_device_has_lmtt(gt_to_xe(gt)))
1805 		return 0;
1806 
1807 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1808 	fair = pf_estimate_fair_lmem(gt, num_vfs);
1809 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1810 
1811 	if (!fair)
1812 		return -ENOSPC;
1813 
1814 	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
1815 }
1816 
1817 /**
1818  * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
1819  * @gt: the &xe_gt
1820  * @vfid: starting VF identifier (can't be 0)
1821  * @num_vfs: number of VFs to provision (can't be 0)
1822  *
1823  * This function can only be called on PF.
1824  *
1825  * Return: 0 on success or a negative error code on failure.
1826  */
1827 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
1828 				   unsigned int num_vfs)
1829 {
1830 	int result = 0;
1831 	int err;
1832 
1833 	xe_gt_assert(gt, vfid);
1834 	xe_gt_assert(gt, num_vfs);
1835 
1836 	if (xe_gt_is_main_type(gt)) {
1837 		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
1838 		result = result ?: err;
1839 		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
1840 		result = result ?: err;
1841 	}
1842 	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
1843 	result = result ?: err;
1844 	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
1845 	result = result ?: err;
1846 
1847 	return result;
1848 }
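
/*
 * Editor's note: the "result = result ?: err" chain above preserves the
 * first error while still attempting every remaining step, so e.g. a GGTT
 * failure is reported even when the later ctxs/dbs provisioning succeeds.
 */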
1849 
1850 static const char *exec_quantum_unit(u32 exec_quantum)
1851 {
1852 	return exec_quantum ? "ms" : "(infinity)";
1853 }
1854 
1855 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1856 				     u32 exec_quantum)
1857 {
1858 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1859 	int err;
1860 
1861 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1862 	if (unlikely(err))
1863 		return err;
1864 
1865 	config->exec_quantum = exec_quantum;
1866 	return 0;
1867 }
1868 
1869 static u32 pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1870 {
1871 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1872 
1873 	return config->exec_quantum;
1874 }
1875 
1876 /**
1877  * xe_gt_sriov_pf_config_set_exec_quantum_locked() - Configure PF/VF execution quantum.
1878  * @gt: the &xe_gt
1879  * @vfid: the PF or VF identifier
1880  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1881  *
1882  * This function can only be called on PF with the master mutex held.
1883  * It will log the provisioned value or an error in case of failure.
1884  *
1885  * Return: 0 on success or a negative error code on failure.
1886  */
1887 int xe_gt_sriov_pf_config_set_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid,
1888 						  u32 exec_quantum)
1889 {
1890 	int err;
1891 
1892 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1893 
1894 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1895 
1896 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1897 				      pf_get_exec_quantum(gt, vfid),
1898 				      "execution quantum", exec_quantum_unit, err);
1899 }
1900 
1901 /**
1902  * xe_gt_sriov_pf_config_set_exec_quantum() - Configure PF/VF execution quantum.
1903  * @gt: the &xe_gt
1904  * @vfid: the PF or VF identifier
1905  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1906  *
1907  * This function can only be called on PF.
1908  * It will log the provisioned value or an error in case of failure.
1909  *
1910  * Return: 0 on success or a negative error code on failure.
1911  */
1912 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1913 					   u32 exec_quantum)
1914 {
1915 	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
1916 
1917 	return xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, exec_quantum);
1918 }
1919 
1920 /**
1921  * xe_gt_sriov_pf_config_get_exec_quantum_locked() - Get PF/VF execution quantum.
1922  * @gt: the &xe_gt
1923  * @vfid: the PF or VF identifier
1924  *
1925  * This function can only be called on PF with the master mutex held.
1926  *
1927  * Return: execution quantum in milliseconds (or 0 if infinity).
1928  */
1929 u32 xe_gt_sriov_pf_config_get_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid)
1930 {
1931 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1932 
1933 	return pf_get_exec_quantum(gt, vfid);
1934 }
1935 
1936 /**
1937  * xe_gt_sriov_pf_config_get_exec_quantum() - Get PF/VF execution quantum.
1938  * @gt: the &xe_gt
1939  * @vfid: the PF or VF identifier
1940  *
1941  * This function can only be called on PF.
1942  *
1943  * Return: execution quantum in milliseconds (or 0 if infinity).
1944  */
1945 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1946 {
1947 	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
1948 
1949 	return pf_get_exec_quantum(gt, vfid);
1950 }
1951 
1952 /**
1953  * xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked() - Configure EQ for PF and VFs.
1954  * @gt: the &xe_gt to configure
1955  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1956  *
1957  * This function can only be called on PF with the master mutex held.
1958  *
1959  * Return: 0 on success or a negative error code on failure.
1960  */
1961 int xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(struct xe_gt *gt, u32 exec_quantum)
1962 {
1963 	unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
1964 	unsigned int n;
1965 	int err = 0;
1966 
1967 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1968 
1969 	for (n = 0; n <= totalvfs; n++) {
1970 		err = pf_provision_exec_quantum(gt, VFID(n), exec_quantum);
1971 		if (err)
1972 			break;
1973 	}
1974 
1975 	return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, exec_quantum,
1976 					   pf_get_exec_quantum, "execution quantum",
1977 					   exec_quantum_unit, n, err);
1978 }
1979 
1980 static const char *preempt_timeout_unit(u32 preempt_timeout)
1981 {
1982 	return preempt_timeout ? "us" : "(infinity)";
1983 }
1984 
1985 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1986 					u32 preempt_timeout)
1987 {
1988 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1989 	int err;
1990 
1991 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1992 	if (unlikely(err))
1993 		return err;
1994 
1995 	config->preempt_timeout = preempt_timeout;
1996 
1997 	return 0;
1998 }
1999 
2000 static u32 pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
2001 {
2002 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2003 
2004 	return config->preempt_timeout;
2005 }
2006 
2007 /**
2008  * xe_gt_sriov_pf_config_set_preempt_timeout_locked() - Configure PF/VF preemption timeout.
2009  * @gt: the &xe_gt
2010  * @vfid: the PF or VF identifier
2011  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
2012  *
2013  * This function can only be called on PF with the master mutex held.
2014  * It will log the provisioned value or an error in case of failure.
2015  *
2016  * Return: 0 on success or a negative error code on failure.
2017  */
2018 int xe_gt_sriov_pf_config_set_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid,
2019 						     u32 preempt_timeout)
2020 {
2021 	int err;
2022 
2023 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
2024 
2025 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
2026 
2027 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
2028 				      pf_get_preempt_timeout(gt, vfid),
2029 				      "preemption timeout", preempt_timeout_unit, err);
2030 }
2031 
2032 /**
2033  * xe_gt_sriov_pf_config_set_preempt_timeout() - Configure PF/VF preemption timeout.
2034  * @gt: the &xe_gt
2035  * @vfid: the PF or VF identifier
2036  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
2037  *
2038  * This function can only be called on PF.
2039  *
2040  * Return: 0 on success or a negative error code on failure.
2041  */
2042 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
2043 					      u32 preempt_timeout)
2044 {
2045 	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
2046 
2047 	return xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, preempt_timeout);
2048 }
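
/*
 * Editor's illustrative sketch (not part of the original file): the
 * _locked variants allow several scheduling parameters to be updated in a
 * single critical section; the helper name and the values are made up for
 * illustration.
 */
static int __maybe_unused example_set_vf_sched_params(struct xe_gt *gt, unsigned int vfid)
{
	int err;

	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	err = xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, 20);
	if (err)
		return err;

	return xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, 10000);
}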
2049 
2050 /**
2051  * xe_gt_sriov_pf_config_get_preempt_timeout_locked() - Get PF/VF preemption timeout.
2052  * @gt: the &xe_gt
2053  * @vfid: the PF or VF identifier
2054  *
2055  * This function can only be called on PF with the master mutex held.
2056  *
2057  * Return: preemption timeout in microseconds (or 0 if infinity).
2058  */
2059 u32 xe_gt_sriov_pf_config_get_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid)
2060 {
2061 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
2062 
2063 	return pf_get_preempt_timeout(gt, vfid);
2064 }
2065 
2066 /**
2067  * xe_gt_sriov_pf_config_get_preempt_timeout() - Get PF/VF preemption timeout.
2068  * @gt: the &xe_gt
2069  * @vfid: the PF or VF identifier
2070  *
2071  * This function can only be called on PF.
2072  *
2073  * Return: preemption timeout in microseconds (or 0 if infinity).
2074  */
2075 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
2076 {
2077 	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
2078 
2079 	return pf_get_preempt_timeout(gt, vfid);
2080 }
2081 
2082 /**
2083  * xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked() - Configure PT for PF and VFs.
2084  * @gt: the &xe_gt to configure
2085  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
2086  *
2087  * This function can only be called on PF with the master mutex held.
2088  *
2089  * Return: 0 on success or a negative error code on failure.
2090  */
2091 int xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(struct xe_gt *gt, u32 preempt_timeout)
2092 {
2093 	unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
2094 	unsigned int n;
2095 	int err = 0;
2096 
2097 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
2098 
2099 	for (n = 0; n <= totalvfs; n++) {
2100 		err = pf_provision_preempt_timeout(gt, VFID(n), preempt_timeout);
2101 		if (err)
2102 			break;
2103 	}
2104 
2105 	return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, preempt_timeout,
2106 					   pf_get_preempt_timeout, "preemption timeout",
2107 					   preempt_timeout_unit, n, err);
2108 }
2109 
2110 static const char *sched_priority_unit(u32 priority)
2111 {
2112 	return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
2113 		priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
2114 		priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
2115 		"(?)";
2116 }
2117 
2118 static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
2119 {
2120 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2121 	int err;
2122 
2123 	err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
2124 	if (unlikely(err))
2125 		return err;
2126 
2127 	config->sched_priority = priority;
2128 	return 0;
2129 }
2130 
2131 static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
2132 {
2133 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2134 
2135 	return config->sched_priority;
2136 }
2137 
2138 /**
2139  * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
2140  * @gt: the &xe_gt
2141  * @vfid: the VF identifier
2142  * @priority: requested scheduling priority
2143  *
2144  * This function can only be called on PF.
2145  *
2146  * Return: 0 on success or a negative error code on failure.
2147  */
2148 int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
2149 {
2150 	int err;
2151 
2152 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2153 	err = pf_provision_sched_priority(gt, vfid, priority);
2154 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2155 
2156 	return pf_config_set_u32_done(gt, vfid, priority,
2157 				      xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
2158 				      "scheduling priority", sched_priority_unit, err);
2159 }
2160 
2161 /**
2162  * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
2163  * @gt: the &xe_gt
2164  * @vfid: the VF identifier
2165  *
2166  * This function can only be called on PF.
2167  *
2168  * Return: VF's (or PF's) scheduling priority.
2169  */
2170 u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
2171 {
2172 	u32 priority;
2173 
2174 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2175 	priority = pf_get_sched_priority(gt, vfid);
2176 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2177 
2178 	return priority;
2179 }
2180 
2181 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
2182 {
2183 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
2184 
2185 	config->exec_quantum = 0;
2186 	config->preempt_timeout = 0;
2187 }
2188 
2189 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
2190 				  enum xe_guc_klv_threshold_index index, u32 value)
2191 {
2192 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2193 	int err;
2194 
2195 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
2196 	if (unlikely(err))
2197 		return err;
2198 
2199 	config->thresholds[index] = value;
2200 
2201 	return 0;
2202 }
2203 
2204 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
2205 			    enum xe_guc_klv_threshold_index index)
2206 {
2207 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2208 
2209 	return config->thresholds[index];
2210 }
2211 
2212 static const char *threshold_unit(u32 threshold)
2213 {
2214 	return threshold ? "" : "(disabled)";
2215 }
2216 
2217 /**
2218  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
2219  * @gt: the &xe_gt
2220  * @vfid: the VF identifier
2221  * @index: the threshold index
2222  * @value: requested value (0 means disabled)
2223  *
2224  * This function can only be called on PF.
2225  *
2226  * Return: 0 on success or a negative error code on failure.
2227  */
2228 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
2229 					enum xe_guc_klv_threshold_index index, u32 value)
2230 {
2231 	u32 key = xe_guc_klv_threshold_index_to_key(index);
2232 	const char *name = xe_guc_klv_key_to_string(key);
2233 	int err;
2234 
2235 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2236 	err = pf_provision_threshold(gt, vfid, index, value);
2237 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2238 
2239 	return pf_config_set_u32_done(gt, vfid, value,
2240 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
2241 				      name, threshold_unit, err);
2242 }
2243 
2244 /**
2245  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
2246  * @gt: the &xe_gt
2247  * @vfid: the VF identifier
2248  * @index: the threshold index
2249  *
2250  * This function can only be called on PF.
2251  *
2252  * Return: value of VF's (or PF's) threshold.
2253  */
2254 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
2255 					enum xe_guc_klv_threshold_index index)
2256 {
2257 	u32 value;
2258 
2259 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2260 	value = pf_get_threshold(gt, vfid, index);
2261 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2262 
2263 	return value;
2264 }
2265 
2266 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
2267 {
2268 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
2269 
2270 #define reset_threshold_config(TAG, ...) ({				\
2271 	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
2272 });
2273 
2274 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
2275 #undef reset_threshold_config
2276 }
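
/*
 * Editor's note: MAKE_XE_GUC_KLV_THRESHOLDS_SET() is an X-macro that
 * invokes the given macro once per threshold TAG, so the helper above
 * expands into one "config->thresholds[...] = 0;" statement per defined
 * threshold.
 */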
2277 
2278 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
2279 {
2280 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2281 	struct xe_device *xe = gt_to_xe(gt);
2282 	bool released;
2283 
2284 	if (xe_gt_is_main_type(gt)) {
2285 		pf_release_vf_config_ggtt(gt, config);
2286 		if (IS_DGFX(xe)) {
2287 			released = pf_release_vf_config_lmem(gt, config);
2288 			if (released && xe_device_has_lmtt(xe))
2289 				pf_update_vf_lmtt(xe, vfid);
2290 		}
2291 	}
2292 	pf_release_config_ctxs(gt, config);
2293 	pf_release_config_dbs(gt, config);
2294 	pf_reset_config_sched(gt, config);
2295 	pf_reset_config_thresholds(gt, config);
2296 }
2297 
2298 /**
2299  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
2300  * @gt: the &xe_gt
2301  * @vfid: the VF identifier (can't be PF)
2302  * @force: force configuration release
2303  *
2304  * This function can only be called on PF.
2305  *
2306  * Return: 0 on success or a negative error code on failure.
2307  */
2308 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
2309 {
2310 	int err;
2311 
2312 	xe_gt_assert(gt, vfid);
2313 
2314 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2315 	err = pf_send_vf_cfg_reset(gt, vfid);
2316 	if (!err || force)
2317 		pf_release_vf_config(gt, vfid);
2318 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2319 
2320 	if (unlikely(err)) {
2321 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
2322 				   vfid, ERR_PTR(err),
2323 				   force ? " but all resources were released anyway!" : "");
2324 	}
2325 
2326 	return force ? 0 : err;
2327 }
2328 
2329 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
2330 {
2331 	if (xe_ggtt_node_allocated(ggtt_region))
2332 		xe_ggtt_assign(ggtt_region, vfid);
2333 }
2334 
2335 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
2336 {
2337 	struct xe_migrate *m = tile->migrate;
2338 	struct dma_fence *fence;
2339 	int err;
2340 
2341 	if (!bo)
2342 		return 0;
2343 
2344 	xe_bo_lock(bo, false);
2345 	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
2346 	if (IS_ERR(fence)) {
2347 		err = PTR_ERR(fence);
2348 	} else if (!fence) {
2349 		err = -ENOMEM;
2350 	} else {
2351 		long ret = dma_fence_wait_timeout(fence, false, timeout);
2352 
2353 		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
2354 		dma_fence_put(fence);
2355 		if (!err)
2356 			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
2357 						jiffies_to_msecs(timeout - ret));
2358 	}
2359 	xe_bo_unlock(bo);
2360 
2361 	return err;
2362 }
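
/*
 * Editor's note: dma_fence_wait_timeout() returns the remaining jiffies
 * (> 0) on success, 0 on timeout and a negative error code otherwise; the
 * ternary chain above maps those cases to 0, -ETIMEDOUT and the error,
 * respectively.
 */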
2363 
2364 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
2365 {
2366 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2367 	struct xe_tile *tile = gt_to_tile(gt);
2368 	struct xe_device *xe = gt_to_xe(gt);
2369 	int err = 0;
2370 
2371 	/*
2372 	 * Only GGTT and LMEM need to be cleared by the PF.
2373 	 * GuC doorbell IDs and context IDs do not need any clearing.
2374 	 */
2375 	if (xe_gt_is_main_type(gt)) {
2376 		pf_sanitize_ggtt(config->ggtt_region, vfid);
2377 		if (IS_DGFX(xe))
2378 			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
2379 	}
2380 
2381 	return err;
2382 }
2383 
2384 /**
2385  * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
2386  * @gt: the &xe_gt
2387  * @vfid: the VF identifier (can't be PF)
2388  * @timeout: maximum timeout to wait for completion in jiffies
2389  *
2390  * This function can only be called on PF.
2391  *
2392  * Return: 0 on success or a negative error code on failure.
2393  */
2394 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
2395 {
2396 	int err;
2397 
2398 	xe_gt_assert(gt, vfid != PFID);
2399 
2400 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2401 	err = pf_sanitize_vf_resources(gt, vfid, timeout);
2402 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2403 
2404 	if (unlikely(err))
2405 		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
2406 				   vfid, ERR_PTR(err));
2407 	return err;
2408 }
2409 
2410 /**
2411  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
2412  * @gt: the &xe_gt
2413  * @vfid: the VF identifier (can't be PF)
2414  * @refresh: explicit refresh
2415  *
2416  * This function can only be called on PF.
2417  *
2418  * Return: 0 on success or a negative error code on failure.
2419  */
2420 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2421 {
2422 	int err = 0;
2423 
2424 	xe_gt_assert(gt, vfid);
2425 
2426 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2427 	err = pf_push_vf_cfg(gt, vfid, refresh);
2428 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2429 
2430 	if (unlikely(err)) {
2431 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2432 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2433 	}
2434 
2435 	return err;
2436 }
2437 
2438 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2439 {
2440 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2441 	struct xe_device *xe = gt_to_xe(gt);
2442 	bool is_primary = xe_gt_is_main_type(gt);
2443 	bool valid_ggtt, valid_ctxs, valid_dbs;
2444 	bool valid_any, valid_all;
2445 
2446 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2447 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2448 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2449 
2450 	/* note that GuC doorbells are optional */
2451 	valid_any = valid_ctxs || valid_dbs;
2452 	valid_all = valid_ctxs;
2453 
2454 	/* and GGTT/LMEM is configured on primary GT only */
2455 	valid_all = valid_all && valid_ggtt;
2456 	valid_any = valid_any || (valid_ggtt && is_primary);
2457 
2458 	if (xe_device_has_lmtt(xe)) {
2459 		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2460 
2461 		valid_any = valid_any || (valid_lmem && is_primary);
2462 		valid_all = valid_all && valid_lmem;
2463 	}
2464 
2465 	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
2466 }
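
/*
 * Editor's note: the validation above distinguishes three states: all
 * mandatory resources present (0, fully provisioned), only some present
 * (-ENOKEY, partially provisioned) and none present (-ENODATA, empty, as
 * used by xe_gt_sriov_pf_config_is_empty() below).
 */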
2467 
2468 /**
2469  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2470  * @gt: the &xe_gt
2471  * @vfid: the VF identifier (can't be PF)
2472  *
2473  * This function can only be called on PF.
2474  *
2475  * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2476  */
2477 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2478 {
2479 	bool empty;
2480 
2481 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2482 	xe_gt_assert(gt, vfid);
2483 
2484 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2485 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2486 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2487 
2488 	return empty;
2489 }
2490 
2491 /**
2492  * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2493  * @gt: the &xe_gt
2494  * @vfid: the VF identifier (can't be PF)
2495  * @buf: the buffer to save a config to (or NULL if query the buf size)
2496  * @size: the size of the buffer (or 0 if query the buf size)
2497  *
2498  * This function can only be called on PF.
2499  *
2500  * Return: minimum size of the buffer or the number of bytes saved,
2501  *         or a negative error code on failure.
2502  */
2503 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2504 {
2505 	struct xe_gt_sriov_config *config;
2506 	ssize_t ret;
2507 
2508 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2509 	xe_gt_assert(gt, vfid);
2510 	xe_gt_assert(gt, !(!buf ^ !size));
2511 
2512 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2513 	ret = pf_validate_vf_config(gt, vfid);
2514 	if (!size) {
2515 		ret = ret ? 0 : SZ_4K;
2516 	} else if (!ret) {
2517 		if (size < SZ_4K) {
2518 			ret = -ENOBUFS;
2519 		} else {
2520 			config = pf_pick_vf_config(gt, vfid);
2521 			ret = encode_config(buf, config, false) * sizeof(u32);
2522 		}
2523 	}
2524 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2525 
2526 	return ret;
2527 }
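
/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * calling pattern queries the required size first (NULL buffer), then
 * saves into an allocation of at least that size; the helper name is made
 * up and <linux/slab.h> is assumed to be reachable via existing includes.
 */
static ssize_t __maybe_unused example_save_vf_config(struct xe_gt *gt, unsigned int vfid,
						     void **out)
{
	ssize_t size, ret;
	void *buf;

	size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
	if (size <= 0)
		return size;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = xe_gt_sriov_pf_config_save(gt, vfid, buf, size);
	if (ret < 0)
		kfree(buf);
	else
		*out = buf;

	return ret;
}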
2528 
2529 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2530 				    u32 key, u32 len, const u32 *value)
2531 {
2532 	switch (key) {
2533 	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2534 		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2535 			return -EBADMSG;
2536 		return pf_provision_vf_ctxs(gt, vfid, value[0]);
2537 
2538 	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2539 		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2540 			return -EBADMSG;
2541 		return pf_provision_vf_dbs(gt, vfid, value[0]);
2542 
2543 	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2544 		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2545 			return -EBADMSG;
2546 		return pf_provision_exec_quantum(gt, vfid, value[0]);
2547 
2548 	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2549 		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2550 			return -EBADMSG;
2551 		return pf_provision_preempt_timeout(gt, vfid, value[0]);
2552 
2553 	/* auto-generate case statements */
2554 #define define_threshold_key_to_provision_case(TAG, ...)				\
2555 	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
2556 		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
2557 		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
2558 			return -EBADMSG;						\
2559 		return pf_provision_threshold(gt, vfid,					\
2560 					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
2561 					      value[0]);
2562 
2563 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2564 #undef define_threshold_key_to_provision_case
2565 	}
2566 
2567 	if (xe_gt_is_media_type(gt))
2568 		return -EKEYREJECTED;
2569 
2570 	switch (key) {
2571 	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2572 		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2573 			return -EBADMSG;
2574 		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2575 
2576 	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2577 		if (!IS_DGFX(gt_to_xe(gt)))
2578 			return -EKEYREJECTED;
2579 		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2580 			return -EBADMSG;
2581 		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2582 	}
2583 
2584 	return -EKEYREJECTED;
2585 }
2586 
2587 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2588 				const u32 *klvs, size_t num_dwords)
2589 {
2590 	int err;
2591 
2592 	while (num_dwords >= GUC_KLV_LEN_MIN) {
2593 		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2594 		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2595 
2596 		klvs += GUC_KLV_LEN_MIN;
2597 		num_dwords -= GUC_KLV_LEN_MIN;
2598 
2599 		if (num_dwords < len)
2600 			err = -EBADMSG;
2601 		else
2602 			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2603 
2604 		if (err) {
2605 			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2606 			return err;
2607 		}
2608 
2609 		klvs += len;
2610 		num_dwords -= len;
2611 	}
2612 
2613 	return pf_validate_vf_config(gt, vfid);
2614 }
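
/*
 * Editor's note: each KLV starts with one header dword carrying the key
 * (GUC_KLV_0_KEY) and the value length in dwords (GUC_KLV_0_LEN),
 * followed by 'len' value dwords; the loop above walks that layout and
 * rejects truncated entries with -EBADMSG.
 */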
2615 
2616 /**
2617  * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2618  * @gt: the &xe_gt
2619  * @vfid: the VF identifier (can't be PF)
2620  * @buf: the buffer with config data
2621  * @size: the size of the config data
2622  *
2623  * This function can only be called on PF.
2624  *
2625  * Return: 0 on success or a negative error code on failure.
2626  */
2627 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2628 				  const void *buf, size_t size)
2629 {
2630 	int err;
2631 
2632 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2633 	xe_gt_assert(gt, vfid);
2634 
2635 	if (!size)
2636 		return -ENODATA;
2637 
2638 	if (size % sizeof(u32))
2639 		return -EINVAL;
2640 
2641 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2642 		struct drm_printer p = xe_gt_dbg_printer(gt);
2643 
2644 		drm_printf(&p, "restoring VF%u config:\n", vfid);
2645 		xe_guc_klv_print(buf, size / sizeof(u32), &p);
2646 	}
2647 
2648 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2649 	err = pf_send_vf_cfg_reset(gt, vfid);
2650 	if (!err) {
2651 		pf_release_vf_config(gt, vfid);
2652 		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2653 	}
2654 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2655 
2656 	return err;
2657 }
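
/*
 * Editor's illustrative sketch (not part of the original file): a
 * save/restore round trip built on the hypothetical example_save_vf_config()
 * above, e.g. to clone provisioning between two identically configured GTs.
 */
static int __maybe_unused example_clone_vf_config(struct xe_gt *src, struct xe_gt *dst,
						  unsigned int vfid)
{
	void *buf = NULL;
	ssize_t size;
	int err;

	size = example_save_vf_config(src, vfid, &buf);
	if (size < 0)
		return size;
	if (!size)
		return -ENODATA;

	err = xe_gt_sriov_pf_config_restore(dst, vfid, buf, size);
	kfree(buf);
	return err;
}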
2658 
2659 static void pf_prepare_self_config(struct xe_gt *gt)
2660 {
2661 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);
2662 
2663 	/*
2664 	 * We want the PF to be allowed to use all context IDs, all doorbell IDs,
2665 	 * and the whole usable GGTT area. While we can store the ctxs/dbs numbers
2666 	 * directly in the config structure, we can't do the same with the GGTT
2667 	 * configuration, so let it be prepared on demand while pushing KLVs.
2668 	 */
2669 	config->num_ctxs = GUC_ID_MAX;
2670 	config->num_dbs = GUC_NUM_DOORBELLS;
2671 }
2672 
2673 static int pf_push_self_config(struct xe_gt *gt)
2674 {
2675 	int err;
2676 
2677 	err = pf_push_full_vf_config(gt, PFID);
2678 	if (err) {
2679 		xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
2680 				ERR_PTR(err));
2681 		return err;
2682 	}
2683 
2684 	xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
2685 	return 0;
2686 }
2687 
2688 static void fini_config(void *arg)
2689 {
2690 	struct xe_gt *gt = arg;
2691 	struct xe_device *xe = gt_to_xe(gt);
2692 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
2693 
2694 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2695 	for (n = 1; n <= total_vfs; n++)
2696 		pf_release_vf_config(gt, n);
2697 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2698 }
2699 
2700 /**
2701  * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
2702  * @gt: the &xe_gt
2703  *
2704  * This function can only be called on PF.
2705  *
2706  * Return: 0 on success or a negative error code on failure.
2707  */
2708 int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
2709 {
2710 	struct xe_device *xe = gt_to_xe(gt);
2711 	int err;
2712 
2713 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
2714 
2715 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2716 	pf_prepare_self_config(gt);
2717 	err = pf_push_self_config(gt);
2718 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2719 
2720 	if (err)
2721 		return err;
2722 
2723 	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
2724 }
2725 
2726 /**
2727  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2728  * @gt: the &xe_gt
2729  *
2730  * Any prior configurations pushed to GuC are lost when the GT is reset.
2731  * Push again all non-empty VF configurations to the GuC.
2732  *
2733  * This function can only be called on PF.
2734  */
2735 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2736 {
2737 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2738 	unsigned int fail = 0, skip = 0;
2739 
2740 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2741 	pf_push_self_config(gt);
2742 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2743 
2744 	for (n = 1; n <= total_vfs; n++) {
2745 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
2746 			skip++;
2747 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
2748 			fail++;
2749 	}
2750 
2751 	if (fail)
2752 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2753 				   fail, total_vfs - skip, str_plural(total_vfs - skip));
2754 
2755 	if (fail != total_vfs)
2756 		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
2757 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2758 }
2759 
2760 /**
2761  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2762  * @gt: the &xe_gt
2763  * @p: the &drm_printer
2764  *
2765  * Print GGTT configuration data for all VFs.
2766  * VFs without provisioned GGTT are ignored.
2767  *
2768  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2769  */
2770 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2771 {
2772 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2773 	const struct xe_gt_sriov_config *config;
2774 	char buf[10];
2775 
2776 	for (n = 1; n <= total_vfs; n++) {
2777 		config = &gt->sriov.pf.vfs[n].config;
2778 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2779 			continue;
2780 
2781 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2782 				buf, sizeof(buf));
2783 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2784 			   n, config->ggtt_region->base.start,
2785 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2786 			   buf);
2787 	}
2788 
2789 	return 0;
2790 }
2791 
2792 /**
2793  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2794  * @gt: the &xe_gt
2795  * @p: the &drm_printer
2796  *
2797  * Print GuC context ID allocations across all VFs.
2798  * VFs without GuC context IDs are skipped.
2799  *
2800  * This function can only be called on PF.
2801  * Return: 0 on success or a negative error code on failure.
2802  */
2803 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2804 {
2805 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2806 	const struct xe_gt_sriov_config *config;
2807 
2808 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2809 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2810 
2811 	for (n = 1; n <= total_vfs; n++) {
2812 		config = &gt->sriov.pf.vfs[n].config;
2813 		if (!config->num_ctxs)
2814 			continue;
2815 
2816 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2817 			   n,
2818 			   config->begin_ctx,
2819 			   config->begin_ctx + config->num_ctxs - 1,
2820 			   config->num_ctxs);
2821 	}
2822 
2823 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2824 	return 0;
2825 }
2826 
2827 /**
2828  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2829  * @gt: the &xe_gt
2830  * @p: the &drm_printer
2831  *
2832  * Print GuC doorbell IDs allocations across all VFs.
2833  * VFs without GuC doorbell IDs are skipped.
2834  *
2835  * This function can only be called on PF.
2836  * Return: 0 on success or a negative error code on failure.
2837  */
2838 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2839 {
2840 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2841 	const struct xe_gt_sriov_config *config;
2842 
2843 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2844 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2845 
2846 	for (n = 1; n <= total_vfs; n++) {
2847 		config = &gt->sriov.pf.vfs[n].config;
2848 		if (!config->num_dbs)
2849 			continue;
2850 
2851 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2852 			   n,
2853 			   config->begin_db,
2854 			   config->begin_db + config->num_dbs - 1,
2855 			   config->num_dbs);
2856 	}
2857 
2858 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2859 	return 0;
2860 }
2861 
2862 /**
2863  * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2864  * @gt: the &xe_gt
2865  * @p: the &drm_printer
2866  *
2867  * Print LMEM allocations across all VFs.
2868  * VFs without LMEM allocation are skipped.
2869  *
2870  * This function can only be called on PF.
2871  * Return: 0 on success or a negative error code on failure.
2872  */
2873 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2874 {
2875 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2876 	const struct xe_gt_sriov_config *config;
2877 	char buf[10];
2878 
2879 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2880 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2881 
2882 	for (n = 1; n <= total_vfs; n++) {
2883 		config = &gt->sriov.pf.vfs[n].config;
2884 		if (!config->lmem_obj)
2885 			continue;
2886 
2887 		string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
2888 				buf, sizeof(buf));
2889 		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2890 			   n, xe_bo_size(config->lmem_obj), buf);
2891 	}
2892 
2893 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2894 	return 0;
2895 }
2896 
2897 /**
2898  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2899  * @gt: the &xe_gt
2900  * @p: the &drm_printer
2901  *
2902  * Print GGTT ranges that are available for the provisioning.
2903  *
2904  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2905  */
2906 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2907 {
2908 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2909 	u64 alignment = pf_get_ggtt_alignment(gt);
2910 	u64 spare, avail, total;
2911 	char buf[10];
2912 
2913 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2914 
2915 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2916 
2917 	spare = pf_get_spare_ggtt(gt);
2918 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2919 
2920 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2921 
2922 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2923 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2924 
2925 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2926 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2927 
2928 	avail = total > spare ? total - spare : 0;
2929 
2930 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2931 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2932 
2933 	return 0;
2934 }
2935 
2936 #if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
2937 #include "tests/xe_gt_sriov_pf_config_kunit.c"
2938 #endif
2939