// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wopcm.h"

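/* Combine two 32-bit halves (e.g. the hi/lo dwords of a 64-bit KLV payload) into a u64. */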
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

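	/* a reset carries no KLVs, so any positive parsed-KLV count is a protocol error */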
	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
	struct xe_guc *guc = &gt->uc.guc;

	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       struct xe_guc_buf buf, u32 num_dwords)
{
	int ret;

	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_dbg_printer(gt);
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		char name[8];

		xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
				xe_sriov_function_name(vfid, name, sizeof(name)),
				num_klvs, str_plural(num_klvs));
		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

/*
 * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
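	/* scoped GuC buffer: CLASS() from <linux/cleanup.h> releases it automatically on return */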
	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
}

static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
{
	u32 n = 0;

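	/* "details" selects the full layout (start offsets included) vs. a size-only summary */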
	if (details) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
		cfg[n++] = lower_32_bits(start);
		cfg[n++] = upper_32_bits(start);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
	cfg[n++] = lower_32_bits(size);
	cfg[n++] = upper_32_bits(size);

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	struct xe_ggtt_node *node = config->ggtt_region;

	if (!xe_ggtt_node_allocated(node))
		return 0;

	return encode_ggtt(cfg, node->base.start, node->base.size, details);
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details && config->num_ctxs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details && config->num_dbs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
		cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

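	/*
	 * X-macro: MAKE_XE_GUC_KLV_THRESHOLDS_SET() expands the helper below
	 * once per defined threshold, emitting a KLV tag/value pair for each.
	 */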
#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}

static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	cfg = xe_guc_buf_cpu_ptr(buf);
	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (vfid == PFID) {
		u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
		u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;

		/* plain PF config data will never include a real GGTT region */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* fake PF GGTT config covers full GGTT range except reserved WOPCM */
		num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
	}

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);

	return err;
}

static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
{
	int err = 0;

	xe_gt_assert(gt, vfid);
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (reset)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);

	return err;
}

static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
{
	return pf_push_vf_cfg(gt, vfid, true);
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

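	/* platforms whose VRAM needs 64K pages also need 64K-aligned GGTT ranges */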
	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as PTE will be implicitly re-assigned to PF by
		 * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

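	/* reprovisioning: scrub the old GGTT config from GuC and release the node first */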
	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);

		err = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(err))
			return err;
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents the PF, then this function changes the PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

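	/* on error, "last" is the VF that failed: report the VFs done so far, then the failure */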
	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at single largest GGTT region
	 * as that will be always the best fit for 1 VF case, and most likely
	 * will also nicely cover other cases where VFs are provisioned on the
	 * fresh and idle PF driver, without any stale GGTT allocations spread
	 * in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF then the number of PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

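	/* probe decreasing quotas with a trial reserve/release until one fits */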
	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF then the number of PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

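	/* probe decreasing quotas with a trial reserve/release until one fits */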
	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? xe_bo_size(bo) : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_invalidate_hw(lmtt);
	}
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

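	/*
	 * The VF sees its LMEM as one contiguous range: sum the quotas of all
	 * tiles, then populate every tile's LMTT with each tile's backing BO
	 * at increasing offsets.
	 */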
	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += xe_bo_size(bo);
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

/* Return: %true if there was an LMEM provisioned, %false otherwise */
static bool pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, xe_gt_is_main_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
		return true;
	}
	return false;
}

static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		if (xe_device_has_lmtt(xe))
			pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_range_novm(xe, tile,
					 ALIGN(size, PAGE_SIZE), 0, ~0ull,
					 ttm_bo_type_kernel,
					 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					 XE_BO_FLAG_NEEDS_2M |
					 XE_BO_FLAG_PINNED |
					 XE_BO_FLAG_PINNED_LATE_RESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	if (xe_device_has_lmtt(xe)) {
		err = pf_update_vf_lmtt(xe, vfid);
		if (unlikely(err))
			goto release;
	}

	err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
	return 0;

reset_lmtt:
	if (xe_device_has_lmtt(xe))
		pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	if (!xe_device_has_lmtt(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (xe_gt_is_main_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}
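
/*
 * Typical usage (illustrative sketch only, not taken from this file): when
 * enabling num_vfs VFs, a PF driver would call the above on each GT, e.g.:
 *
 *	for_each_gt(gt, xe, gtid)
 *		err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
 *
 * which fairly splits GGTT and LMEM (on the primary GT only) plus GuC
 * context IDs and doorbell IDs across VF1..VFn.
 */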
1705
exec_quantum_unit(u32 exec_quantum)1706 static const char *exec_quantum_unit(u32 exec_quantum)
1707 {
1708 return exec_quantum ? "ms" : "(infinity)";
1709 }
1710
pf_provision_exec_quantum(struct xe_gt * gt,unsigned int vfid,u32 exec_quantum)1711 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1712 u32 exec_quantum)
1713 {
1714 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1715 int err;
1716
1717 err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1718 if (unlikely(err))
1719 return err;
1720
1721 config->exec_quantum = exec_quantum;
1722 return 0;
1723 }
1724
pf_get_exec_quantum(struct xe_gt * gt,unsigned int vfid)1725 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1726 {
1727 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1728
1729 return config->exec_quantum;
1730 }
1731
1732 /**
1733 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1734 * @gt: the &xe_gt
1735 * @vfid: the VF identifier
1736 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1737 *
1738 * This function can only be called on PF.
1739 *
1740 * Return: 0 on success or a negative error code on failure.
1741 */
1742 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1743 u32 exec_quantum)
1744 {
1745 int err;
1746
1747 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1748 err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1749 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1750
1751 return pf_config_set_u32_done(gt, vfid, exec_quantum,
1752 xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1753 "execution quantum", exec_quantum_unit, err);
1754 }
1755
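/*
 * Usage sketch (hypothetical): limit VF2 to a 20 ms execution quantum and
 * read the committed value back. A zero value would mean an infinite quantum.
 */
#if 0	/* example only, not built */
static int example_set_quantum(struct xe_gt *gt)
{
	int err = xe_gt_sriov_pf_config_set_exec_quantum(gt, 2, 20);

	if (!err)
		WARN_ON(xe_gt_sriov_pf_config_get_exec_quantum(gt, 2) != 20);
	return err;
}
#endif
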
1756 /**
1757 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1758 * @gt: the &xe_gt
1759 * @vfid: the VF identifier
1760 *
1761 * This function can only be called on PF.
1762 *
1763 * Return: VF's (or PF's) execution quantum in milliseconds.
1764 */
1765 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1766 {
1767 u32 exec_quantum;
1768
1769 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1770 exec_quantum = pf_get_exec_quantum(gt, vfid);
1771 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1772
1773 return exec_quantum;
1774 }
1775
1776 static const char *preempt_timeout_unit(u32 preempt_timeout)
1777 {
1778 return preempt_timeout ? "us" : "(infinity)";
1779 }
1780
1781 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1782 u32 preempt_timeout)
1783 {
1784 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1785 int err;
1786
1787 err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1788 if (unlikely(err))
1789 return err;
1790
1791 config->preempt_timeout = preempt_timeout;
1792
1793 return 0;
1794 }
1795
1796 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1797 {
1798 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1799
1800 return config->preempt_timeout;
1801 }
1802
1803 /**
1804 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1805 * @gt: the &xe_gt
1806 * @vfid: the VF identifier
1807 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1808 *
1809 * This function can only be called on PF.
1810 *
1811 * Return: 0 on success or a negative error code on failure.
1812 */
1813 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1814 u32 preempt_timeout)
1815 {
1816 int err;
1817
1818 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1819 err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1820 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1821
1822 return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1823 xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1824 "preemption timeout", preempt_timeout_unit, err);
1825 }
1826
1827 /**
1828 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1829 * @gt: the &xe_gt
1830 * @vfid: the VF identifier
1831 *
1832 * This function can only be called on PF.
1833 *
1834 * Return: VF's (or PF's) preemption timeout in microseconds.
1835 */
1836 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1837 {
1838 u32 preempt_timeout;
1839
1840 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1841 preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1842 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1843
1844 return preempt_timeout;
1845 }
1846
1847 static const char *sched_priority_unit(u32 priority)
1848 {
1849 return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
1850 priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
1851 priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
1852 "(?)";
1853 }
1854
1855 static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1856 {
1857 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1858 int err;
1859
1860 err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
1861 if (unlikely(err))
1862 return err;
1863
1864 config->sched_priority = priority;
1865 return 0;
1866 }
1867
1868 static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1869 {
1870 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1871
1872 return config->sched_priority;
1873 }
1874
1875 /**
1876 * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
1877 * @gt: the &xe_gt
1878 * @vfid: the VF identifier
1879 * @priority: requested scheduling priority
1880 *
1881 * This function can only be called on PF.
1882 *
1883 * Return: 0 on success or a negative error code on failure.
1884 */
1885 int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1886 {
1887 int err;
1888
1889 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1890 err = pf_provision_sched_priority(gt, vfid, priority);
1891 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1892
1893 return pf_config_set_u32_done(gt, vfid, priority,
1894 xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
1895 "scheduling priority", sched_priority_unit, err);
1896 }
1897
1898 /**
1899 * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
1900 * @gt: the &xe_gt
1901 * @vfid: the VF identifier
1902 *
1903 * This function can only be called on PF.
1904 *
1905 * Return: VF's (or PF's) scheduling priority.
1906 */
1907 u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1908 {
1909 u32 priority;
1910
1911 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1912 priority = pf_get_sched_priority(gt, vfid);
1913 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1914
1915 return priority;
1916 }
1917
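/*
 * Usage sketch (hypothetical): give VF1 high scheduling priority using one
 * of the GUC_SCHED_PRIORITY_* levels decoded by sched_priority_unit() above.
 */
#if 0	/* example only, not built */
static int example_set_priority(struct xe_gt *gt)
{
	return xe_gt_sriov_pf_config_set_sched_priority(gt, 1,
							GUC_SCHED_PRIORITY_HIGH);
}
#endif
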
1918 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1919 {
1920 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1921
1922 config->exec_quantum = 0;
1923 config->preempt_timeout = 0;
1924 }
1925
1926 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1927 enum xe_guc_klv_threshold_index index, u32 value)
1928 {
1929 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1930 int err;
1931
1932 err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1933 if (unlikely(err))
1934 return err;
1935
1936 config->thresholds[index] = value;
1937
1938 return 0;
1939 }
1940
1941 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1942 enum xe_guc_klv_threshold_index index)
1943 {
1944 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1945
1946 return config->thresholds[index];
1947 }
1948
1949 static const char *threshold_unit(u32 threshold)
1950 {
1951 return threshold ? "" : "(disabled)";
1952 }
1953
1954 /**
1955 * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1956 * @gt: the &xe_gt
1957 * @vfid: the VF identifier
1958 * @index: the threshold index
1959 * @value: requested value (0 means disabled)
1960 *
1961 * This function can only be called on PF.
1962 *
1963 * Return: 0 on success or a negative error code on failure.
1964 */
1965 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1966 enum xe_guc_klv_threshold_index index, u32 value)
1967 {
1968 u32 key = xe_guc_klv_threshold_index_to_key(index);
1969 const char *name = xe_guc_klv_key_to_string(key);
1970 int err;
1971
1972 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1973 err = pf_provision_threshold(gt, vfid, index, value);
1974 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1975
1976 return pf_config_set_u32_done(gt, vfid, value,
1977 xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1978 name, threshold_unit, err);
1979 }
1980
1981 /**
1982 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1983 * @gt: the &xe_gt
1984 * @vfid: the VF identifier
1985 * @index: the threshold index
1986 *
1987 * This function can only be called on PF.
1988 *
1989 * Return: value of VF's (or PF's) threshold.
1990 */
1991 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1992 enum xe_guc_klv_threshold_index index)
1993 {
1994 u32 value;
1995
1996 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1997 value = pf_get_threshold(gt, vfid, index);
1998 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1999
2000 return value;
2001 }
2002
2003 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
2004 {
2005 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
2006
2007 #define reset_threshold_config(TAG, ...) ({ \
2008 config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0; \
2009 });
2010
2011 MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
2012 #undef reset_threshold_config
2013 }
2014
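/*
 * For reference, MAKE_XE_GUC_KLV_THRESHOLDS_SET() above is assumed to invoke
 * its callback once per threshold TAG defined in xe_guc_klv_thresholds_set.h,
 * so pf_reset_config_thresholds() expands to one zeroing assignment per
 * threshold, roughly:
 *
 *	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;
 *
 * repeated for every TAG in the set.
 */
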
2015 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
2016 {
2017 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2018 struct xe_device *xe = gt_to_xe(gt);
2019 bool released;
2020
2021 if (xe_gt_is_main_type(gt)) {
2022 pf_release_vf_config_ggtt(gt, config);
2023 if (IS_DGFX(xe)) {
2024 released = pf_release_vf_config_lmem(gt, config);
2025 if (released && xe_device_has_lmtt(xe))
2026 pf_update_vf_lmtt(xe, vfid);
2027 }
2028 }
2029 pf_release_config_ctxs(gt, config);
2030 pf_release_config_dbs(gt, config);
2031 pf_reset_config_sched(gt, config);
2032 pf_reset_config_thresholds(gt, config);
2033 }
2034
2035 /**
2036 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
2037 * @gt: the &xe_gt
2038 * @vfid: the VF identifier (can't be PF)
2039 * @force: force configuration release
2040 *
2041 * This function can only be called on PF.
2042 *
2043 * Return: 0 on success or a negative error code on failure.
2044 */
2045 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
2046 {
2047 int err;
2048
2049 xe_gt_assert(gt, vfid);
2050
2051 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2052 err = pf_send_vf_cfg_reset(gt, vfid);
2053 if (!err || force)
2054 pf_release_vf_config(gt, vfid);
2055 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2056
2057 if (unlikely(err)) {
2058 xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
2059 vfid, ERR_PTR(err),
2060 force ? " but all resources were released anyway!" : "");
2061 }
2062
2063 return force ? 0 : err;
2064 }
2065
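/*
 * Usage sketch (hypothetical): unprovision VF3, forcing the release of its
 * resources even if the GuC reset request fails (e.g. during PF teardown).
 */
#if 0	/* example only, not built */
static void example_release(struct xe_gt *gt)
{
	/* with force == true the call always reports success */
	WARN_ON(xe_gt_sriov_pf_config_release(gt, 3, true));
}
#endif
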
2066 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
2067 {
2068 if (xe_ggtt_node_allocated(ggtt_region))
2069 xe_ggtt_assign(ggtt_region, vfid);
2070 }
2071
2072 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
2073 {
2074 struct xe_migrate *m = tile->migrate;
2075 struct dma_fence *fence;
2076 int err;
2077
2078 if (!bo)
2079 return 0;
2080
2081 xe_bo_lock(bo, false);
2082 fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
2083 if (IS_ERR(fence)) {
2084 err = PTR_ERR(fence);
2085 } else if (!fence) {
2086 err = -ENOMEM;
2087 } else {
2088 long ret = dma_fence_wait_timeout(fence, false, timeout);
2089
2090 err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
2091 dma_fence_put(fence);
2092 if (!err)
2093 xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
2094 jiffies_to_msecs(timeout - ret));
2095 }
2096 xe_bo_unlock(bo);
2097
2098 return err;
2099 }
2100
2101 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
2102 {
2103 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2104 struct xe_tile *tile = gt_to_tile(gt);
2105 struct xe_device *xe = gt_to_xe(gt);
2106 int err = 0;
2107
2108 /*
2109 * Only GGTT and LMEM need to be cleared by the PF.
2110 * GuC doorbell IDs and context IDs do not need any clearing.
2111 */
2112 if (xe_gt_is_main_type(gt)) {
2113 pf_sanitize_ggtt(config->ggtt_region, vfid);
2114 if (IS_DGFX(xe))
2115 err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
2116 }
2117
2118 return err;
2119 }
2120
2121 /**
2122 * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
2123 * @gt: the &xe_gt
2124 * @vfid: the VF identifier (can't be PF)
2125 * @timeout: maximum timeout to wait for completion in jiffies
2126 *
2127 * This function can only be called on PF.
2128 *
2129 * Return: 0 on success or a negative error code on failure.
2130 */
2131 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
2132 {
2133 int err;
2134
2135 xe_gt_assert(gt, vfid != PFID);
2136
2137 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2138 err = pf_sanitize_vf_resources(gt, vfid, timeout);
2139 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2140
2141 if (unlikely(err))
2142 xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
2143 vfid, ERR_PTR(err));
2144 return err;
2145 }
2146
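/*
 * Usage sketch (hypothetical): scrub VF1's GGTT and LMEM before handing the
 * resources to a new owner, allowing up to 5 seconds for the LMEM clear.
 */
#if 0	/* example only, not built */
static int example_sanitize(struct xe_gt *gt)
{
	return xe_gt_sriov_pf_config_sanitize(gt, 1, msecs_to_jiffies(5000));
}
#endif
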
2147 /**
2148 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
2149 * @gt: the &xe_gt
2150 * @vfid: the VF identifier (can't be PF)
2151 * @refresh: explicit refresh
2152 *
2153 * This function can only be called on PF.
2154 *
2155 * Return: 0 on success or a negative error code on failure.
2156 */
2157 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2158 {
2159 int err = 0;
2160
2161 xe_gt_assert(gt, vfid);
2162
2163 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2164 err = pf_push_vf_cfg(gt, vfid, refresh);
2165 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2166
2167 if (unlikely(err)) {
2168 xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2169 refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2170 }
2171
2172 return err;
2173 }
2174
2175 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2176 {
2177 struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2178 struct xe_device *xe = gt_to_xe(gt);
2179 bool is_primary = xe_gt_is_main_type(gt);
2180 bool valid_ggtt, valid_ctxs, valid_dbs;
2181 bool valid_any, valid_all;
2182
2183 valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2184 valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2185 valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2186
2187 /* note that GuC doorbells are optional */
2188 valid_any = valid_ctxs || valid_dbs;
2189 valid_all = valid_ctxs;
2190
2191 /* and GGTT/LMEM is configured on primary GT only */
2192 valid_all = valid_all && valid_ggtt;
2193 valid_any = valid_any || (valid_ggtt && is_primary);
2194
2195 if (xe_device_has_lmtt(xe)) {
2196 bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2197
2198 valid_any = valid_any || (valid_lmem && is_primary);
2199 valid_all = valid_all && valid_lmem;
2200 }
2201
2202 return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
2203 }
2204
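/*
 * To summarize pf_validate_vf_config() above: 0 means all mandatory
 * resources (GGTT, contexts, and LMEM where applicable) are provisioned,
 * -ENOKEY means the configuration is only partial (e.g. contexts without
 * GGTT), and -ENODATA means it is completely empty; GuC doorbells are
 * optional and never required.
 */
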
2205 /**
2206 * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2207 * @gt: the &xe_gt
2208 * @vfid: the VF identifier (can't be PF)
2209 *
2210 * This function can only be called on PF.
2211 *
2212 * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2213 */
2214 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2215 {
2216 bool empty;
2217
2218 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2219 xe_gt_assert(gt, vfid);
2220
2221 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2222 empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2223 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2224
2225 return empty;
2226 }
2227
2228 /**
2229 * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2230 * @gt: the &xe_gt
2231 * @vfid: the VF identifier (can't be PF)
2232 * @buf: the buffer to save a config to (or NULL to query the required buffer size)
2233 * @size: the size of the buffer (or 0 to query the required buffer size)
2234 *
2235 * This function can only be called on PF.
2236 *
2237 * Return: minimum size of the buffer or the number of bytes saved,
2238 * or a negative error code on failure.
2239 */
2240 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2241 {
2242 struct xe_gt_sriov_config *config;
2243 ssize_t ret;
2244
2245 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2246 xe_gt_assert(gt, vfid);
2247 xe_gt_assert(gt, !(!buf ^ !size));
2248
2249 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2250 ret = pf_validate_vf_config(gt, vfid);
2251 if (!size) {
2252 ret = ret ? 0 : SZ_4K;
2253 } else if (!ret) {
2254 if (size < SZ_4K) {
2255 ret = -ENOBUFS;
2256 } else {
2257 config = pf_pick_vf_config(gt, vfid);
2258 ret = encode_config(buf, config, false) * sizeof(u32);
2259 }
2260 }
2261 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2262
2263 return ret;
2264 }
2265
2266 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2267 u32 key, u32 len, const u32 *value)
2268 {
2269 switch (key) {
2270 case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2271 if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2272 return -EBADMSG;
2273 return pf_provision_vf_ctxs(gt, vfid, value[0]);
2274
2275 case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2276 if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2277 return -EBADMSG;
2278 return pf_provision_vf_dbs(gt, vfid, value[0]);
2279
2280 case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2281 if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2282 return -EBADMSG;
2283 return pf_provision_exec_quantum(gt, vfid, value[0]);
2284
2285 case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2286 if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2287 return -EBADMSG;
2288 return pf_provision_preempt_timeout(gt, vfid, value[0]);
2289
2290 /* auto-generate case statements */
2291 #define define_threshold_key_to_provision_case(TAG, ...) \
2292 case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG): \
2293 BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u); \
2294 if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG)) \
2295 return -EBADMSG; \
2296 return pf_provision_threshold(gt, vfid, \
2297 MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG), \
2298 value[0]);
2299
2300 MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2301 #undef define_threshold_key_to_provision_case
2302 }
2303
2304 if (xe_gt_is_media_type(gt))
2305 return -EKEYREJECTED;
2306
2307 switch (key) {
2308 case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2309 if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2310 return -EBADMSG;
2311 return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2312
2313 case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2314 if (!IS_DGFX(gt_to_xe(gt)))
2315 return -EKEYREJECTED;
2316 if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2317 return -EBADMSG;
2318 return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2319 }
2320
2321 return -EKEYREJECTED;
2322 }
2323
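/*
 * For reference, the blob parsed below is a stream of 32-bit dwords where
 * each KLV starts with a header dword (key in GUC_KLV_0_KEY, length in
 * GUC_KLV_0_LEN) followed by 'len' value dwords; e.g. a 2-dword LMEM size
 * KLV carries the u64 value split as (lo, hi) 32-bit halves, reassembled
 * above with make_u64_from_u32(value[1], value[0]).
 */
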
2324 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2325 const u32 *klvs, size_t num_dwords)
2326 {
2327 int err;
2328
2329 while (num_dwords >= GUC_KLV_LEN_MIN) {
2330 u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2331 u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2332
2333 klvs += GUC_KLV_LEN_MIN;
2334 num_dwords -= GUC_KLV_LEN_MIN;
2335
2336 if (num_dwords < len)
2337 err = -EBADMSG;
2338 else
2339 err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2340
2341 if (err) {
2342 xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2343 return err;
2344 }
2345
2346 klvs += len;
2347 num_dwords -= len;
2348 }
2349
2350 return pf_validate_vf_config(gt, vfid);
2351 }
2352
2353 /**
2354 * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2355 * @gt: the &xe_gt
2356 * @vfid: the VF identifier (can't be PF)
2357 * @buf: the buffer with config data
2358 * @size: the size of the config data
2359 *
2360 * This function can only be called on PF.
2361 *
2362 * Return: 0 on success or a negative error code on failure.
2363 */
2364 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2365 const void *buf, size_t size)
2366 {
2367 int err;
2368
2369 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2370 xe_gt_assert(gt, vfid);
2371
2372 if (!size)
2373 return -ENODATA;
2374
2375 if (size % sizeof(u32))
2376 return -EINVAL;
2377
2378 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2379 struct drm_printer p = xe_gt_dbg_printer(gt);
2380
2381 drm_printf(&p, "restoring VF%u config:\n", vfid);
2382 xe_guc_klv_print(buf, size / sizeof(u32), &p);
2383 }
2384
2385 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2386 err = pf_send_vf_cfg_reset(gt, vfid);
2387 if (!err) {
2388 pf_release_vf_config(gt, vfid);
2389 err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2390 }
2391 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2392
2393 return err;
2394 }
2395
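/*
 * Usage sketch (hypothetical): round-trip a VF config through the blob API
 * above - query the required size first, then save, then restore later. The
 * helper name example_migrate_config() is an assumption for illustration.
 */
#if 0	/* example only, not built */
static int example_migrate_config(struct xe_gt *gt, unsigned int vfid)
{
	ssize_t size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
	void *blob;
	int err;

	if (size <= 0)
		return size ?: -ENODATA;

	blob = kzalloc(size, GFP_KERNEL);
	if (!blob)
		return -ENOMEM;

	size = xe_gt_sriov_pf_config_save(gt, vfid, blob, size);
	err = size > 0 ? xe_gt_sriov_pf_config_restore(gt, vfid, blob, size) : size;
	kfree(blob);
	return err;
}
#endif
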
2396 static void pf_prepare_self_config(struct xe_gt *gt)
2397 {
2398 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);
2399
2400 /*
2401 * We want the PF to be allowed to use all context IDs, all doorbell IDs
2402 * and the whole usable GGTT area. While we can store the ctxs/dbs numbers
2403 * directly in the config structure, we can't do the same with the GGTT
2404 * configuration, so let it be prepared on demand while pushing KLVs.
2405 */
2406 config->num_ctxs = GUC_ID_MAX;
2407 config->num_dbs = GUC_NUM_DOORBELLS;
2408 }
2409
2410 static int pf_push_self_config(struct xe_gt *gt)
2411 {
2412 int err;
2413
2414 err = pf_push_full_vf_config(gt, PFID);
2415 if (err) {
2416 xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
2417 ERR_PTR(err));
2418 return err;
2419 }
2420
2421 xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
2422 return 0;
2423 }
2424
2425 static void fini_config(void *arg)
2426 {
2427 struct xe_gt *gt = arg;
2428 struct xe_device *xe = gt_to_xe(gt);
2429 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
2430
2431 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2432 for (n = 1; n <= total_vfs; n++)
2433 pf_release_vf_config(gt, n);
2434 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2435 }
2436
2437 /**
2438 * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
2439 * @gt: the &xe_gt
2440 *
2441 * This function can only be called on PF.
2442 *
2443 * Return: 0 on success or a negative error code on failure.
2444 */
2445 int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
2446 {
2447 struct xe_device *xe = gt_to_xe(gt);
2448 int err;
2449
2450 xe_gt_assert(gt, IS_SRIOV_PF(xe));
2451
2452 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2453 pf_prepare_self_config(gt);
2454 err = pf_push_self_config(gt);
2455 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2456
2457 if (err)
2458 return err;
2459
2460 return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
2461 }
2462
2463 /**
2464 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2465 * @gt: the &xe_gt
2466 *
2467 * Any prior configurations pushed to GuC are lost when the GT is reset.
2468 * Push again all non-empty VF configurations to the GuC.
2469 *
2470 * This function can only be called on PF.
2471 */
2472 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2473 {
2474 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2475 unsigned int fail = 0, skip = 0;
2476
2477 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2478 pf_push_self_config(gt);
2479 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2480
2481 for (n = 1; n <= total_vfs; n++) {
2482 if (xe_gt_sriov_pf_config_is_empty(gt, n))
2483 skip++;
2484 else if (xe_gt_sriov_pf_config_push(gt, n, false))
2485 fail++;
2486 }
2487
2488 if (fail)
2489 xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2490 fail, total_vfs - skip, str_plural(total_vfs));
2491
2492 if (fail != total_vfs)
2493 xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
2494 total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2495 }
2496
2497 /**
2498 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2499 * @gt: the &xe_gt
2500 * @p: the &drm_printer
2501 *
2502 * Print GGTT configuration data for all VFs.
2503 * VFs without provisioned GGTT are ignored.
2504 *
2505 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2506 */
2507 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2508 {
2509 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2510 const struct xe_gt_sriov_config *config;
2511 char buf[10];
2512
2513 for (n = 1; n <= total_vfs; n++) {
2514 config = &gt->sriov.pf.vfs[n].config;
2515 if (!xe_ggtt_node_allocated(config->ggtt_region))
2516 continue;
2517
2518 string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2519 buf, sizeof(buf));
2520 drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2521 n, config->ggtt_region->base.start,
2522 config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2523 buf);
2524 }
2525
2526 return 0;
2527 }
2528
2529 /**
2530 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2531 * @gt: the &xe_gt
2532 * @p: the &drm_printer
2533 *
2534 * Print GuC context ID allocations across all VFs.
2535 * VFs without GuC context IDs are skipped.
2536 *
2537 * This function can only be called on PF.
2538 * Return: 0 on success or a negative error code on failure.
2539 */
2540 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2541 {
2542 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2543 const struct xe_gt_sriov_config *config;
2544
2545 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2546 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2547
2548 for (n = 1; n <= total_vfs; n++) {
2549 config = &gt->sriov.pf.vfs[n].config;
2550 if (!config->num_ctxs)
2551 continue;
2552
2553 drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2554 n,
2555 config->begin_ctx,
2556 config->begin_ctx + config->num_ctxs - 1,
2557 config->num_ctxs);
2558 }
2559
2560 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2561 return 0;
2562 }
2563
2564 /**
2565 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2566 * @gt: the &xe_gt
2567 * @p: the &drm_printer
2568 *
2569 * Print GuC doorbell ID allocations across all VFs.
2570 * VFs without GuC doorbell IDs are skipped.
2571 *
2572 * This function can only be called on PF.
2573 * Return: 0 on success or a negative error code on failure.
2574 */
2575 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2576 {
2577 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2578 const struct xe_gt_sriov_config *config;
2579
2580 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2581 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2582
2583 for (n = 1; n <= total_vfs; n++) {
2584 config = &gt->sriov.pf.vfs[n].config;
2585 if (!config->num_dbs)
2586 continue;
2587
2588 drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2589 n,
2590 config->begin_db,
2591 config->begin_db + config->num_dbs - 1,
2592 config->num_dbs);
2593 }
2594
2595 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2596 return 0;
2597 }
2598
2599 /**
2600 * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2601 * @gt: the &xe_gt
2602 * @p: the &drm_printer
2603 *
2604 * Print LMEM allocations across all VFs.
2605 * VFs without LMEM allocation are skipped.
2606 *
2607 * This function can only be called on PF.
2608 * Return: 0 on success or a negative error code on failure.
2609 */
2610 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2611 {
2612 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2613 const struct xe_gt_sriov_config *config;
2614 char buf[10];
2615
2616 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2617 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2618
2619 for (n = 1; n <= total_vfs; n++) {
2620 config = &gt->sriov.pf.vfs[n].config;
2621 if (!config->lmem_obj)
2622 continue;
2623
2624 string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
2625 buf, sizeof(buf));
2626 drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2627 n, xe_bo_size(config->lmem_obj), buf);
2628 }
2629
2630 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2631 return 0;
2632 }
2633
2634 /**
2635 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2636 * @gt: the &xe_gt
2637 * @p: the &drm_printer
2638 *
2639 * Print GGTT ranges that are available for the provisioning.
2640 *
2641 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2642 */
2643 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2644 {
2645 struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2646 u64 alignment = pf_get_ggtt_alignment(gt);
2647 u64 spare, avail, total;
2648 char buf[10];
2649
2650 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2651
2652 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2653
2654 spare = pf_get_spare_ggtt(gt);
2655 total = xe_ggtt_print_holes(ggtt, alignment, p);
2656
2657 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2658
2659 string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2660 drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2661
2662 string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2663 drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2664
2665 avail = total > spare ? total - spare : 0;
2666
2667 string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2668 drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2669
2670 return 0;
2671 }
2672