xref: /linux/drivers/gpu/drm/xe/xe_sriov_pf_provision.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include "xe_assert.h"
7 #include "xe_device.h"
8 #include "xe_gt_sriov_pf_config.h"
9 #include "xe_gt_sriov_pf_policy.h"
10 #include "xe_sriov.h"
11 #include "xe_sriov_pf_helpers.h"
12 #include "xe_sriov_pf_provision.h"
13 #include "xe_sriov_pf_provision_types.h"
14 #include "xe_sriov_printk.h"
15 
16 static const char *mode_to_string(enum xe_sriov_provisioning_mode mode)
17 {
18 	switch (mode) {
19 	case XE_SRIOV_PROVISIONING_MODE_AUTO:
20 		return "auto";
21 	case XE_SRIOV_PROVISIONING_MODE_CUSTOM:
22 		return "custom";
23 	default:
24 		return "<invalid>";
25 	}
26 }
27 
28 static bool pf_auto_provisioning_mode(struct xe_device *xe)
29 {
30 	xe_assert(xe, IS_SRIOV_PF(xe));
31 
32 	return xe->sriov.pf.provision.mode == XE_SRIOV_PROVISIONING_MODE_AUTO;
33 }
34 
35 static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
36 {
37 	unsigned int n;
38 
39 	for (n = 1; n <= num_vfs; n++)
40 		if (!xe_gt_sriov_pf_config_is_empty(gt, n))
41 			return false;
42 
43 	return true;
44 }
45 
46 static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
47 {
48 	struct xe_gt *gt;
49 	unsigned int id;
50 	int result = 0;
51 	int err;
52 
53 	for_each_gt(gt, xe, id) {
54 		if (!pf_needs_provisioning(gt, num_vfs))
55 			return -EUCLEAN;
56 		err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
57 		result = result ?: err;
58 	}
59 
60 	return result;
61 }
62 
63 static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
64 {
65 	struct xe_gt *gt;
66 	unsigned int id;
67 	unsigned int n;
68 
69 	for_each_gt(gt, xe, id)
70 		for (n = 1; n <= num_vfs; n++)
71 			xe_gt_sriov_pf_config_release(gt, n, true);
72 }
73 
/* Release configs of every possible VF (up to totalvfs) on all GTs. */
static void pf_unprovision_all_vfs(struct xe_device *xe)
{
	pf_unprovision_vfs(xe, xe_sriov_pf_get_totalvfs(xe));
}
78 
/**
 * xe_sriov_pf_provision_vfs() - Provision VFs in auto-mode.
 * @xe: the PF &xe_device
 * @num_vfs: the number of VFs to auto-provision
 *
 * A no-op unless the PF is in the AUTO provisioning mode.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	if (pf_auto_provisioning_mode(xe))
		return pf_provision_vfs(xe, num_vfs);

	return 0;
}
97 
/**
 * xe_sriov_pf_unprovision_vfs() - Unprovision VFs in auto-mode.
 * @xe: the PF &xe_device
 * @num_vfs: the number of VFs to unprovision
 *
 * A no-op unless the PF is in the AUTO provisioning mode.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	if (pf_auto_provisioning_mode(xe))
		pf_unprovision_vfs(xe, num_vfs);

	return 0;
}
117 
118 /**
119  * xe_sriov_pf_provision_set_mode() - Change VFs provision mode.
120  * @xe: the PF &xe_device
121  * @mode: the new VFs provisioning mode
122  *
123  * When changing from AUTO to CUSTOM mode, any already allocated VFs resources
124  * will remain allocated and will not be released upon VFs disabling.
125  *
126  * When changing back to AUTO mode, if VFs are not enabled, already allocated
127  * VFs resources will be immediately released. If VFs are still enabled, such
128  * mode change is rejected.
129  *
130  * This function can only be called on PF.
131  *
132  * Return: 0 on success or a negative error code on failure.
133  */
134 int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode)
135 {
136 	xe_assert(xe, IS_SRIOV_PF(xe));
137 
138 	if (mode == xe->sriov.pf.provision.mode)
139 		return 0;
140 
141 	if (mode == XE_SRIOV_PROVISIONING_MODE_AUTO) {
142 		if (xe_sriov_pf_num_vfs(xe)) {
143 			xe_sriov_dbg(xe, "can't restore %s: VFs must be disabled!\n",
144 				     mode_to_string(mode));
145 			return -EBUSY;
146 		}
147 		pf_unprovision_all_vfs(xe);
148 	}
149 
150 	xe_sriov_dbg(xe, "mode %s changed to %s by %ps\n",
151 		     mode_to_string(xe->sriov.pf.provision.mode),
152 		     mode_to_string(mode), __builtin_return_address(0));
153 	xe->sriov.pf.provision.mode = mode;
154 	return 0;
155 }
156 
157 /**
158  * xe_sriov_pf_provision_bulk_apply_eq() - Change execution quantum for all VFs and PF.
159  * @xe: the PF &xe_device
160  * @eq: execution quantum in [ms] to set
161  *
162  * Change execution quantum (EQ) provisioning on all tiles/GTs.
163  *
164  * This function can only be called on PF.
165  *
166  * Return: 0 on success or a negative error code on failure.
167  */
168 int xe_sriov_pf_provision_bulk_apply_eq(struct xe_device *xe, u32 eq)
169 {
170 	struct xe_gt *gt;
171 	unsigned int id;
172 	int result = 0;
173 	int err;
174 
175 	guard(mutex)(xe_sriov_pf_master_mutex(xe));
176 
177 	for_each_gt(gt, xe, id) {
178 		err = xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(gt, eq);
179 		result = result ?: err;
180 	}
181 
182 	return result;
183 }
184 
185 /**
186  * xe_sriov_pf_provision_apply_vf_eq() - Change VF's execution quantum.
187  * @xe: the PF &xe_device
188  * @vfid: the VF identifier
189  * @eq: execution quantum in [ms] to set
190  *
191  * Change VF's execution quantum (EQ) provisioning on all tiles/GTs.
192  *
193  * This function can only be called on PF.
194  *
195  * Return: 0 on success or a negative error code on failure.
196  */
197 int xe_sriov_pf_provision_apply_vf_eq(struct xe_device *xe, unsigned int vfid, u32 eq)
198 {
199 	struct xe_gt *gt;
200 	unsigned int id;
201 	int result = 0;
202 	int err;
203 
204 	guard(mutex)(xe_sriov_pf_master_mutex(xe));
205 
206 	for_each_gt(gt, xe, id) {
207 		err = xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, eq);
208 		result = result ?: err;
209 	}
210 
211 	return result;
212 }
213 
214 static int pf_report_unclean(struct xe_gt *gt, unsigned int vfid,
215 			     const char *what, u32 found, u32 expected)
216 {
217 	char name[8];
218 
219 	xe_sriov_dbg(gt_to_xe(gt), "%s on GT%u has %s=%u (expected %u)\n",
220 		     xe_sriov_function_name(vfid, name, sizeof(name)),
221 		     gt->info.id, what, found, expected);
222 	return -EUCLEAN;
223 }
224 
225 /**
226  * xe_sriov_pf_provision_query_vf_eq() - Query VF's execution quantum.
227  * @xe: the PF &xe_device
228  * @vfid: the VF identifier
229  * @eq: placeholder for the returned execution quantum in [ms]
230  *
231  * Query VF's execution quantum (EQ) provisioning from all tiles/GTs.
232  * If values across tiles/GTs are inconsistent then -EUCLEAN error will be returned.
233  *
234  * This function can only be called on PF.
235  *
236  * Return: 0 on success or a negative error code on failure.
237  */
238 int xe_sriov_pf_provision_query_vf_eq(struct xe_device *xe, unsigned int vfid, u32 *eq)
239 {
240 	struct xe_gt *gt;
241 	unsigned int id;
242 	int count = 0;
243 	u32 value;
244 
245 	guard(mutex)(xe_sriov_pf_master_mutex(xe));
246 
247 	for_each_gt(gt, xe, id) {
248 		value = xe_gt_sriov_pf_config_get_exec_quantum_locked(gt, vfid);
249 		if (!count++)
250 			*eq = value;
251 		else if (value != *eq)
252 			return pf_report_unclean(gt, vfid, "EQ", value, *eq);
253 	}
254 
255 	return !count ? -ENODATA : 0;
256 }
257 
258 /**
259  * xe_sriov_pf_provision_bulk_apply_pt() - Change preemption timeout for all VFs and PF.
260  * @xe: the PF &xe_device
261  * @pt: preemption timeout in [us] to set
262  *
263  * Change preemption timeout (PT) provisioning on all tiles/GTs.
264  *
265  * This function can only be called on PF.
266  *
267  * Return: 0 on success or a negative error code on failure.
268  */
269 int xe_sriov_pf_provision_bulk_apply_pt(struct xe_device *xe, u32 pt)
270 {
271 	struct xe_gt *gt;
272 	unsigned int id;
273 	int result = 0;
274 	int err;
275 
276 	guard(mutex)(xe_sriov_pf_master_mutex(xe));
277 
278 	for_each_gt(gt, xe, id) {
279 		err = xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(gt, pt);
280 		result = result ?: err;
281 	}
282 
283 	return result;
284 }
285 
286 /**
287  * xe_sriov_pf_provision_apply_vf_pt() - Change VF's preemption timeout.
288  * @xe: the PF &xe_device
289  * @vfid: the VF identifier
290  * @pt: preemption timeout in [us] to set
291  *
292  * Change VF's preemption timeout (PT) provisioning on all tiles/GTs.
293  *
294  * This function can only be called on PF.
295  *
296  * Return: 0 on success or a negative error code on failure.
297  */
298 int xe_sriov_pf_provision_apply_vf_pt(struct xe_device *xe, unsigned int vfid, u32 pt)
299 {
300 	struct xe_gt *gt;
301 	unsigned int id;
302 	int result = 0;
303 	int err;
304 
305 	guard(mutex)(xe_sriov_pf_master_mutex(xe));
306 
307 	for_each_gt(gt, xe, id) {
308 		err = xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, pt);
309 		result = result ?: err;
310 	}
311 
312 	return result;
313 }
314 
315 /**
316  * xe_sriov_pf_provision_query_vf_pt() - Query VF's preemption timeout.
317  * @xe: the PF &xe_device
318  * @vfid: the VF identifier
319  * @pt: placeholder for the returned preemption timeout in [us]
320  *
321  * Query VF's preemption timeout (PT) provisioning from all tiles/GTs.
322  * If values across tiles/GTs are inconsistent then -EUCLEAN error will be returned.
323  *
324  * This function can only be called on PF.
325  *
326  * Return: 0 on success or a negative error code on failure.
327  */
328 int xe_sriov_pf_provision_query_vf_pt(struct xe_device *xe, unsigned int vfid, u32 *pt)
329 {
330 	struct xe_gt *gt;
331 	unsigned int id;
332 	int count = 0;
333 	u32 value;
334 
335 	guard(mutex)(xe_sriov_pf_master_mutex(xe));
336 
337 	for_each_gt(gt, xe, id) {
338 		value = xe_gt_sriov_pf_config_get_preempt_timeout_locked(gt, vfid);
339 		if (!count++)
340 			*pt = value;
341 		else if (value != *pt)
342 			return pf_report_unclean(gt, vfid, "PT", value, *pt);
343 	}
344 
345 	return !count ? -ENODATA : 0;
346 }
347 
348 /**
349  * xe_sriov_pf_provision_bulk_apply_priority() - Change scheduling priority of all VFs and PF.
350  * @xe: the PF &xe_device
351  * @prio: scheduling priority to set
352  *
353  * Change the scheduling priority provisioning on all tiles/GTs.
354  *
355  * This function can only be called on PF.
356  *
357  * Return: 0 on success or a negative error code on failure.
358  */
359 int xe_sriov_pf_provision_bulk_apply_priority(struct xe_device *xe, u32 prio)
360 {
361 	bool sched_if_idle;
362 	struct xe_gt *gt;
363 	unsigned int id;
364 	int result = 0;
365 	int err;
366 
367 	/*
368 	 * Currently, priority changes that involves VFs are only allowed using
369 	 * the 'sched_if_idle' policy KLV, so only LOW and NORMAL are supported.
370 	 */
371 	xe_assert(xe, prio < GUC_SCHED_PRIORITY_HIGH);
372 	sched_if_idle = prio == GUC_SCHED_PRIORITY_NORMAL;
373 
374 	for_each_gt(gt, xe, id) {
375 		err = xe_gt_sriov_pf_policy_set_sched_if_idle(gt, sched_if_idle);
376 		result = result ?: err;
377 	}
378 
379 	return result;
380 }
381 
382 /**
383  * xe_sriov_pf_provision_apply_vf_priority() - Change VF's scheduling priority.
384  * @xe: the PF &xe_device
385  * @vfid: the VF identifier
386  * @prio: scheduling priority to set
387  *
388  * Change VF's scheduling priority provisioning on all tiles/GTs.
389  *
390  * This function can only be called on PF.
391  *
392  * Return: 0 on success or a negative error code on failure.
393  */
394 int xe_sriov_pf_provision_apply_vf_priority(struct xe_device *xe, unsigned int vfid, u32 prio)
395 {
396 	struct xe_gt *gt;
397 	unsigned int id;
398 	int result = 0;
399 	int err;
400 
401 	for_each_gt(gt, xe, id) {
402 		err = xe_gt_sriov_pf_config_set_sched_priority(gt, vfid, prio);
403 		result = result ?: err;
404 	}
405 
406 	return result;
407 }
408 
409 /**
410  * xe_sriov_pf_provision_query_vf_priority() - Query VF's scheduling priority.
411  * @xe: the PF &xe_device
412  * @vfid: the VF identifier
413  * @prio: placeholder for the returned scheduling priority
414  *
415  * Query VF's scheduling priority provisioning from all tiles/GTs.
416  * If values across tiles/GTs are inconsistent then -EUCLEAN error will be returned.
417  *
418  * This function can only be called on PF.
419  *
420  * Return: 0 on success or a negative error code on failure.
421  */
422 int xe_sriov_pf_provision_query_vf_priority(struct xe_device *xe, unsigned int vfid, u32 *prio)
423 {
424 	struct xe_gt *gt;
425 	unsigned int id;
426 	int count = 0;
427 	u32 value;
428 
429 	for_each_gt(gt, xe, id) {
430 		value = xe_gt_sriov_pf_config_get_sched_priority(gt, vfid);
431 		if (!count++)
432 			*prio = value;
433 		else if (value != *prio)
434 			return pf_report_unclean(gt, vfid, "priority", value, *prio);
435 	}
436 
437 	return !count ? -ENODATA : 0;
438 }
439