// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/string_choices.h>

#include "regs/xe_bars.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_engine_activity.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_printk.h"

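/*
 * Fair provisioning is needed only if none of the VFs 1..num_vfs already
 * has a non-empty configuration on this GT.
 */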
static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
{
	unsigned int n;

	for (n = 1; n <= num_vfs; n++)
		if (!xe_gt_sriov_pf_config_is_empty(gt, n))
			return false;

	return true;
}

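/*
 * Apply a fair (equal share) configuration to VFs 1..num_vfs on every GT that
 * has not been provisioned yet, returning the first error encountered while
 * still attempting the remaining GTs.
 */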
static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0, err;

	for_each_gt(gt, xe, id) {
		if (!pf_needs_provisioning(gt, num_vfs))
			continue;
		err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
		result = result ?: err;
	}

	return result;
}

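/* Release the configurations of VFs 1..num_vfs on all GTs. */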
static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	unsigned int n;

	for_each_gt(gt, xe, id)
		for (n = 1; n <= num_vfs; n++)
			xe_gt_sriov_pf_config_release(gt, n, true);
}

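/* Trigger a Function Level Reset (FLR) of VFs 1..num_vfs on all GTs. */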
static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	unsigned int n;

	for_each_gt(gt, xe, id)
		for (n = 1; n <= num_vfs; n++)
			xe_gt_sriov_pf_control_trigger_flr(gt, n);
}

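/* Look up the PCI device of the given VF (0-based index) of this PF. */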
static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	xe_assert(xe, IS_SRIOV_PF(xe));

	/* caller must use pci_dev_put() */
	return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					   pdev->bus->number,
					   pci_iov_virtfn_devfn(pdev, vf_id));
}

static void pf_link_vfs(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
	struct device_link *link;
	struct pci_dev *pdev_vf;
	unsigned int n;

	/*
	 * When both PF and VF devices are enabled on the host, they resume
	 * in parallel during system resume.
	 *
	 * But the PF has to complete provisioning of the VFs first to allow
	 * any VF to resume successfully.
	 *
	 * Create a parent-child device link between PF and VF devices that
	 * will enforce the correct resume order.
	 */
	for (n = 1; n <= num_vfs; n++) {
		pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);

		/* unlikely, something weird is happening, abort */
		if (!pdev_vf) {
			xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
				     n, str_plural(num_vfs));
			break;
		}

		link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
				       DL_FLAG_AUTOREMOVE_CONSUMER);
		/* unlikely and harmless, continue with other VFs */
		if (!link)
			xe_sriov_notice(xe, "Failed linking VF%u\n", n);

		pci_dev_put(pdev_vf);
	}
}

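/* Enable or disable per-VF (function) engine activity stats in the GuC on all GTs. */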
static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
{
	struct xe_gt *gt;
	unsigned int id;
	int ret = 0;

	for_each_gt(gt, xe, id) {
		ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
		if (ret)
			xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
					 str_enable_disable(enable), ERR_PTR(ret));
	}
}

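/*
 * Resize the VF LMEM (VRAM) BAR to the largest size supported for the
 * requested number of VFs; an empty bitmask of supported sizes means
 * resizing is not available and is silently skipped.
 */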
static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	u32 sizes;

	sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
	if (!sizes)
		return 0;

	return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
}

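/*
 * Enable num_vfs VFs: provision them fairly, resize the VF LMEM BAR on
 * discrete GPUs, enable SR-IOV in the PCI core, link the VF devices to the
 * PF and start engine activity stats.
 * Returns num_vfs on success or a negative errno on failure.
 */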
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int total_vfs = xe_sriov_pf_get_totalvfs(xe);
	int err;

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_assert(xe, num_vfs > 0);
	xe_assert(xe, num_vfs <= total_vfs);
	xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));

	err = xe_sriov_pf_wait_ready(xe);
	if (err)
		goto out;

	/*
	 * We must hold an additional runtime PM reference to keep the PF in D0
	 * for the VFs' lifetime, as our VFs do not implement the PM capability.
	 *
	 * With the PF in D0, all VFs will also behave as if in D0.
	 * This also keeps the GuC alive with all VFs' configurations.
	 *
	 * This additional PM reference is released in pf_disable_vfs().
	 */
	xe_pm_runtime_get_noresume(xe);

	err = pf_provision_vfs(xe, num_vfs);
	if (err < 0)
		goto failed;

	if (IS_DGFX(xe)) {
		err = resize_vf_vram_bar(xe, num_vfs);
		if (err)
			xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err < 0)
		goto failed;

	pf_link_vfs(xe, num_vfs);

	xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
		      num_vfs, total_vfs, str_plural(total_vfs));

	pf_engine_activity_stats(xe, num_vfs, true);

	return num_vfs;

failed:
	pf_unprovision_vfs(xe, num_vfs);
	xe_pm_runtime_put(xe);
out:
	xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
			num_vfs, str_plural(num_vfs), ERR_PTR(err));
	return err;
}

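/*
 * Disable all currently enabled VFs: stop engine activity stats, disable
 * SR-IOV in the PCI core, reset (FLR) and unprovision the VFs, then drop the
 * runtime PM reference taken in pf_enable_vfs().
 */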
static int pf_disable_vfs(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 num_vfs = pci_num_vf(pdev);

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_sriov_dbg(xe, "disabling %u VF%s\n", num_vfs, str_plural(num_vfs));

	if (!num_vfs)
		return 0;

	pf_engine_activity_stats(xe, num_vfs, false);

	pci_disable_sriov(pdev);

	pf_reset_vfs(xe, num_vfs);

	pf_unprovision_vfs(xe, num_vfs);

	/* not needed anymore - see pf_enable_vfs() */
	xe_pm_runtime_put(xe);

	xe_sriov_info(xe, "Disabled %u VF%s\n", num_vfs, str_plural(num_vfs));
	return 0;
}

/**
 * xe_pci_sriov_configure - Configure SR-IOV (enable/disable VFs).
 * @pdev: the &pci_dev
 * @num_vfs: number of VFs to enable or zero to disable all VFs
 *
 * This is the Xe implementation of the struct pci_driver.sriov_configure callback.
 *
 * This callback is called by the PCI subsystem to enable or disable SR-IOV
 * Virtual Functions (VFs) as requested by the user via the PCI sysfs interface.
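 * For example, writing a VF count (or zero) to the PF's "sriov_numvfs"
 * sysfs attribute ends up in this callback.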
 *
 * Return: number of configured VFs or a negative error code on failure.
 */
int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int ret;

	if (!IS_SRIOV_PF(xe))
		return -ENODEV;

	if (num_vfs < 0)
		return -EINVAL;

	if (num_vfs > xe_sriov_pf_get_totalvfs(xe))
		return -ERANGE;

	if (num_vfs && pci_num_vf(pdev))
		return -EBUSY;

	xe_pm_runtime_get(xe);
	if (num_vfs > 0)
		ret = pf_enable_vfs(xe, num_vfs);
	else
		ret = pf_disable_vfs(xe);
	xe_pm_runtime_put(xe);

	return ret;
}