1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCI Express I/O Virtualization (IOV) support
4 * Single Root IOV 1.0
5 * Address Translation Service 1.0
6 *
7 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/bits.h>
12 #include <linux/log2.h>
13 #include <linux/pci.h>
14 #include <linux/sizes.h>
15 #include <linux/slab.h>
16 #include <linux/export.h>
17 #include <linux/string.h>
18 #include <linux/delay.h>
19 #include <asm/div64.h>
20 #include "pci.h"
21
22 #define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */
23
pci_iov_virtfn_bus(struct pci_dev * dev,int vf_id)24 int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
25 {
26 if (!dev->is_physfn)
27 return -EINVAL;
28 return dev->bus->number + ((dev->devfn + dev->sriov->offset +
29 dev->sriov->stride * vf_id) >> 8);
30 }
31
pci_iov_virtfn_devfn(struct pci_dev * dev,int vf_id)32 int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
33 {
34 if (!dev->is_physfn)
35 return -EINVAL;
36 return (dev->devfn + dev->sriov->offset +
37 dev->sriov->stride * vf_id) & 0xff;
38 }
39 EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn);
40
pci_iov_vf_id(struct pci_dev * dev)41 int pci_iov_vf_id(struct pci_dev *dev)
42 {
43 struct pci_dev *pf;
44
45 if (!dev->is_virtfn)
46 return -EINVAL;
47
48 pf = pci_physfn(dev);
49 return (pci_dev_id(dev) - (pci_dev_id(pf) + pf->sriov->offset)) /
50 pf->sriov->stride;
51 }
52 EXPORT_SYMBOL_GPL(pci_iov_vf_id);
53
54 /**
55 * pci_iov_get_pf_drvdata - Return the drvdata of a PF
56 * @dev: VF pci_dev
57 * @pf_driver: Device driver required to own the PF
58 *
59 * This must be called from a context that ensures that a VF driver is attached.
60 * The value returned is invalid once the VF driver completes its remove()
61 * callback.
62 *
63 * Locking is achieved by the driver core. A VF driver cannot be probed until
64 * pci_enable_sriov() is called and pci_disable_sriov() does not return until
65 * all VF drivers have completed their remove().
66 *
67 * The PF driver must call pci_disable_sriov() before it begins to destroy the
68 * drvdata.
69 */
pci_iov_get_pf_drvdata(struct pci_dev * dev,struct pci_driver * pf_driver)70 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver)
71 {
72 struct pci_dev *pf_dev;
73
74 if (!dev->is_virtfn)
75 return ERR_PTR(-EINVAL);
76 pf_dev = dev->physfn;
77 if (pf_dev->driver != pf_driver)
78 return ERR_PTR(-EINVAL);
79 return pci_get_drvdata(pf_dev);
80 }
81 EXPORT_SYMBOL_GPL(pci_iov_get_pf_drvdata);
82
83 /*
84 * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
85 * change when NumVFs changes.
86 *
87 * Update iov->offset and iov->stride when NumVFs is written.
88 */
static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
{
	struct pci_sriov *iov = dev->sriov;

	/* Write NumVFs, then re-read offset/stride, which may have changed */
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
}
97
98 /*
99 * The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
100 * determine how many additional bus numbers will be consumed by VFs.
101 *
102 * Iterate over all valid NumVFs, validate offset and stride, and calculate
103 * the maximum number of bus numbers that could ever be required.
104 */
static int compute_max_vf_buses(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;
	int nr_virtfn, busnr, rc = 0;

	/* Offset/stride can differ per NumVFs value, so probe every one */
	for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
		pci_iov_set_numvfs(dev, nr_virtfn);
		/* A zero offset, or zero stride with more than one VF, is invalid */
		if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
			rc = -EIO;
			goto out;
		}

		/* Bus consumed by the highest-numbered VF for this NumVFs */
		busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
		if (busnr > iov->max_VF_buses)
			iov->max_VF_buses = busnr;
	}

out:
	/* Leave NumVFs cleared; sriov_enable() sets it for real */
	pci_iov_set_numvfs(dev, 0);
	return rc;
}
126
virtfn_add_bus(struct pci_bus * bus,int busnr)127 static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
128 {
129 struct pci_bus *child;
130
131 if (bus->number == busnr)
132 return bus;
133
134 child = pci_find_bus(pci_domain_nr(bus), busnr);
135 if (child)
136 return child;
137
138 child = pci_add_new_bus(bus, NULL, busnr);
139 if (!child)
140 return NULL;
141
142 pci_bus_insert_busn_res(child, busnr, busnr);
143
144 return child;
145 }
146
virtfn_remove_bus(struct pci_bus * physbus,struct pci_bus * virtbus)147 static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
148 {
149 if (physbus != virtbus && list_empty(&virtbus->devices))
150 pci_remove_bus(virtbus);
151 }
152
pci_iov_resource_size(struct pci_dev * dev,int resno)153 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
154 {
155 if (!dev->is_physfn)
156 return 0;
157
158 return dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)];
159 }
160
pci_iov_resource_set_size(struct pci_dev * dev,int resno,int size)161 void pci_iov_resource_set_size(struct pci_dev *dev, int resno, int size)
162 {
163 if (!pci_resource_is_iov(resno)) {
164 pci_warn(dev, "%s is not an IOV resource\n",
165 pci_resource_name(dev, resno));
166 return;
167 }
168
169 resno = pci_resource_num_to_vf_bar(resno);
170 dev->sriov->barsz[resno] = pci_rebar_size_to_bytes(size);
171 }
172
pci_iov_is_memory_decoding_enabled(struct pci_dev * dev)173 bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
174 {
175 u16 cmd;
176
177 pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_CTRL, &cmd);
178
179 return cmd & PCI_SRIOV_CTRL_MSE;
180 }
181
/* Cache config registers shared by all VFs, reading them once from VF0. */
static void pci_read_vf_config_common(struct pci_dev *virtfn)
{
	struct pci_dev *physfn = virtfn->physfn;

	/*
	 * Some config registers are the same across all associated VFs.
	 * Read them once from VF0 so we can skip reading them from the
	 * other VFs.
	 *
	 * PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to
	 * have the same Revision ID and Subsystem ID, but we assume they
	 * do.
	 */
	pci_read_config_dword(virtfn, PCI_CLASS_REVISION,
			      &physfn->sriov->class);
	pci_read_config_byte(virtfn, PCI_HEADER_TYPE,
			     &physfn->sriov->hdr_type);
	pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID,
			     &physfn->sriov->subsystem_vendor);
	pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
			     &physfn->sriov->subsystem_device);
}
204
pci_iov_sysfs_link(struct pci_dev * dev,struct pci_dev * virtfn,int id)205 int pci_iov_sysfs_link(struct pci_dev *dev,
206 struct pci_dev *virtfn, int id)
207 {
208 char buf[VIRTFN_ID_LEN];
209 int rc;
210
211 sprintf(buf, "virtfn%u", id);
212 rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
213 if (rc)
214 goto failed;
215 rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
216 if (rc)
217 goto failed1;
218
219 kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
220
221 return 0;
222
223 failed1:
224 sysfs_remove_link(&dev->dev.kobj, buf);
225 failed:
226 return rc;
227 }
228
229 #ifdef CONFIG_PCI_MSI
/* Total MSI-X vectors the PF driver can distribute among its VFs (0 if N/A). */
static ssize_t sriov_vf_total_msix_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u32 vf_total_msix = 0;

	/* Hold the device lock so the PF driver can't unbind underneath us */
	device_lock(dev);
	if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
		goto unlock;

	vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
unlock:
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", vf_total_msix);
}
static DEVICE_ATTR_RO(sriov_vf_total_msix);
247
/*
 * Assign @buf (a non-negative integer) MSI-X vectors to this VF via the
 * PF driver's sriov_set_msix_vec_count() callback.  Only allowed while
 * no driver is bound to the VF.
 */
static ssize_t sriov_vf_msix_count_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct pci_dev *vf_dev = to_pci_dev(dev);
	struct pci_dev *pdev = pci_physfn(vf_dev);
	int val, ret = 0;

	if (kstrtoint(buf, 0, &val) < 0)
		return -EINVAL;

	if (val < 0)
		return -EINVAL;

	/* Lock PF first, then VF, so neither driver can bind/unbind mid-update */
	device_lock(&pdev->dev);
	if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
		ret = -EOPNOTSUPP;
		goto err_pdev;
	}

	device_lock(&vf_dev->dev);
	if (vf_dev->driver) {
		/*
		 * A driver is already attached to this VF and has configured
		 * itself based on the current MSI-X vector count. Changing
		 * the vector size could mess up the driver, so block it.
		 */
		ret = -EBUSY;
		goto err_dev;
	}

	ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);

err_dev:
	device_unlock(&vf_dev->dev);
err_pdev:
	device_unlock(&pdev->dev);
	/* On success report the whole write as consumed */
	return ret ? : count;
}
static DEVICE_ATTR_WO(sriov_vf_msix_count);
288 #endif
289
/* Attributes exposed on VFs; visibility gated by sriov_vf_attrs_are_visible() */
static struct attribute *sriov_vf_dev_attrs[] = {
#ifdef CONFIG_PCI_MSI
	&dev_attr_sriov_vf_msix_count.attr,
#endif
	NULL,
};
296
sriov_vf_attrs_are_visible(struct kobject * kobj,struct attribute * a,int n)297 static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj,
298 struct attribute *a, int n)
299 {
300 struct device *dev = kobj_to_dev(kobj);
301 struct pci_dev *pdev = to_pci_dev(dev);
302
303 if (!pdev->is_virtfn)
304 return 0;
305
306 return a->mode;
307 }
308
/* Attached to every PCI device; is_visible hides it on non-VFs */
const struct attribute_group sriov_vf_dev_attr_group = {
	.attrs = sriov_vf_dev_attrs,
	.is_visible = sriov_vf_attrs_are_visible,
};
313
/*
 * Allocate and minimally initialize a pci_dev for VF @id of PF @dev on
 * @bus, then run generic config-space setup.  Returns the new VF or an
 * ERR_PTR(); on failure all references taken here are dropped again.
 */
static struct pci_dev *pci_iov_scan_device(struct pci_dev *dev, int id,
					   struct pci_bus *bus)
{
	struct pci_sriov *iov = dev->sriov;
	struct pci_dev *virtfn;
	int rc;

	virtfn = pci_alloc_dev(bus);
	if (!virtfn)
		return ERR_PTR(-ENOMEM);

	virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	/* Device ID comes from the SR-IOV capability, not the PF's own ID */
	virtfn->device = iov->vf_device;
	virtfn->is_virtfn = 1;
	/* Hold a reference on the PF for the VF's lifetime */
	virtfn->physfn = pci_dev_get(dev);
	virtfn->no_command_memory = 1;

	/* Shared registers are cached from VF0 only */
	if (id == 0)
		pci_read_vf_config_common(virtfn);

	rc = pci_setup_device(virtfn);
	if (rc) {
		pci_dev_put(dev);	/* balance the pci_dev_get() above */
		pci_bus_put(virtfn->bus);
		kfree(virtfn);
		return ERR_PTR(rc);
	}

	return virtfn;
}
345
/* Create, register, and sysfs-link VF @id of PF @dev.  Returns 0 or -errno. */
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
	struct pci_bus *bus;
	struct pci_dev *virtfn;
	struct resource *res;
	int rc, i;
	u64 size;

	/* The VF may live on a (possibly new) bus behind the PF's bus */
	bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
	if (!bus) {
		rc = -ENOMEM;
		goto failed;
	}

	virtfn = pci_iov_scan_device(dev, id, bus);
	if (IS_ERR(virtfn)) {
		rc = PTR_ERR(virtfn);
		goto failed0;
	}

	virtfn->dev.parent = dev->dev.parent;
	virtfn->multifunction = 0;

	/* Carve this VF's slice out of each claimed PF IOV BAR */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int idx = pci_resource_num_from_vf_bar(i);

		res = &dev->resource[idx];
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		size = pci_iov_resource_size(dev, idx);
		/* VF #id occupies [start + size*id, start + size*(id+1)) */
		resource_set_range(&virtfn->resource[i],
				   res->start + size * id, size);
		rc = request_resource(res, &virtfn->resource[i]);
		BUG_ON(rc);
	}

	pci_device_add(virtfn, virtfn->bus);
	rc = pci_iov_sysfs_link(dev, virtfn, id);
	if (rc)
		goto failed1;

	pci_bus_add_device(virtfn);

	return 0;

failed1:
	pci_stop_and_remove_bus_device(virtfn);
	pci_dev_put(dev);	/* drop the reference held via virtfn->physfn */
failed0:
	virtfn_remove_bus(dev->bus, bus);
failed:

	return rc;
}
402
pci_iov_remove_virtfn(struct pci_dev * dev,int id)403 void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
404 {
405 char buf[VIRTFN_ID_LEN];
406 struct pci_dev *virtfn;
407
408 virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
409 pci_iov_virtfn_bus(dev, id),
410 pci_iov_virtfn_devfn(dev, id));
411 if (!virtfn)
412 return;
413
414 sprintf(buf, "virtfn%u", id);
415 sysfs_remove_link(&dev->dev.kobj, buf);
416 /*
417 * pci_stop_dev() could have been called for this virtfn already,
418 * so the directory for the virtfn may have been removed before.
419 * Double check to avoid spurious sysfs warnings.
420 */
421 if (virtfn->dev.kobj.sd)
422 sysfs_remove_link(&virtfn->dev.kobj, "physfn");
423
424 pci_stop_and_remove_bus_device(virtfn);
425 virtfn_remove_bus(dev->bus, virtfn->bus);
426
427 /* balance pci_get_domain_bus_and_slot() */
428 pci_dev_put(virtfn);
429 pci_dev_put(dev);
430 }
431
sriov_totalvfs_show(struct device * dev,struct device_attribute * attr,char * buf)432 static ssize_t sriov_totalvfs_show(struct device *dev,
433 struct device_attribute *attr,
434 char *buf)
435 {
436 struct pci_dev *pdev = to_pci_dev(dev);
437
438 return sysfs_emit(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
439 }
440
sriov_numvfs_show(struct device * dev,struct device_attribute * attr,char * buf)441 static ssize_t sriov_numvfs_show(struct device *dev,
442 struct device_attribute *attr,
443 char *buf)
444 {
445 struct pci_dev *pdev = to_pci_dev(dev);
446 u16 num_vfs;
447
448 /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
449 device_lock(&pdev->dev);
450 num_vfs = pdev->sriov->num_VFs;
451 device_unlock(&pdev->dev);
452
453 return sysfs_emit(buf, "%u\n", num_vfs);
454 }
455
456 /*
457 * num_vfs > 0; number of VFs to enable
458 * num_vfs = 0; disable all VFs
459 *
460 * Note: SRIOV spec does not allow partial VF
461 * disable, so it's all or none.
462 */
static ssize_t sriov_numvfs_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret = 0;
	u16 num_vfs;

	if (kstrtou16(buf, 0, &num_vfs) < 0)
		return -EINVAL;

	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -ERANGE;

	/* Device lock serializes against concurrent stores and the show() side */
	device_lock(&pdev->dev);

	/* Requesting the current count is a no-op */
	if (num_vfs == pdev->sriov->num_VFs)
		goto exit;

	/* is PF driver loaded */
	if (!pdev->driver) {
		pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
		ret = -ENOENT;
		goto exit;
	}

	/* is PF driver loaded w/callback */
	if (!pdev->driver->sriov_configure) {
		pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
		ret = -ENOENT;
		goto exit;
	}

	if (num_vfs == 0) {
		/* disable VFs */
		pci_lock_rescan_remove();
		ret = pdev->driver->sriov_configure(pdev, 0);
		pci_unlock_rescan_remove();
		goto exit;
	}

	/* enable VFs */
	if (pdev->sriov->num_VFs) {
		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
			 pdev->sriov->num_VFs, num_vfs);
		ret = -EBUSY;
		goto exit;
	}

	/* Rescan/remove lock: the callback adds VF devices to the bus */
	pci_lock_rescan_remove();
	ret = pdev->driver->sriov_configure(pdev, num_vfs);
	pci_unlock_rescan_remove();
	if (ret < 0)
		goto exit;

	/* A positive short return means fewer VFs than requested came up */
	if (ret != num_vfs)
		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
			 num_vfs, ret);

exit:
	device_unlock(&pdev->dev);

	if (ret < 0)
		return ret;

	return count;
}
530
sriov_offset_show(struct device * dev,struct device_attribute * attr,char * buf)531 static ssize_t sriov_offset_show(struct device *dev,
532 struct device_attribute *attr,
533 char *buf)
534 {
535 struct pci_dev *pdev = to_pci_dev(dev);
536
537 return sysfs_emit(buf, "%u\n", pdev->sriov->offset);
538 }
539
sriov_stride_show(struct device * dev,struct device_attribute * attr,char * buf)540 static ssize_t sriov_stride_show(struct device *dev,
541 struct device_attribute *attr,
542 char *buf)
543 {
544 struct pci_dev *pdev = to_pci_dev(dev);
545
546 return sysfs_emit(buf, "%u\n", pdev->sriov->stride);
547 }
548
sriov_vf_device_show(struct device * dev,struct device_attribute * attr,char * buf)549 static ssize_t sriov_vf_device_show(struct device *dev,
550 struct device_attribute *attr,
551 char *buf)
552 {
553 struct pci_dev *pdev = to_pci_dev(dev);
554
555 return sysfs_emit(buf, "%x\n", pdev->sriov->vf_device);
556 }
557
sriov_drivers_autoprobe_show(struct device * dev,struct device_attribute * attr,char * buf)558 static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
559 struct device_attribute *attr,
560 char *buf)
561 {
562 struct pci_dev *pdev = to_pci_dev(dev);
563
564 return sysfs_emit(buf, "%u\n", pdev->sriov->drivers_autoprobe);
565 }
566
sriov_drivers_autoprobe_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)567 static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
568 struct device_attribute *attr,
569 const char *buf, size_t count)
570 {
571 struct pci_dev *pdev = to_pci_dev(dev);
572 bool drivers_autoprobe;
573
574 if (kstrtobool(buf, &drivers_autoprobe) < 0)
575 return -EINVAL;
576
577 pdev->sriov->drivers_autoprobe = drivers_autoprobe;
578
579 return count;
580 }
581
static DEVICE_ATTR_RO(sriov_totalvfs);
static DEVICE_ATTR_RW(sriov_numvfs);
static DEVICE_ATTR_RO(sriov_offset);
static DEVICE_ATTR_RO(sriov_stride);
static DEVICE_ATTR_RO(sriov_vf_device);
static DEVICE_ATTR_RW(sriov_drivers_autoprobe);

/* Attributes exposed on PFs; visibility gated by sriov_pf_attrs_are_visible() */
static struct attribute *sriov_pf_dev_attrs[] = {
	&dev_attr_sriov_totalvfs.attr,
	&dev_attr_sriov_numvfs.attr,
	&dev_attr_sriov_offset.attr,
	&dev_attr_sriov_stride.attr,
	&dev_attr_sriov_vf_device.attr,
	&dev_attr_sriov_drivers_autoprobe.attr,
#ifdef CONFIG_PCI_MSI
	&dev_attr_sriov_vf_total_msix.attr,
#endif
	NULL,
};
601
sriov_pf_attrs_are_visible(struct kobject * kobj,struct attribute * a,int n)602 static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj,
603 struct attribute *a, int n)
604 {
605 struct device *dev = kobj_to_dev(kobj);
606
607 if (!dev_is_pf(dev))
608 return 0;
609
610 return a->mode;
611 }
612
/* Attached to every PCI device; is_visible hides it on non-PFs */
const struct attribute_group sriov_pf_dev_attr_group = {
	.attrs = sriov_pf_dev_attrs,
	.is_visible = sriov_pf_attrs_are_visible,
};
617
/* Arch hook called before VFs are enabled; default is a no-op success. */
int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	return 0;
}
622
/* Arch hook called after VFs are disabled; default is a no-op success. */
int __weak pcibios_sriov_disable(struct pci_dev *pdev)
{
	return 0;
}
627
/* Instantiate @num_vfs VFs; on failure tear down those already added. */
static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
{
	unsigned int i;
	int rc;

	/* Platform enumerates VFs by other means; nothing to scan here */
	if (dev->no_vf_scan)
		return 0;

	for (i = 0; i < num_vfs; i++) {
		rc = pci_iov_add_virtfn(dev, i);
		if (rc)
			goto failed;
	}
	return 0;
failed:
	/* Roll back VFs 0..i-1 that were successfully added */
	while (i--)
		pci_iov_remove_virtfn(dev, i);

	return rc;
}
648
/*
 * Enable @nr_virtfn VFs on PF @dev: validate resources and bus range,
 * set VF Enable + MSE, and create the VF devices.  Returns 0 or -errno;
 * on failure the hardware is returned to the disabled state.
 */
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i;
	int nres;
	u16 initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;
	int bus;

	if (!nr_virtfn)
		return 0;

	/* No partial reconfiguration: must disable before re-enabling */
	if (iov->num_VFs)
		return -EINVAL;

	/* Without VF Migration, InitialVFs must equal TotalVFs */
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	/* Count IOV BARs that are both large enough and already claimed */
	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int idx = pci_resource_num_from_vf_bar(i);
		resource_size_t vf_bar_sz = pci_iov_resource_size(dev, idx);

		bars |= (1 << idx);
		res = &dev->resource[idx];
		if (vf_bar_sz * nr_virtfn > resource_size(res))
			continue;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		pci_err(dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	/* Highest bus a VF would occupy must fit the PF's bus range */
	bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
	if (bus > dev->bus->busn_res.end) {
		pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
			nr_virtfn, bus, &dev->bus->busn_res);
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		pci_err(dev, "SR-IOV: IOV BARS not allocated\n");
		return -ENOMEM;
	}

	/* Dependency link: another function controls our VFs */
	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		if (!pdev->is_physfn) {
			pci_dev_put(pdev);
			return -ENOSYS;
		}

		rc = sysfs_create_link(&dev->dev.kobj,
				       &pdev->dev.kobj, "dep_link");
		pci_dev_put(pdev);
		if (rc)
			return rc;
	}

	iov->initial_VFs = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	rc = pcibios_sriov_enable(dev, initial);
	if (rc) {
		pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc);
		goto err_pcibios;
	}

	/* NumVFs must be set before VF Enable; then wait for VFs to appear */
	pci_iov_set_numvfs(dev, nr_virtfn);
	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);
	pci_cfg_access_unlock(dev);

	rc = sriov_add_vfs(dev, initial);
	if (rc)
		goto err_pcibios;

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

err_pcibios:
	/* Undo VF Enable/MSE and give the hardware time to settle */
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	pcibios_sriov_disable(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	pci_iov_set_numvfs(dev, 0);
	return rc;
}
763
sriov_del_vfs(struct pci_dev * dev)764 static void sriov_del_vfs(struct pci_dev *dev)
765 {
766 struct pci_sriov *iov = dev->sriov;
767 int i;
768
769 for (i = 0; i < iov->num_VFs; i++)
770 pci_iov_remove_virtfn(dev, i);
771 }
772
/* Tear down all VFs of @dev and clear VF Enable/MSE in hardware. */
static void sriov_disable(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	/* Remove the VF devices before turning off VF Enable */
	sriov_del_vfs(dev);
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* Allow the hardware to settle after clearing VF Enable */
	ssleep(1);
	pci_cfg_access_unlock(dev);

	pcibios_sriov_disable(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->num_VFs = 0;
	pci_iov_set_numvfs(dev, 0);
}
795
/*
 * Probe the SR-IOV capability at config offset @pos, size the IOV BARs,
 * and allocate/populate dev->sriov.  Returns 0 on success (including the
 * benign TotalVFs == 0 case) or a negative errno.
 */
static int sriov_init(struct pci_dev *dev, int pos)
{
	int i, bar64;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total;
	struct pci_sriov *iov;
	struct resource *res;
	const char *res_name;
	struct pci_dev *pdev;
	u32 sriovbars[PCI_SRIOV_NUM_BARS];

	/* If firmware left VF Enable set, clear it and let things settle */
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	/* Only the first PF on the bus decides the ARI Capable Hierarchy bit */
	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	/* Pick the smallest supported page size >= the system page size */
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	iov = kzalloc_obj(*iov);
	if (!iov)
		return -ENOMEM;

	/* Sizing SR-IOV BARs with VF Enable cleared - no decode */
	__pci_size_stdbars(dev, PCI_SRIOV_NUM_BARS,
			   pos + PCI_SRIOV_BAR, sriovbars);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int idx = pci_resource_num_from_vf_bar(i);

		res = &dev->resource[idx];
		res_name = pci_resource_name(dev, idx);

		/*
		 * If it is already FIXED, don't change it, something
		 * (perhaps EA or header fixups) wants it this way.
		 */
		if (res->flags & IORESOURCE_PCI_FIXED)
			bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
		else
			bar64 = __pci_read_base(dev, pci_bar_unknown, res,
						pos + PCI_SRIOV_BAR + i * 4,
						&sriovbars[i]);
		if (!res->flags)
			continue;
		/* Per-VF BAR must be page aligned so VFs can be mapped safely */
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		iov->barsz[i] = resource_size(res);
		/* The PF resource covers the same BAR for every possible VF */
		resource_set_size(res, resource_size(res) * total);
		pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
			 res_name, res, i, total);
		i += bar64;	/* a 64-bit BAR consumes the next slot too */
		nres++;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total_VFs = total;
	iov->driver_max_VFs = total;
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
	iov->pgsz = pgsz;
	iov->self = dev;
	iov->drivers_autoprobe = true;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
	iov->vf_rebar_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VF_REBAR);

	/* Remember (and pin) the lowest PF on the bus, if it isn't us */
	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	dev->sriov = iov;
	dev->is_physfn = 1;
	rc = compute_max_vf_buses(dev);
	if (rc)
		goto fail_max_buses;

	return 0;

fail_max_buses:
	dev->sriov = NULL;
	dev->is_physfn = 0;
failed:
	/* Forget any IOV BARs we partially set up */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[pci_resource_num_from_vf_bar(i)];
		res->flags = 0;
	}

	kfree(iov);
	return rc;
}
919
sriov_release(struct pci_dev * dev)920 static void sriov_release(struct pci_dev *dev)
921 {
922 BUG_ON(dev->sriov->num_VFs);
923
924 if (dev != dev->sriov->dev)
925 pci_dev_put(dev->sriov->dev);
926
927 kfree(dev->sriov);
928 dev->sriov = NULL;
929 }
930
/* Re-apply cached VF BAR sizes through the VF Resizable BAR capability. */
static void sriov_restore_vf_rebar_state(struct pci_dev *dev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_iov_vf_rebar_cap(dev);
	if (!pos)
		return;

	pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
	nbars = FIELD_GET(PCI_VF_REBAR_CTRL_NBAR_MASK, ctrl);

	/* One 8-byte capability/control register pair per resizable BAR */
	for (i = 0; i < nbars; i++, pos += 8) {
		int bar_idx, size;

		pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
		bar_idx = FIELD_GET(PCI_VF_REBAR_CTRL_BAR_IDX, ctrl);
		/* Convert the cached byte size back to a ReBAR size encoding */
		size = pci_rebar_bytes_to_size(dev->sriov->barsz[bar_idx]);
		ctrl &= ~PCI_VF_REBAR_CTRL_BAR_SIZE;
		ctrl |= FIELD_PREP(PCI_VF_REBAR_CTRL_BAR_SIZE, size);
		pci_write_config_dword(dev, pos + PCI_VF_REBAR_CTRL, ctrl);
	}
}
954
/* Restore the SR-IOV capability registers after reset/resume. */
static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	/* If VF Enable is already set the device kept its state; don't touch */
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	/*
	 * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
	 * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
	 */
	ctrl &= ~PCI_SRIOV_CTRL_ARI;
	ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		pci_update_resource(dev, pci_resource_num_from_vf_bar(i));

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_iov_set_numvfs(dev, iov->num_VFs);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* Re-enabling VF Enable: give VFs time to become usable again */
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}
982
983 /**
984 * pci_iov_init - initialize the IOV capability
985 * @dev: the PCI device
986 *
987 * Returns 0 on success, or negative on failure.
988 */
pci_iov_init(struct pci_dev * dev)989 int pci_iov_init(struct pci_dev *dev)
990 {
991 int pos;
992
993 if (!pci_is_pcie(dev))
994 return -ENODEV;
995
996 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
997 if (pos)
998 return sriov_init(dev, pos);
999
1000 return -ENODEV;
1001 }
1002
1003 /**
1004 * pci_iov_release - release resources used by the IOV capability
1005 * @dev: the PCI device
1006 */
pci_iov_release(struct pci_dev * dev)1007 void pci_iov_release(struct pci_dev *dev)
1008 {
1009 if (dev->is_physfn)
1010 sriov_release(dev);
1011 }
1012
1013 /**
1014 * pci_iov_remove - clean up SR-IOV state after PF driver is detached
1015 * @dev: the PCI device
1016 */
pci_iov_remove(struct pci_dev * dev)1017 void pci_iov_remove(struct pci_dev *dev)
1018 {
1019 struct pci_sriov *iov = dev->sriov;
1020
1021 if (!dev->is_physfn)
1022 return;
1023
1024 iov->driver_max_VFs = iov->total_VFs;
1025 if (iov->num_VFs)
1026 pci_warn(dev, "driver left SR-IOV enabled after remove\n");
1027 }
1028
1029 /**
1030 * pci_iov_update_resource - update a VF BAR
1031 * @dev: the PCI device
1032 * @resno: the resource number
1033 *
1034 * Update a VF BAR in the SR-IOV capability of a PF.
1035 */
pci_iov_update_resource(struct pci_dev * dev,int resno)1036 void pci_iov_update_resource(struct pci_dev *dev, int resno)
1037 {
1038 struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
1039 struct resource *res = pci_resource_n(dev, resno);
1040 int vf_bar = pci_resource_num_to_vf_bar(resno);
1041 struct pci_bus_region region;
1042 u16 cmd;
1043 u32 new;
1044 int reg;
1045
1046 /*
1047 * The generic pci_restore_bars() path calls this for all devices,
1048 * including VFs and non-SR-IOV devices. If this is not a PF, we
1049 * have nothing to do.
1050 */
1051 if (!iov)
1052 return;
1053
1054 pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
1055 if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
1056 dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
1057 vf_bar, res);
1058 return;
1059 }
1060
1061 /*
1062 * Ignore unimplemented BARs, unused resource slots for 64-bit
1063 * BARs, and non-movable resources, e.g., those described via
1064 * Enhanced Allocation.
1065 */
1066 if (!res->flags)
1067 return;
1068
1069 if (res->flags & IORESOURCE_UNSET)
1070 return;
1071
1072 if (res->flags & IORESOURCE_PCI_FIXED)
1073 return;
1074
1075 pcibios_resource_to_bus(dev->bus, ®ion, res);
1076 new = region.start;
1077 new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
1078
1079 reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
1080 pci_write_config_dword(dev, reg, new);
1081 if (res->flags & IORESOURCE_MEM_64) {
1082 new = region.start >> 16 >> 16;
1083 pci_write_config_dword(dev, reg + 4, new);
1084 }
1085 }
1086
/* Arch override hook; by default the alignment is a single VF BAR size. */
resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
						      int resno)
{
	return pci_iov_resource_size(dev, resno);
}
1092
1093 /**
1094 * pci_sriov_resource_alignment - get resource alignment for VF BAR
1095 * @dev: the PCI device
1096 * @resno: the resource number
1097 *
1098 * Returns the alignment of the VF BAR found in the SR-IOV capability.
1099 * This is not the same as the resource size which is defined as
1100 * the VF BAR size multiplied by the number of VFs. The alignment
1101 * is just the VF BAR size.
1102 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
	/* Defer to the arch hook; the weak default is a single VF BAR size */
	return pcibios_iov_resource_alignment(dev, resno);
}
1107
1108 /**
1109 * pci_restore_iov_state - restore the state of the IOV capability
1110 * @dev: the PCI device
1111 */
pci_restore_iov_state(struct pci_dev * dev)1112 void pci_restore_iov_state(struct pci_dev *dev)
1113 {
1114 if (dev->is_physfn) {
1115 sriov_restore_vf_rebar_state(dev);
1116 sriov_restore_state(dev);
1117 }
1118 }
1119
1120 /**
1121 * pci_vf_drivers_autoprobe - set PF property drivers_autoprobe for VFs
1122 * @dev: the PCI device
1123 * @auto_probe: set VF drivers auto probe flag
1124 */
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool auto_probe)
{
	/* The flag lives in the PF's SR-IOV state; ignore non-PF devices */
	if (!dev->is_physfn)
		return;

	dev->sriov->drivers_autoprobe = auto_probe;
}
1130
1131 /**
1132 * pci_iov_bus_range - find bus range used by Virtual Function
1133 * @bus: the PCI bus
1134 *
1135 * Returns max number of buses (exclude current one) used by Virtual
1136 * Functions.
1137 */
pci_iov_bus_range(struct pci_bus * bus)1138 int pci_iov_bus_range(struct pci_bus *bus)
1139 {
1140 int max = 0;
1141 struct pci_dev *dev;
1142
1143 list_for_each_entry(dev, &bus->devices, bus_list) {
1144 if (!dev->is_physfn)
1145 continue;
1146 if (dev->sriov->max_VF_buses > max)
1147 max = dev->sriov->max_VF_buses;
1148 }
1149
1150 return max ? max - bus->number : 0;
1151 }
1152
1153 /**
1154 * pci_enable_sriov - enable the SR-IOV capability
1155 * @dev: the PCI device
1156 * @nr_virtfn: number of virtual functions to enable
1157 *
1158 * Returns 0 on success, or negative on failure.
1159 */
pci_enable_sriov(struct pci_dev * dev,int nr_virtfn)1160 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1161 {
1162 might_sleep();
1163
1164 if (!dev->is_physfn)
1165 return -ENOSYS;
1166
1167 return sriov_enable(dev, nr_virtfn);
1168 }
1169 EXPORT_SYMBOL_GPL(pci_enable_sriov);
1170
1171 /**
1172 * pci_disable_sriov - disable the SR-IOV capability
1173 * @dev: the PCI device
1174 */
pci_disable_sriov(struct pci_dev * dev)1175 void pci_disable_sriov(struct pci_dev *dev)
1176 {
1177 might_sleep();
1178
1179 if (!dev->is_physfn)
1180 return;
1181
1182 sriov_disable(dev);
1183 }
1184 EXPORT_SYMBOL_GPL(pci_disable_sriov);
1185
1186 /**
 * pci_num_vf - return number of VFs associated with a PF
1188 * @dev: the PCI device
1189 *
1190 * Returns number of VFs, or 0 if SR-IOV is not enabled.
1191 */
pci_num_vf(struct pci_dev * dev)1192 int pci_num_vf(struct pci_dev *dev)
1193 {
1194 if (!dev->is_physfn)
1195 return 0;
1196
1197 return dev->sriov->num_VFs;
1198 }
1199 EXPORT_SYMBOL_GPL(pci_num_vf);
1200
1201 /**
 * pci_vfs_assigned - return number of VFs that are assigned to a guest
1203 * @dev: the PCI device
1204 *
1205 * Returns number of VFs belonging to this device that are assigned to a guest.
1206 * If device is not a physical function returns 0.
1207 */
pci_vfs_assigned(struct pci_dev * dev)1208 int pci_vfs_assigned(struct pci_dev *dev)
1209 {
1210 struct pci_dev *vfdev;
1211 unsigned int vfs_assigned = 0;
1212 unsigned short dev_id;
1213
1214 /* only search if we are a PF */
1215 if (!dev->is_physfn)
1216 return 0;
1217
1218 /*
1219 * determine the device ID for the VFs, the vendor ID will be the
1220 * same as the PF so there is no need to check for that one
1221 */
1222 dev_id = dev->sriov->vf_device;
1223
1224 /* loop through all the VFs to see if we own any that are assigned */
1225 vfdev = pci_get_device(dev->vendor, dev_id, NULL);
1226 while (vfdev) {
1227 /*
1228 * It is considered assigned if it is a virtual function with
1229 * our dev as the physical function and the assigned bit is set
1230 */
1231 if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
1232 pci_is_dev_assigned(vfdev))
1233 vfs_assigned++;
1234
1235 vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
1236 }
1237
1238 return vfs_assigned;
1239 }
1240 EXPORT_SYMBOL_GPL(pci_vfs_assigned);
1241
1242 /**
1243 * pci_sriov_set_totalvfs -- reduce the TotalVFs available
1244 * @dev: the PCI PF device
1245 * @numvfs: number that should be used for TotalVFs supported
1246 *
1247 * Should be called from PF driver's probe routine with
1248 * device's mutex held.
1249 *
1250 * Returns 0 if PF is an SRIOV-capable device and
1251 * value of numvfs valid. If not a PF return -ENOSYS;
1252 * if numvfs is invalid return -EINVAL;
1253 * if VFs already enabled, return -EBUSY.
1254 */
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
	struct pci_sriov *iov;

	if (!dev->is_physfn)
		return -ENOSYS;

	iov = dev->sriov;

	/* Cannot advertise more VFs than the hardware supports */
	if (numvfs > iov->total_VFs)
		return -EINVAL;

	/* The limit must not change while any VF is enabled */
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		return -EBUSY;

	iov->driver_max_VFs = numvfs;
	return 0;
}
1270 EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
1271
1272 /**
1273 * pci_sriov_get_totalvfs -- get total VFs supported on this device
1274 * @dev: the PCI PF device
1275 *
1276 * For a PCIe device with SRIOV support, return the PCIe
1277 * SRIOV capability value of TotalVFs or the value of driver_max_VFs
1278 * if the driver reduced it. Otherwise 0.
1279 */
pci_sriov_get_totalvfs(struct pci_dev * dev)1280 int pci_sriov_get_totalvfs(struct pci_dev *dev)
1281 {
1282 if (!dev->is_physfn)
1283 return 0;
1284
1285 return dev->sriov->driver_max_VFs;
1286 }
1287 EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
1288
1289 /**
1290 * pci_sriov_configure_simple - helper to configure SR-IOV
1291 * @dev: the PCI device
1292 * @nr_virtfn: number of virtual functions to enable, 0 to disable
1293 *
1294 * Enable or disable SR-IOV for devices that don't require any PF setup
1295 * before enabling SR-IOV. Return value is negative on error, or number of
1296 * VFs allocated on success.
1297 */
pci_sriov_configure_simple(struct pci_dev * dev,int nr_virtfn)1298 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
1299 {
1300 int rc;
1301
1302 might_sleep();
1303
1304 if (!dev->is_physfn)
1305 return -ENODEV;
1306
1307 if (pci_vfs_assigned(dev)) {
1308 pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
1309 return -EPERM;
1310 }
1311
1312 if (nr_virtfn == 0) {
1313 sriov_disable(dev);
1314 return 0;
1315 }
1316
1317 rc = sriov_enable(dev, nr_virtfn);
1318 if (rc < 0)
1319 return rc;
1320
1321 return nr_virtfn;
1322 }
1323 EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
1324
1325 /**
1326 * pci_iov_vf_bar_set_size - set a new size for a VF BAR
1327 * @dev: the PCI device
1328 * @resno: the resource number
1329 * @size: new size as defined in the spec (0=1MB, 31=128TB)
1330 *
1331 * Set the new size of a VF BAR that supports VF resizable BAR capability.
1332 * Unlike pci_resize_resource(), this does not cause the resource that
1333 * reserves the MMIO space (originally up to total_VFs) to be resized, which
1334 * means that following calls to pci_enable_sriov() can fail if the resources
1335 * no longer fit.
1336 *
1337 * Return: 0 on success, or negative on failure.
1338 */
pci_iov_vf_bar_set_size(struct pci_dev * dev,int resno,int size)1339 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
1340 {
1341 if (!pci_resource_is_iov(resno))
1342 return -EINVAL;
1343
1344 if (pci_iov_is_memory_decoding_enabled(dev))
1345 return -EBUSY;
1346
1347 if (!pci_rebar_size_supported(dev, resno, size))
1348 return -EINVAL;
1349
1350 return pci_rebar_set_size(dev, resno, size);
1351 }
1352 EXPORT_SYMBOL_GPL(pci_iov_vf_bar_set_size);
1353
1354 /**
1355 * pci_iov_vf_bar_get_sizes - get VF BAR sizes allowing to create up to num_vfs
1356 * @dev: the PCI device
1357 * @resno: the resource number
1358 * @num_vfs: number of VFs
1359 *
1360 * Get the sizes of a VF resizable BAR that can accommodate @num_vfs within
1361 * the currently assigned size of the resource @resno.
1362 *
1363 * Return: A bitmask of sizes in format defined in the spec (bit 0=1MB,
1364 * bit 31=128TB).
1365 */
u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
{
	u64 vf_len = pci_resource_len(dev, resno);
	u64 sizes;

	if (!num_vfs)
		return 0;

	/* Per-VF share of the currently assigned resource; do_div()
	 * divides vf_len in place. */
	do_div(vf_len, num_vfs);

	/*
	 * roundup_pow_of_two(vf_len + 1) - 1 yields a mask with one bit
	 * set for every power-of-two size that fits within the per-VF
	 * share; shifting by ilog2(SZ_1M) re-bases it so bit 0 means 1MB,
	 * matching the spec's resizable BAR size encoding.
	 */
	sizes = (roundup_pow_of_two(vf_len + 1) - 1) >> ilog2(SZ_1M);

	/* Keep only sizes the device actually supports */
	return sizes & pci_rebar_get_possible_sizes(dev, resno);
}
1379 EXPORT_SYMBOL_GPL(pci_iov_vf_bar_get_sizes);
1380