// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express I/O Virtualization (IOV) support
 *   Single Root IOV 1.0
 *   Address Translation Service 1.0
 *
 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include "pci.h"

#define VIRTFN_ID_LEN	17	/* "virtfn%u\0" for 2^32 - 1 */

int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
{
	if (!dev->is_physfn)
		return -EINVAL;
	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
				    dev->sriov->stride * vf_id) >> 8);
}

int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
{
	if (!dev->is_physfn)
		return -EINVAL;
	return (dev->devfn + dev->sriov->offset +
		dev->sriov->stride * vf_id) & 0xff;
}
EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn);

int pci_iov_vf_id(struct pci_dev *dev)
{
	struct pci_dev *pf;

	if (!dev->is_virtfn)
		return -EINVAL;

	pf = pci_physfn(dev);
	return (pci_dev_id(dev) - (pci_dev_id(pf) + pf->sriov->offset)) /
	       pf->sriov->stride;
}
EXPORT_SYMBOL_GPL(pci_iov_vf_id);

/**
 * pci_iov_get_pf_drvdata - Return the drvdata of a PF
 * @dev: VF pci_dev
 * @pf_driver: Device driver required to own the PF
 *
 * This must be called from a context that ensures that a VF driver is attached.
 * The value returned is invalid once the VF driver completes its remove()
 * callback.
 *
 * Locking is achieved by the driver core. A VF driver cannot be probed until
 * pci_enable_sriov() is called and pci_disable_sriov() does not return until
 * all VF drivers have completed their remove().
 *
 * The PF driver must call pci_disable_sriov() before it begins to destroy the
 * drvdata.
 */
void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver)
{
	struct pci_dev *pf_dev;

	if (!dev->is_virtfn)
		return ERR_PTR(-EINVAL);
	pf_dev = dev->physfn;
	if (pf_dev->driver != pf_driver)
		return ERR_PTR(-EINVAL);
	return pci_get_drvdata(pf_dev);
}
EXPORT_SYMBOL_GPL(pci_iov_get_pf_drvdata);
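
/*
 * Example (illustrative sketch only): a VF driver that needs its VF index
 * and the private data of its PF driver typically combines pci_iov_vf_id()
 * with pci_iov_get_pf_drvdata() in its probe() path. The names
 * my_pf_driver, my_pf_state, my_vf_probe() and my_vf_init() below are
 * hypothetical and not part of this file or of any in-tree driver:
 *
 *	static int my_vf_probe(struct pci_dev *vf, const struct pci_device_id *id)
 *	{
 *		struct my_pf_state *pf_state;
 *		int vf_id;
 *
 *		vf_id = pci_iov_vf_id(vf);
 *		if (vf_id < 0)
 *			return vf_id;
 *
 *		pf_state = pci_iov_get_pf_drvdata(vf, &my_pf_driver);
 *		if (IS_ERR(pf_state))
 *			return PTR_ERR(pf_state);
 *
 *		return my_vf_init(vf, pf_state, vf_id);
 *	}
 *
 * Per the locking rules documented above, pf_state remains valid for as
 * long as a VF driver is bound to the VF.
 */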

/*
 * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
 * change when NumVFs changes.
 *
 * Update iov->offset and iov->stride when NumVFs is written.
 */
static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
{
	struct pci_sriov *iov = dev->sriov;

	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
}

/*
 * The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
 * determine how many additional bus numbers will be consumed by VFs.
 *
 * Iterate over all valid NumVFs, validate offset and stride, and calculate
 * the maximum number of bus numbers that could ever be required.
 */
static int compute_max_vf_buses(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;
	int nr_virtfn, busnr, rc = 0;

	for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
		pci_iov_set_numvfs(dev, nr_virtfn);
		if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
			rc = -EIO;
			goto out;
		}

		busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
		if (busnr > iov->max_VF_buses)
			iov->max_VF_buses = busnr;
	}

out:
	pci_iov_set_numvfs(dev, 0);
	return rc;
}
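
/*
 * Worked example (illustrative values only): consider a PF at bus 0x3b,
 * devfn 0x00, reporting First VF Offset 1 and VF Stride 1. Then
 * pci_iov_virtfn_bus()/pci_iov_virtfn_devfn() place VF 0 at
 * 0x00 + 1 + 1 * 0 = 0x001, i.e. devfn 0x01 on bus 0x3b, while VF 300 ends
 * up at 0x00 + 1 + 1 * 300 = 0x12d, i.e. devfn 0x2d on bus 0x3b + 1 = 0x3c.
 * compute_max_vf_buses() records the highest such VF bus in max_VF_buses so
 * that pci_iov_bus_range() can reserve the extra bus numbers while scanning.
 */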

static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
	struct pci_bus *child;

	if (bus->number == busnr)
		return bus;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	if (child)
		return child;

	child = pci_add_new_bus(bus, NULL, busnr);
	if (!child)
		return NULL;

	pci_bus_insert_busn_res(child, busnr, busnr);

	return child;
}

static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
{
	if (physbus != virtbus && list_empty(&virtbus->devices))
		pci_remove_bus(virtbus);
}

resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)];
}

void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
			       resource_size_t size)
{
	if (!pci_resource_is_iov(resno)) {
		pci_warn(dev, "%s is not an IOV resource\n",
			 pci_resource_name(dev, resno));
		return;
	}

	dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)] = size;
}

bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_CTRL, &cmd);

	return cmd & PCI_SRIOV_CTRL_MSE;
}

static void pci_read_vf_config_common(struct pci_dev *virtfn)
{
	struct pci_dev *physfn = virtfn->physfn;

	/*
	 * Some config registers are the same across all associated VFs.
	 * Read them once from VF0 so we can skip reading them from the
	 * other VFs.
	 *
	 * PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to
	 * have the same Revision ID and Subsystem ID, but we assume they
	 * do.
	 */
	pci_read_config_dword(virtfn, PCI_CLASS_REVISION,
			      &physfn->sriov->class);
	pci_read_config_byte(virtfn, PCI_HEADER_TYPE,
			     &physfn->sriov->hdr_type);
	pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID,
			     &physfn->sriov->subsystem_vendor);
	pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
			     &physfn->sriov->subsystem_device);
}

int pci_iov_sysfs_link(struct pci_dev *dev,
		       struct pci_dev *virtfn, int id)
{
	char buf[VIRTFN_ID_LEN];
	int rc;

	sprintf(buf, "virtfn%u", id);
	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
	if (rc)
		goto failed;
	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
	if (rc)
		goto failed1;

	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

	return 0;

failed1:
	sysfs_remove_link(&dev->dev.kobj, buf);
failed:
	return rc;
}

#ifdef CONFIG_PCI_MSI
static ssize_t sriov_vf_total_msix_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u32 vf_total_msix = 0;

	device_lock(dev);
	if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
		goto unlock;

	vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
unlock:
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", vf_total_msix);
}
static DEVICE_ATTR_RO(sriov_vf_total_msix);

static ssize_t sriov_vf_msix_count_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct pci_dev *vf_dev = to_pci_dev(dev);
	struct pci_dev *pdev = pci_physfn(vf_dev);
	int val, ret = 0;

	if (kstrtoint(buf, 0, &val) < 0)
		return -EINVAL;

	if (val < 0)
		return -EINVAL;

	device_lock(&pdev->dev);
	if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
		ret = -EOPNOTSUPP;
		goto err_pdev;
	}

	device_lock(&vf_dev->dev);
	if (vf_dev->driver) {
		/*
		 * A driver is already attached to this VF and has configured
		 * itself based on the current MSI-X vector count. Changing
		 * the vector size could mess up the driver, so block it.
		 */
		ret = -EBUSY;
		goto err_dev;
	}

	ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);

err_dev:
	device_unlock(&vf_dev->dev);
err_pdev:
	device_unlock(&pdev->dev);
	return ret ? : count;
}
static DEVICE_ATTR_WO(sriov_vf_msix_count);
#endif
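
/*
 * Example (illustrative sketch, hypothetical names): a PF driver opts in to
 * the sriov_vf_total_msix/sriov_vf_msix_count files above by implementing
 * the two pci_driver callbacks they rely on. my_pf_state, my_pf_driver and
 * my_hw_assign_vf_msix() are made-up names; only the callback signatures
 * come from struct pci_driver:
 *
 *	static u32 my_pf_get_vf_total_msix(struct pci_dev *pf)
 *	{
 *		struct my_pf_state *pf_state = pci_get_drvdata(pf);
 *
 *		return pf_state->msix_pool_size;
 *	}
 *
 *	static int my_pf_set_msix_vec_count(struct pci_dev *vf, int count)
 *	{
 *		struct my_pf_state *pf_state = pci_get_drvdata(pci_physfn(vf));
 *
 *		return my_hw_assign_vf_msix(pf_state, pci_iov_vf_id(vf), count);
 *	}
 *
 *	static struct pci_driver my_pf_driver = {
 *		...
 *		.sriov_get_vf_total_msix  = my_pf_get_vf_total_msix,
 *		.sriov_set_msix_vec_count = my_pf_set_msix_vec_count,
 *	};
 */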

static struct attribute *sriov_vf_dev_attrs[] = {
#ifdef CONFIG_PCI_MSI
	&dev_attr_sriov_vf_msix_count.attr,
#endif
	NULL,
};

static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (!pdev->is_virtfn)
		return 0;

	return a->mode;
}

const struct attribute_group sriov_vf_dev_attr_group = {
	.attrs = sriov_vf_dev_attrs,
	.is_visible = sriov_vf_attrs_are_visible,
};

static struct pci_dev *pci_iov_scan_device(struct pci_dev *dev, int id,
					   struct pci_bus *bus)
{
	struct pci_sriov *iov = dev->sriov;
	struct pci_dev *virtfn;
	int rc;

	virtfn = pci_alloc_dev(bus);
	if (!virtfn)
		return ERR_PTR(-ENOMEM);

	virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	virtfn->device = iov->vf_device;
	virtfn->is_virtfn = 1;
	virtfn->physfn = pci_dev_get(dev);
	virtfn->no_command_memory = 1;

	if (id == 0)
		pci_read_vf_config_common(virtfn);

	rc = pci_setup_device(virtfn);
	if (rc) {
		pci_dev_put(dev);
		pci_bus_put(virtfn->bus);
		kfree(virtfn);
		return ERR_PTR(rc);
	}

	return virtfn;
}

int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
	struct pci_bus *bus;
	struct pci_dev *virtfn;
	struct resource *res;
	int rc, i;
	u64 size;

	bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
	if (!bus) {
		rc = -ENOMEM;
		goto failed;
	}

	virtfn = pci_iov_scan_device(dev, id, bus);
	if (IS_ERR(virtfn)) {
		rc = PTR_ERR(virtfn);
		goto failed0;
	}

	virtfn->dev.parent = dev->dev.parent;
	virtfn->multifunction = 0;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int idx = pci_resource_num_from_vf_bar(i);

		res = &dev->resource[idx];
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		size = pci_iov_resource_size(dev, idx);
		resource_set_range(&virtfn->resource[i],
				   res->start + size * id, size);
		rc = request_resource(res, &virtfn->resource[i]);
		BUG_ON(rc);
	}

	pci_device_add(virtfn, virtfn->bus);
	rc = pci_iov_sysfs_link(dev, virtfn, id);
	if (rc)
		goto failed1;

	pci_bus_add_device(virtfn);

	return 0;

failed1:
	pci_stop_and_remove_bus_device(virtfn);
	pci_dev_put(dev);
failed0:
	virtfn_remove_bus(dev->bus, bus);
failed:

	return rc;
}
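
/*
 * VF BAR layout (illustrative numbers only): sriov_init() sized each PF
 * "VF BAR n" resource to total_VFs * barsz[n], and the loop in
 * pci_iov_add_virtfn() above hands VF <id> the slice starting at
 * res->start + id * barsz[n]. For example, with a 16 KB VF BAR 0 whose PF
 * resource starts at 0xe0000000, VF 0 decodes 0xe0000000-0xe0003fff,
 * VF 1 decodes 0xe0004000-0xe0007fff, and so on.
 */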

void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;

	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
					     pci_iov_virtfn_bus(dev, id),
					     pci_iov_virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	/*
	 * pci_stop_dev() could have been called for this virtfn already,
	 * so the directory for the virtfn may have been removed before.
	 * Double check to avoid spurious sysfs warnings.
	 */
	if (virtfn->dev.kobj.sd)
		sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn->bus);

	/* balance pci_get_domain_bus_and_slot() */
	pci_dev_put(virtfn);
	pci_dev_put(dev);
}

static ssize_t sriov_totalvfs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}

static ssize_t sriov_numvfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 num_vfs;

	/* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
	device_lock(&pdev->dev);
	num_vfs = pdev->sriov->num_VFs;
	device_unlock(&pdev->dev);

	return sysfs_emit(buf, "%u\n", num_vfs);
}

/*
 * num_vfs > 0; number of VFs to enable
 * num_vfs = 0; disable all VFs
 *
 * Note: SRIOV spec does not allow partial VF
 *       disable, so it's all or none.
 */
static ssize_t sriov_numvfs_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret = 0;
	u16 num_vfs;

	if (kstrtou16(buf, 0, &num_vfs) < 0)
		return -EINVAL;

	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -ERANGE;

	device_lock(&pdev->dev);

	if (num_vfs == pdev->sriov->num_VFs)
		goto exit;

	/* is PF driver loaded */
	if (!pdev->driver) {
		pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
		ret = -ENOENT;
		goto exit;
	}

	/* is PF driver loaded w/callback */
	if (!pdev->driver->sriov_configure) {
		pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
		ret = -ENOENT;
		goto exit;
	}

	if (num_vfs == 0) {
		/* disable VFs */
		ret = pdev->driver->sriov_configure(pdev, 0);
		goto exit;
	}

	/* enable VFs */
	if (pdev->sriov->num_VFs) {
		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
			 pdev->sriov->num_VFs, num_vfs);
		ret = -EBUSY;
		goto exit;
	}

	ret = pdev->driver->sriov_configure(pdev, num_vfs);
	if (ret < 0)
		goto exit;

	if (ret != num_vfs)
		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
			 num_vfs, ret);

exit:
	device_unlock(&pdev->dev);

	if (ret < 0)
		return ret;

	return count;
}
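
/*
 * Example (illustrative): a PF driver that needs no device-specific setup
 * around VF enable/disable can wire this sysfs interface directly to
 * pci_sriov_configure_simple(); my_pf_driver is a hypothetical name:
 *
 *	static struct pci_driver my_pf_driver = {
 *		...
 *		.sriov_configure = pci_sriov_configure_simple,
 *	};
 *
 * After that, writing e.g. 4 to /sys/bus/pci/devices/<PF>/sriov_numvfs
 * enables four VFs and writing 0 disables them again.
 */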

static ssize_t sriov_offset_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pdev->sriov->offset);
}

static ssize_t sriov_stride_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pdev->sriov->stride);
}

static ssize_t sriov_vf_device_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%x\n", pdev->sriov->vf_device);
}

static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pdev->sriov->drivers_autoprobe);
}

static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool drivers_autoprobe;

	if (kstrtobool(buf, &drivers_autoprobe) < 0)
		return -EINVAL;

	pdev->sriov->drivers_autoprobe = drivers_autoprobe;

	return count;
}

static DEVICE_ATTR_RO(sriov_totalvfs);
static DEVICE_ATTR_RW(sriov_numvfs);
static DEVICE_ATTR_RO(sriov_offset);
static DEVICE_ATTR_RO(sriov_stride);
static DEVICE_ATTR_RO(sriov_vf_device);
static DEVICE_ATTR_RW(sriov_drivers_autoprobe);

static struct attribute *sriov_pf_dev_attrs[] = {
	&dev_attr_sriov_totalvfs.attr,
	&dev_attr_sriov_numvfs.attr,
	&dev_attr_sriov_offset.attr,
	&dev_attr_sriov_stride.attr,
	&dev_attr_sriov_vf_device.attr,
	&dev_attr_sriov_drivers_autoprobe.attr,
#ifdef CONFIG_PCI_MSI
	&dev_attr_sriov_vf_total_msix.attr,
#endif
	NULL,
};

static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (!dev_is_pf(dev))
		return 0;

	return a->mode;
}

const struct attribute_group sriov_pf_dev_attr_group = {
	.attrs = sriov_pf_dev_attrs,
	.is_visible = sriov_pf_attrs_are_visible,
};

int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	return 0;
}

int __weak pcibios_sriov_disable(struct pci_dev *pdev)
{
	return 0;
}

static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
{
	unsigned int i;
	int rc;

	if (dev->no_vf_scan)
		return 0;

	pci_lock_rescan_remove();
	for (i = 0; i < num_vfs; i++) {
		rc = pci_iov_add_virtfn(dev, i);
		if (rc)
			goto failed;
	}
	pci_unlock_rescan_remove();
	return 0;
failed:
	while (i--)
		pci_iov_remove_virtfn(dev, i);
	pci_unlock_rescan_remove();

	return rc;
}

static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i;
	int nres;
	u16 initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;
	int bus;

	if (!nr_virtfn)
		return 0;

	if (iov->num_VFs)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int idx = pci_resource_num_from_vf_bar(i);
		resource_size_t vf_bar_sz = pci_iov_resource_size(dev, idx);

		bars |= (1 << idx);
		res = &dev->resource[idx];
		if (vf_bar_sz * nr_virtfn > resource_size(res))
			continue;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		pci_err(dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
	if (bus > dev->bus->busn_res.end) {
		pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
			nr_virtfn, bus, &dev->bus->busn_res);
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		pci_err(dev, "SR-IOV: IOV BARS not allocated\n");
		return -ENOMEM;
	}

	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		if (!pdev->is_physfn) {
			pci_dev_put(pdev);
			return -ENOSYS;
		}

		rc = sysfs_create_link(&dev->dev.kobj,
				       &pdev->dev.kobj, "dep_link");
		pci_dev_put(pdev);
		if (rc)
			return rc;
	}

	iov->initial_VFs = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	rc = pcibios_sriov_enable(dev, initial);
	if (rc) {
		pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc);
		goto err_pcibios;
	}

	pci_iov_set_numvfs(dev, nr_virtfn);
	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);
	pci_cfg_access_unlock(dev);

	rc = sriov_add_vfs(dev, initial);
	if (rc)
		goto err_pcibios;

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

err_pcibios:
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	pcibios_sriov_disable(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	pci_iov_set_numvfs(dev, 0);
	return rc;
}

static void sriov_del_vfs(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < iov->num_VFs; i++)
		pci_iov_remove_virtfn(dev, i);
	pci_unlock_rescan_remove();
}

static void sriov_disable(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	sriov_del_vfs(dev);
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	pcibios_sriov_disable(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->num_VFs = 0;
	pci_iov_set_numvfs(dev, 0);
}

static int sriov_init(struct pci_dev *dev, int pos)
{
	int i, bar64;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total;
	struct pci_sriov *iov;
	struct resource *res;
	const char *res_name;
	struct pci_dev *pdev;
	u32 sriovbars[PCI_SRIOV_NUM_BARS];

	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
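	/*
	 * PCI_SRIOV_SUP_PGSIZE is a bitmask: bit n set means the device
	 * supports a VF page size of 2^(n + 12) bytes (bit 0 = 4 KB,
	 * bit 4 = 64 KB, ...). The lines below first drop every supported
	 * size smaller than the kernel's PAGE_SIZE, then keep only the
	 * smallest remaining size and program it as the System Page Size.
	 * Illustrative example: with 64 KB kernel pages (PAGE_SHIFT = 16)
	 * and a device advertising 4 KB and 64 KB (pgsz = 0x11), bits 0-3
	 * are cleared, 0x10 remains, and 64 KB is written to
	 * PCI_SRIOV_SYS_PGSIZE.
	 */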
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	/* Sizing SR-IOV BARs with VF Enable cleared - no decode */
	__pci_size_stdbars(dev, PCI_SRIOV_NUM_BARS,
			   pos + PCI_SRIOV_BAR, sriovbars);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int idx = pci_resource_num_from_vf_bar(i);

		res = &dev->resource[idx];
		res_name = pci_resource_name(dev, idx);

		/*
		 * If it is already FIXED, don't change it, something
		 * (perhaps EA or header fixups) wants it this way.
		 */
		if (res->flags & IORESOURCE_PCI_FIXED)
			bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
		else
			bar64 = __pci_read_base(dev, pci_bar_unknown, res,
						pos + PCI_SRIOV_BAR + i * 4,
						&sriovbars[i]);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		iov->barsz[i] = resource_size(res);
		resource_set_size(res, resource_size(res) * total);
		pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
			 res_name, res, i, total);
		i += bar64;
		nres++;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total_VFs = total;
	iov->driver_max_VFs = total;
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
	iov->pgsz = pgsz;
	iov->self = dev;
	iov->drivers_autoprobe = true;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
	iov->vf_rebar_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VF_REBAR);

	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	dev->sriov = iov;
	dev->is_physfn = 1;
	rc = compute_max_vf_buses(dev);
	if (rc)
		goto fail_max_buses;

	return 0;

fail_max_buses:
	dev->sriov = NULL;
	dev->is_physfn = 0;
failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[pci_resource_num_from_vf_bar(i)];
		res->flags = 0;
	}

	kfree(iov);
	return rc;
}

static void sriov_release(struct pci_dev *dev)
{
	BUG_ON(dev->sriov->num_VFs);

	if (dev != dev->sriov->dev)
		pci_dev_put(dev->sriov->dev);

	kfree(dev->sriov);
	dev->sriov = NULL;
}

static void sriov_restore_vf_rebar_state(struct pci_dev *dev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_iov_vf_rebar_cap(dev);
	if (!pos)
		return;

	pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
	nbars = FIELD_GET(PCI_VF_REBAR_CTRL_NBAR_MASK, ctrl);

	for (i = 0; i < nbars; i++, pos += 8) {
		int bar_idx, size;

		pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
		bar_idx = FIELD_GET(PCI_VF_REBAR_CTRL_BAR_IDX, ctrl);
		size = pci_rebar_bytes_to_size(dev->sriov->barsz[bar_idx]);
		ctrl &= ~PCI_VF_REBAR_CTRL_BAR_SIZE;
		ctrl |= FIELD_PREP(PCI_VF_REBAR_CTRL_BAR_SIZE, size);
		pci_write_config_dword(dev, pos + PCI_VF_REBAR_CTRL, ctrl);
	}
}

static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	/*
	 * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
	 * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
	 */
	ctrl &= ~PCI_SRIOV_CTRL_ARI;
	ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		pci_update_resource(dev, pci_resource_num_from_vf_bar(i));

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_iov_set_numvfs(dev, iov->num_VFs);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}

/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
	int pos;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		return sriov_init(dev, pos);

	return -ENODEV;
}

/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_release(dev);
}

/**
 * pci_iov_remove - clean up SR-IOV state after PF driver is detached
 * @dev: the PCI device
 */
void pci_iov_remove(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	if (!dev->is_physfn)
		return;

	iov->driver_max_VFs = iov->total_VFs;
	if (iov->num_VFs)
		pci_warn(dev, "driver left SR-IOV enabled after remove\n");
}

/**
 * pci_iov_update_resource - update a VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Update a VF BAR in the SR-IOV capability of a PF.
 */
void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
	struct resource *res = pci_resource_n(dev, resno);
	int vf_bar = pci_resource_num_to_vf_bar(resno);
	struct pci_bus_region region;
	u16 cmd;
	u32 new;
	int reg;

	/*
	 * The generic pci_restore_bars() path calls this for all devices,
	 * including VFs and non-SR-IOV devices. If this is not a PF, we
	 * have nothing to do.
	 */
	if (!iov)
		return;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
			 vf_bar, res);
		return;
	}

	/*
	 * Ignore unimplemented BARs, unused resource slots for 64-bit
	 * BARs, and non-movable resources, e.g., those described via
	 * Enhanced Allocation.
	 */
	if (!res->flags)
		return;

	if (res->flags & IORESOURCE_UNSET)
		return;

	if (res->flags & IORESOURCE_PCI_FIXED)
		return;

	pcibios_resource_to_bus(dev->bus, &region, res);
	new = region.start;
	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;

	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
	pci_write_config_dword(dev, reg, new);
	if (res->flags & IORESOURCE_MEM_64) {
		new = region.start >> 16 >> 16;
		pci_write_config_dword(dev, reg + 4, new);
	}
}

resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
						      int resno)
{
	return pci_iov_resource_size(dev, resno);
}

/**
 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 * This is not the same as the resource size which is defined as
 * the VF BAR size multiplied by the number of VFs. The alignment
 * is just the VF BAR size.
 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
	return pcibios_iov_resource_alignment(dev, resno);
}

/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
	if (dev->is_physfn) {
		sriov_restore_vf_rebar_state(dev);
		sriov_restore_state(dev);
	}
}

/**
 * pci_vf_drivers_autoprobe - set PF property drivers_autoprobe for VFs
 * @dev: the PCI device
 * @auto_probe: set VF drivers auto probe flag
 */
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool auto_probe)
{
	if (dev->is_physfn)
		dev->sriov->drivers_autoprobe = auto_probe;
}

/**
 * pci_iov_bus_range - find bus range used by Virtual Function
 * @bus: the PCI bus
 *
 * Returns max number of buses (excluding the current one) used by Virtual
 * Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
	int max = 0;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!dev->is_physfn)
			continue;
		if (dev->sriov->max_VF_buses > max)
			max = dev->sriov->max_VF_buses;
	}

	return max ? max - bus->number : 0;
}

/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	might_sleep();

	if (!dev->is_physfn)
		return -ENOSYS;

	return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);

/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
	might_sleep();

	if (!dev->is_physfn)
		return;

	sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);
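
/*
 * Example (illustrative, hypothetical names): a PF driver that manages its
 * VFs itself rather than through the sriov_numvfs sysfs file typically
 * calls the helpers above from its own probe()/remove() paths.
 * my_pf_probe(), my_pf_remove() and my_requested_vfs are made up:
 *
 *	static int my_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		...
 *		rc = pci_enable_sriov(pdev, my_requested_vfs);
 *		if (rc)
 *			return rc;
 *		...
 *	}
 *
 *	static void my_pf_remove(struct pci_dev *pdev)
 *	{
 *		if (pci_num_vf(pdev))
 *			pci_disable_sriov(pdev);
 *		...
 *	}
 */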

/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->num_VFs;
}
EXPORT_SYMBOL_GPL(pci_num_vf);

/**
 * pci_vfs_assigned - returns number of VFs that are assigned to a guest
 * @dev: the PCI device
 *
 * Returns number of VFs belonging to this device that are assigned to a guest.
 * If device is not a physical function returns 0.
 */
int pci_vfs_assigned(struct pci_dev *dev)
{
	struct pci_dev *vfdev;
	unsigned int vfs_assigned = 0;
	unsigned short dev_id;

	/* only search if we are a PF */
	if (!dev->is_physfn)
		return 0;

	/*
	 * determine the device ID for the VFs, the vendor ID will be the
	 * same as the PF so there is no need to check for that one
	 */
	dev_id = dev->sriov->vf_device;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
	while (vfdev) {
		/*
		 * It is considered assigned if it is a virtual function with
		 * our dev as the physical function and the assigned bit is set
		 */
		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
		    pci_is_dev_assigned(vfdev))
			vfs_assigned++;

		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
	}

	return vfs_assigned;
}
EXPORT_SYMBOL_GPL(pci_vfs_assigned);

/**
 * pci_sriov_set_totalvfs -- reduce the TotalVFs available
 * @dev: the PCI PF device
 * @numvfs: number that should be used for TotalVFs supported
 *
 * Should be called from PF driver's probe routine with
 * device's mutex held.
 *
 * Returns 0 if PF is an SRIOV-capable device and
 * value of numvfs valid. If not a PF return -ENOSYS;
 * if numvfs is invalid return -EINVAL;
 * if VFs already enabled, return -EBUSY.
 */
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
	if (!dev->is_physfn)
		return -ENOSYS;

	if (numvfs > dev->sriov->total_VFs)
		return -EINVAL;

	/* Shouldn't change if VFs already enabled */
	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
		return -EBUSY;

	dev->sriov->driver_max_VFs = numvfs;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);

/**
 * pci_sriov_get_totalvfs -- get total VFs supported on this device
 * @dev: the PCI PF device
 *
 * For a PCIe device with SRIOV support, return the PCIe
 * SRIOV capability value of TotalVFs or the value of driver_max_VFs
 * if the driver reduced it. Otherwise 0.
 */
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->driver_max_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
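
/*
 * Example (illustrative): a PF driver whose firmware or resource budget
 * supports fewer VFs than the SR-IOV capability advertises can cap the
 * value reported through sriov_totalvfs from its probe() routine;
 * my_fw_max_vfs is a hypothetical per-device limit:
 *
 *	rc = pci_sriov_set_totalvfs(pdev, my_fw_max_vfs);
 *	if (rc)
 *		return rc;
 */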

/**
 * pci_sriov_configure_simple - helper to configure SR-IOV
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable, 0 to disable
 *
 * Enable or disable SR-IOV for devices that don't require any PF setup
 * before enabling SR-IOV. Return value is negative on error, or number of
 * VFs allocated on success.
 */
int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
{
	int rc;

	might_sleep();

	if (!dev->is_physfn)
		return -ENODEV;

	if (pci_vfs_assigned(dev)) {
		pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
		return -EPERM;
	}

	if (nr_virtfn == 0) {
		sriov_disable(dev);
		return 0;
	}

	rc = sriov_enable(dev, nr_virtfn);
	if (rc < 0)
		return rc;

	return nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);

/**
 * pci_iov_vf_bar_set_size - set a new size for a VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 * @size: new size as defined in the spec (0=1MB, 31=128TB)
 *
 * Set the new size of a VF BAR that supports VF resizable BAR capability.
 * Unlike pci_resize_resource(), this does not cause the resource that
 * reserves the MMIO space (originally up to total_VFs) to be resized, which
 * means that following calls to pci_enable_sriov() can fail if the resources
 * no longer fit.
 *
 * Return: 0 on success, or negative on failure.
 */
int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
{
	u32 sizes;
	int ret;

	if (!pci_resource_is_iov(resno))
		return -EINVAL;

	if (pci_iov_is_memory_decoding_enabled(dev))
		return -EBUSY;

	sizes = pci_rebar_get_possible_sizes(dev, resno);
	if (!sizes)
		return -ENOTSUPP;

	if (!(sizes & BIT(size)))
		return -EINVAL;

	ret = pci_rebar_set_size(dev, resno, size);
	if (ret)
		return ret;

	pci_iov_resource_set_size(dev, resno, pci_rebar_size_to_bytes(size));

	return 0;
}
EXPORT_SYMBOL_GPL(pci_iov_vf_bar_set_size);

/**
 * pci_iov_vf_bar_get_sizes - get VF BAR sizes allowing to create up to num_vfs
 * @dev: the PCI device
 * @resno: the resource number
 * @num_vfs: number of VFs
 *
 * Get the sizes of a VF resizable BAR that can accommodate @num_vfs within
 * the currently assigned size of the resource @resno.
 *
 * Return: A bitmask of sizes in format defined in the spec (bit 0=1MB,
 * bit 31=128TB).
 */
u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
{
	u64 vf_len = pci_resource_len(dev, resno);
	u32 sizes;

	if (!num_vfs)
		return 0;

	do_div(vf_len, num_vfs);
	sizes = (roundup_pow_of_two(vf_len + 1) - 1) >> ilog2(SZ_1M);

	return sizes & pci_rebar_get_possible_sizes(dev, resno);
}
EXPORT_SYMBOL_GPL(pci_iov_vf_bar_get_sizes);
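
/*
 * Example (illustrative sketch): on hardware with the VF Resizable BAR
 * capability, a PF driver might shrink a VF BAR so that more VFs fit in
 * the space reserved for it before enabling SR-IOV. PCI_IOV_RESOURCES is
 * the PF resource index of VF BAR 0; the error handling and the num_vfs
 * value below are assumptions, not a recipe from this file:
 *
 *	int resno = PCI_IOV_RESOURCES;
 *	u32 sizes = pci_iov_vf_bar_get_sizes(pdev, resno, num_vfs);
 *	int rc, size;
 *
 *	if (!sizes)
 *		return -ENOSPC;
 *	size = __fls(sizes);
 *	rc = pci_iov_vf_bar_set_size(pdev, resno, size);
 *	if (rc)
 *		return rc;
 *	rc = pci_enable_sriov(pdev, num_vfs);
 *
 * __fls() picks the largest advertised size that still lets num_vfs VFs fit
 * in the currently assigned resource.
 */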