// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/aperture.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sysfs_emit(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
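
/*
 * Illustration only, not part of the driver: pci_config_attr(vendor,
 * "0x%04x\n") above expands to roughly the following read-only attribute:
 *
 *	static ssize_t
 *	vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
 *	{
 *		struct pci_dev *pdev;
 *
 *		pdev = to_pci_dev(dev);
 *		return sysfs_emit(buf, "0x%04x\n", pdev->vendor);
 *	}
 *	static DEVICE_ATTR_RO(vendor);
 *
 * Userspace reads it with e.g. "cat /sys/bus/pci/devices/0000:00:1f.0/vendor"
 * (the device address is only an example).
 */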

static ssize_t irq_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

#ifdef CONFIG_PCI_MSI
	/*
	 * For MSI, show the first MSI IRQ; for all other cases including
	 * MSI-X, show the legacy INTx IRQ.
	 */
	if (pdev->msi_enabled)
		return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
#endif

	return sysfs_emit(buf, "%u\n", pdev->irq);
}
static DEVICE_ATTR_RO(irq);

static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	if (dev_to_node(dev) == NUMA_NO_NODE)
		mask = cpu_online_mask;
	else
		mask = cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
}
static DEVICE_ATTR_RO(power_state);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int i;
	int max;
	resource_size_t start, end;
	size_t len = 0;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
				     (unsigned long long)start,
				     (unsigned long long)end,
				     (unsigned long long)res->flags);
	}
	return len;
}
static DEVICE_ATTR_RO(resource);
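
/*
 * Illustration only: each line emitted by resource_show() above is
 * "<start> <end> <flags>" in 0x%016llx format, one line per resource slot;
 * for example (values are made up):
 *
 *	0x00000000febd0000 0x00000000febd0fff 0x0000000000040200
 *	0x0000000000000000 0x0000000000000000 0x0000000000000000
 */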

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n",
			  pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	enum pci_bus_speed speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];

	return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
			  pci_dev->vendor, pci_dev->device,
			  pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			  (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
			  (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = 0;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoint(buf, 0, &node) < 0)
		return -EINVAL;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sysfs_emit(buf, "%u\n", subordinate ?
			  !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			  : !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X. They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);

static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR_WO(rescan);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};

static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
							    dev_rescan_store);

static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
				  remove_store);

static ssize_t bus_rescan_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
							    bus_rescan_store);
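
/*
 * Illustration only: the remove/rescan attributes above are typically used
 * from userspace like this (the device address is only an example):
 *
 *	# echo 1 > /sys/bus/pci/devices/0000:03:00.0/remove
 *	# echo 1 > /sys/bus/pci/rescan
 *
 * The first command stops and removes the device; the second rescans all
 * buses and rediscovers it.
 */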
"allowed" : "disallowed"); 426 return count; 427 } 428 static DEVICE_ATTR_RW(msi_bus); 429 430 static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count) 431 { 432 unsigned long val; 433 struct pci_bus *b = NULL; 434 435 if (kstrtoul(buf, 0, &val) < 0) 436 return -EINVAL; 437 438 if (val) { 439 pci_lock_rescan_remove(); 440 while ((b = pci_find_next_bus(b)) != NULL) 441 pci_rescan_bus(b); 442 pci_unlock_rescan_remove(); 443 } 444 return count; 445 } 446 static BUS_ATTR_WO(rescan); 447 448 static struct attribute *pci_bus_attrs[] = { 449 &bus_attr_rescan.attr, 450 NULL, 451 }; 452 453 static const struct attribute_group pci_bus_group = { 454 .attrs = pci_bus_attrs, 455 }; 456 457 const struct attribute_group *pci_bus_groups[] = { 458 &pci_bus_group, 459 NULL, 460 }; 461 462 static ssize_t dev_rescan_store(struct device *dev, 463 struct device_attribute *attr, const char *buf, 464 size_t count) 465 { 466 unsigned long val; 467 struct pci_dev *pdev = to_pci_dev(dev); 468 469 if (kstrtoul(buf, 0, &val) < 0) 470 return -EINVAL; 471 472 if (val) { 473 pci_lock_rescan_remove(); 474 pci_rescan_bus(pdev->bus); 475 pci_unlock_rescan_remove(); 476 } 477 return count; 478 } 479 static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL, 480 dev_rescan_store); 481 482 static ssize_t remove_store(struct device *dev, struct device_attribute *attr, 483 const char *buf, size_t count) 484 { 485 unsigned long val; 486 487 if (kstrtoul(buf, 0, &val) < 0) 488 return -EINVAL; 489 490 if (val && device_remove_file_self(dev, attr)) 491 pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); 492 return count; 493 } 494 static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL, 495 remove_store); 496 497 static ssize_t bus_rescan_store(struct device *dev, 498 struct device_attribute *attr, 499 const char *buf, size_t count) 500 { 501 unsigned long val; 502 struct pci_bus *bus = to_pci_bus(dev); 503 504 if (kstrtoul(buf, 0, &val) < 0) 505 return -EINVAL; 506 507 if (val) { 508 pci_lock_rescan_remove(); 509 if (!pci_is_root_bus(bus) && list_empty(&bus->devices)) 510 pci_rescan_bus_bridge_resize(bus->self); 511 else 512 pci_rescan_bus(bus); 513 pci_unlock_rescan_remove(); 514 } 515 return count; 516 } 517 static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL, 518 bus_rescan_store); 519 520 #if defined(CONFIG_PM) && defined(CONFIG_ACPI) 521 static ssize_t d3cold_allowed_store(struct device *dev, 522 struct device_attribute *attr, 523 const char *buf, size_t count) 524 { 525 struct pci_dev *pdev = to_pci_dev(dev); 526 unsigned long val; 527 528 if (kstrtoul(buf, 0, &val) < 0) 529 return -EINVAL; 530 531 pdev->d3cold_allowed = !!val; 532 pci_bridge_d3_update(pdev); 533 534 pm_runtime_resume(dev); 535 536 return count; 537 } 538 539 static ssize_t d3cold_allowed_show(struct device *dev, 540 struct device_attribute *attr, char *buf) 541 { 542 struct pci_dev *pdev = to_pci_dev(dev); 543 return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed); 544 } 545 static DEVICE_ATTR_RW(d3cold_allowed); 546 #endif 547 548 #ifdef CONFIG_OF 549 static ssize_t devspec_show(struct device *dev, 550 struct device_attribute *attr, char *buf) 551 { 552 struct pci_dev *pdev = to_pci_dev(dev); 553 struct device_node *np = pci_device_to_OF_node(pdev); 554 555 if (np == NULL) 556 return 0; 557 return sysfs_emit(buf, "%pOF\n", np); 558 } 559 static DEVICE_ATTR_RO(devspec); 560 #endif 561 562 static ssize_t driver_override_store(struct device *dev, 563 struct device_attribute *attr, 564 

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_bus_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sysfs_emit(buf, "%u\n", (pdev == vga_dev));

	return sysfs_emit(buf, "%u\n",
			  !!(pdev->resource[PCI_ROM_RESOURCE].flags &
			     IORESOURCE_ROM_SHADOW));
}
static DEVICE_ATTR_RO(boot_vga);

static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
		cond_resched();
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
				  count)) {
		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
			      current->comm, off);
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	}

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size)
		pci_user_write_config_byte(dev, off, data[off - init_off]);

	pci_config_pm_runtime_put(dev);

	return count;
}
static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);

static struct bin_attribute *pci_dev_config_attrs[] = {
	&bin_attr_config,
	NULL,
};

static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
					      struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	a->size = PCI_CFG_SPACE_SIZE;
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		a->size = PCI_CFG_SPACE_EXP_SIZE;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_config_attr_group = {
	.bin_attrs = pci_dev_config_attrs,
	.is_bin_visible = pci_dev_config_attr_is_visible,
};
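
/*
 * Illustration only, not part of the driver: the "config" file created above
 * is read from userspace with plain file I/O, e.g. (device address is only
 * an example; includes and error handling omitted):
 *
 *	int fd = open("/sys/bus/pci/devices/0000:03:00.0/config", O_RDONLY);
 *	uint8_t hdr[64];
 *
 *	pread(fd, hdr, sizeof(hdr), 0);
 *	close(fd);
 *
 * Per pci_read_config() above, unprivileged readers see only the first
 * 64 bytes (128 for CardBus); CAP_SYS_ADMIN is needed for full config space.
 */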

/*
 * llseek operation for mmappable PCI resources.
 * May be left unused if the arch doesn't provide them.
 */
static __maybe_unused loff_t
pci_llseek_resource(struct file *filep,
		    struct kobject *kobj __always_unused,
		    struct bin_attribute *attr,
		    loff_t offset, int whence)
{
	return fixed_size_llseek(filep, offset, whence, attr->size);
}

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to write to
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space. Returns -ENOSYS if the operation isn't supported.
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis. This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	if (!sysfs_initialized)
		return;

	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = 0600;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	/* See pci_create_attr() for motivation */
	b->legacy_io->llseek = pci_llseek_resource;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	b->legacy_io->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = 0600;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	/* See pci_create_attr() for motivation */
	b->legacy_mem->llseek = pci_llseek_resource;
	b->legacy_mem->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)

int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
	    start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
#else
	return -ENXIO;
#endif
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	if (res_attr->mmap) {
		res_attr->f_mapping = iomem_get_mapping;
		/*
		 * generic_file_llseek() consults f_mapping->host to determine
		 * the file size. As iomem_inode knows nothing about the
		 * attribute, it's not going to work, so override it as well.
		 */
		res_attr->llseek = pci_llseek_resource;
	}
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = 0600;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval) {
		kfree(res_attr);
		return retval;
	}

	if (write_combine)
		pdev->res_attr_wc[num] = res_attr;
	else
		pdev->res_attr[num] = res_attr;

	return 0;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @dev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif
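
/*
 * Illustration only, not part of the driver: a memory BAR exposed above as
 * "resourceN" can be mapped from userspace starting at file offset 0, e.g.
 * (path and length are only examples; error handling omitted):
 *
 *	int fd = open("/sys/bus/pci/devices/0000:03:00.0/resource0", O_RDWR);
 *	void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * pci_mmap_fits() above rejects mappings that extend beyond the BAR.
 */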

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}
static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);

static struct bin_attribute *pci_dev_rom_attrs[] = {
	&bin_attr_rom,
	NULL,
};

static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
					   struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	size_t rom_size;

	/* If the device has a ROM, try to expose it in sysfs. */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (!rom_size)
		return 0;

	a->size = rom_size;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_rom_attr_group = {
	.bin_attrs = pci_dev_rom_attrs,
	.is_bin_visible = pci_dev_rom_attr_is_visible,
};
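
/*
 * Illustration only: per pci_write_rom()/pci_read_rom() above, the ROM image
 * is read by enabling the attribute first (device address is only an
 * example):
 *
 *	# echo 1 > /sys/bus/pci/devices/0000:01:00.0/rom
 *	# cat /sys/bus/pci/devices/0000:01:00.0/rom > rom.bin
 *	# echo 0 > /sys/bus/pci/devices/0000:01:00.0/rom
 */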

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val != 1)
		return -EINVAL;

	pm_runtime_get_sync(dev);
	result = pci_reset_function(pdev);
	pm_runtime_put(dev);
	if (result < 0)
		return result;

	return count;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *pci_dev_reset_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};

static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
					     struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (!pci_reset_supported(pdev))
		return 0;

	return a->mode;
}

static const struct attribute_group pci_dev_reset_attr_group = {
	.attrs = pci_dev_reset_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};

#define pci_dev_resource_resize_attr(n)					\
static ssize_t resource##n##_resize_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char * buf)			\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	ssize_t ret;							\
									\
	pci_config_pm_runtime_get(pdev);				\
									\
	ret = sysfs_emit(buf, "%016llx\n",				\
			 (u64)pci_rebar_get_possible_sizes(pdev, n));	\
									\
	pci_config_pm_runtime_put(pdev);				\
									\
	return ret;							\
}									\
									\
static ssize_t resource##n##_resize_store(struct device *dev,		\
					  struct device_attribute *attr,\
					  const char *buf, size_t count)\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	unsigned long size, flags;					\
	int ret, i;							\
	u16 cmd;							\
									\
	if (kstrtoul(buf, 0, &size) < 0)				\
		return -EINVAL;						\
									\
	device_lock(dev);						\
	if (dev->driver) {						\
		ret = -EBUSY;						\
		goto unlock;						\
	}								\
									\
	pci_config_pm_runtime_get(pdev);				\
									\
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {		\
		ret = aperture_remove_conflicting_pci_devices(pdev,	\
						"resourceN_resize");	\
		if (ret)						\
			goto pm_put;					\
	}								\
									\
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);			\
	pci_write_config_word(pdev, PCI_COMMAND,			\
			      cmd & ~PCI_COMMAND_MEMORY);		\
									\
	flags = pci_resource_flags(pdev, n);				\
									\
	pci_remove_resource_files(pdev);				\
									\
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {			\
		if (pci_resource_len(pdev, i) &&			\
		    pci_resource_flags(pdev, i) == flags)		\
			pci_release_resource(pdev, i);			\
	}								\
									\
	ret = pci_resize_resource(pdev, n, size);			\
									\
	pci_assign_unassigned_bus_resources(pdev->bus);			\
									\
	if (pci_create_resource_files(pdev))				\
		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
									\
	pci_write_config_word(pdev, PCI_COMMAND, cmd);			\
pm_put:									\
	pci_config_pm_runtime_put(pdev);				\
unlock:									\
	device_unlock(dev);						\
									\
	return ret ? ret : count;					\
}									\
static DEVICE_ATTR_RW(resource##n##_resize)

pci_dev_resource_resize_attr(0);
pci_dev_resource_resize_attr(1);
pci_dev_resource_resize_attr(2);
pci_dev_resource_resize_attr(3);
pci_dev_resource_resize_attr(4);
pci_dev_resource_resize_attr(5);

static struct attribute *resource_resize_attrs[] = {
	&dev_attr_resource0_resize.attr,
	&dev_attr_resource1_resize.attr,
	&dev_attr_resource2_resize.attr,
	&dev_attr_resource3_resize.attr,
	&dev_attr_resource4_resize.attr,
	&dev_attr_resource5_resize.attr,
	NULL,
};

static umode_t resource_resize_is_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
}

static const struct attribute_group pci_dev_resource_resize_group = {
	.attrs = resource_resize_attrs,
	.is_visible = resource_resize_is_visible,
};

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return -EACCES;

	return pci_create_resource_files(pdev);
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_resource_files(pdev);
}

static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	struct pci_bus *pbus = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	while ((pbus = pci_find_next_bus(pbus)))
		pci_create_legacy_files(pbus);

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&dev_attr_boot_vga.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
		return a->mode;

	return 0;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_attr_remove.attr,
	&dev_attr_dev_rescan.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	&pci_dev_config_attr_group,
	&pci_dev_rom_attr_group,
	&pci_dev_reset_attr_group,
	&pci_dev_reset_method_attr_group,
	&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
	&pci_dev_smbios_attr_group,
#endif
#ifdef CONFIG_ACPI
	&pci_dev_acpi_attr_group,
#endif
	&pci_dev_resource_resize_group,
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_pf_dev_attr_group,
	&sriov_vf_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
	&aspm_ctrl_attr_group,
#endif
	NULL,
};

const struct device_type pci_dev_type = {
	.groups = pci_dev_attr_groups,
};