// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sprintf(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");

static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

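/*
 * Show the CPUs considered "local" to this device: on NUMA systems the
 * CPUs of the device's node (or all online CPUs if the node is unknown),
 * otherwise the CPUs reachable from the device's bus.
 */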
static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
					  cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	char *str = buf;
	int i;
	int max;
	resource_size_t start, end;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
			       (unsigned long long)start,
			       (unsigned long long)end,
			       (unsigned long long)res->flags);
	}
	return (str - buf);
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%s\n", PCIE_SPEED2STR(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	const char *speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	switch (linkstat & PCI_EXP_LNKSTA_CLS) {
	case PCI_EXP_LNKSTA_CLS_32_0GB:
		speed = "32 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_16_0GB:
		speed = "16 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_8_0GB:
		speed = "8 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_5_0GB:
		speed = "5 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_2_5GB:
		speed = "2.5 GT/s";
		break;
	default:
		speed = "Unknown speed";
	}

	return sprintf(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(current_link_speed);

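/* Negotiated link width, decoded from the PCIe Link Status register */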
static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n",
		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
		       pci_dev->vendor, pci_dev->device,
		       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
		       (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

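/*
 * The "numa_node" attribute is writable so that a broken firmware
 * assignment can be overridden from user space; doing so taints the
 * kernel with TAINT_FIRMWARE_WORKAROUND.
 */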
#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = kstrtoint(buf, 0, &node);
	if (ret)
		return ret;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sprintf(buf, "%u\n", subordinate ?
		       !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
		       : !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);

static ssize_t rescan_store(struct bus_type *bus, const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR_WO(rescan);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};

static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_rescan_attr = __ATTR(rescan,
							(S_IWUSR|S_IWGRP),
							NULL, dev_rescan_store);

static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
							(S_IWUSR|S_IWGRP),
							NULL, remove_store);

static ssize_t dev_bus_rescan_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	if (pdev->d3cold_allowed)
		pci_d3cold_enable(pdev);
	else
		pci_d3cold_disable(pdev);

	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sprintf(buf, "%pOF", np);
}
static DEVICE_ATTR_RO(devspec);
#endif

#ifdef CONFIG_PCI_IOV
static ssize_t sriov_totalvfs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}


static ssize_t sriov_numvfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
}

/*
 * num_vfs > 0; number of VFs to enable
 * num_vfs = 0; disable all VFs
 *
 * Note: SRIOV spec doesn't allow partial VF
 *       disable, so it's all or none.
 */
static ssize_t sriov_numvfs_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;
	u16 num_vfs;

	ret = kstrtou16(buf, 0, &num_vfs);
	if (ret < 0)
		return ret;

	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -ERANGE;

	device_lock(&pdev->dev);

	if (num_vfs == pdev->sriov->num_VFs)
		goto exit;

	/* is PF driver loaded w/callback */
	if (!pdev->driver || !pdev->driver->sriov_configure) {
		pci_info(pdev, "Driver doesn't support SRIOV configuration via sysfs\n");
		ret = -ENOENT;
		goto exit;
	}

	if (num_vfs == 0) {
		/* disable VFs */
		ret = pdev->driver->sriov_configure(pdev, 0);
		goto exit;
	}

	/* enable VFs */
	if (pdev->sriov->num_VFs) {
		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
			 pdev->sriov->num_VFs, num_vfs);
		ret = -EBUSY;
		goto exit;
	}

	ret = pdev->driver->sriov_configure(pdev, num_vfs);
	if (ret < 0)
		goto exit;

	if (ret != num_vfs)
		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
			 num_vfs, ret);

exit:
	device_unlock(&pdev->dev);

	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sriov_offset_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->offset);
}

static ssize_t sriov_stride_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->stride);
}

static ssize_t sriov_vf_device_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%x\n", pdev->sriov->vf_device);
}

static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
}

static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool drivers_autoprobe;

	if (kstrtobool(buf, &drivers_autoprobe) < 0)
		return -EINVAL;

	pdev->sriov->drivers_autoprobe = drivers_autoprobe;

	return count;
}

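/* Static attribute definitions for the SR-IOV group declared further down */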
static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
		__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
		       sriov_numvfs_show, sriov_numvfs_store);
static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset);
static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride);
static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device);
static struct device_attribute sriov_drivers_autoprobe_attr =
		__ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
		       sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
#endif /* CONFIG_PCI_IOV */

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

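/*
 * "boot_vga" reports whether this is the VGA device the system booted
 * with: compare against the VGA arbiter's default device if one is set,
 * otherwise fall back to checking for a shadowed ROM.
 */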
static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sprintf(buf, "%u\n", (pdev == vga_dev));

	return sprintf(buf, "%u\n",
		!!(pdev->resource[PCI_ROM_RESOURCE].flags &
		   IORESOURCE_ROM_SHADOW));
}
static struct device_attribute vga_attr = __ATTR_RO(boot_vga);

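/*
 * Read/write handlers for the "config" binary attribute.  Accesses are
 * split into byte, word and dword config cycles so that arbitrary offsets
 * and lengths are handled; unprivileged readers only see the first 64
 * bytes (128 for CardBus bridges) because several chips lock up when
 * undefined config space is read.
 */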
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to write to
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space. Returns -ENOSYS if the operation isn't supported
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)

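/*
 * pci_mmap_fits() checks that the vm_area a user passed to mmap() lies
 * entirely within the given BAR.  For procfs mappings the BAR start is
 * first translated to the user-visible address via pci_resource_to_user().
 */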
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
	    start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

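/*
 * read()/write() on a resourceN file that backs an I/O port BAR is
 * forwarded to the port as a single 1-, 2- or 4-byte in/out access.
 */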
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		pdev->res_attr_wc[num] = res_attr;
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		pdev->res_attr[num] = res_attr;
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = S_IRUSR | S_IWUSR;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval)
		kfree(res_attr);

	return retval;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @dev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_ROM_RESOURCE; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !HAVE_PCI_MMAP */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif /* HAVE_PCI_MMAP */

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
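/*
 * Note that the "rom" file is created read-protected until the user
 * writes a non-zero value to it (see pci_write_rom() above); only then
 * does pci_read_rom() map and copy out the ROM contents.
 */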
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}

static const struct bin_attribute pci_config_attr = {
	.attr = {
		.name = "config",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = PCI_CFG_SPACE_SIZE,
	.read = pci_read_config,
	.write = pci_write_config,
};

static const struct bin_attribute pcie_config_attr = {
	.attr = {
		.name = "config",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = PCI_CFG_SPACE_EXP_SIZE,
	.read = pci_read_config,
	.write = pci_write_config,
};

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	if (val != 1)
		return -EINVAL;

	pm_runtime_get_sync(dev);
	result = pci_reset_function(pdev);
	pm_runtime_put(dev);
	if (result < 0)
		return result;

	return count;
}

static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);

static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
	int retval;

	pcie_vpd_create_sysfs_dev_files(dev);
	pcie_aspm_create_sysfs_dev_files(dev);

	if (dev->reset_fn) {
		retval = device_create_file(&dev->dev, &reset_attr);
		if (retval)
			goto error;
	}
	return 0;

error:
	pcie_aspm_remove_sysfs_dev_files(dev);
	pcie_vpd_remove_sysfs_dev_files(dev);
	return retval;
}

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	int retval;
	int rom_size;
	struct bin_attribute *attr;

	if (!sysfs_initialized)
		return -EACCES;

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
	if (retval)
		goto err;

	retval = pci_create_resource_files(pdev);
	if (retval)
		goto err_config_file;

	/* If the device has a ROM, try to expose it in sysfs. */
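	/*
	 * The "rom" attribute is sized to the ROM BAR and allocated
	 * dynamically because not every device has an expansion ROM.
	 */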
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (rom_size) {
		attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
		if (!attr) {
			retval = -ENOMEM;
			goto err_resource_files;
		}
		sysfs_bin_attr_init(attr);
		attr->size = rom_size;
		attr->attr.name = "rom";
		attr->attr.mode = S_IRUSR | S_IWUSR;
		attr->read = pci_read_rom;
		attr->write = pci_write_rom;
		retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
		if (retval) {
			kfree(attr);
			goto err_resource_files;
		}
		pdev->rom_attr = attr;
	}

	/* add sysfs entries for various capabilities */
	retval = pci_create_capabilities_sysfs(pdev);
	if (retval)
		goto err_rom_file;

	pci_create_firmware_label_files(pdev);

	return 0;

err_rom_file:
	if (pdev->rom_attr) {
		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
		kfree(pdev->rom_attr);
		pdev->rom_attr = NULL;
	}
err_resource_files:
	pci_remove_resource_files(pdev);
err_config_file:
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
err:
	return retval;
}

static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
	pcie_vpd_remove_sysfs_dev_files(dev);
	pcie_aspm_remove_sysfs_dev_files(dev);
	if (dev->reset_fn) {
		device_remove_file(&dev->dev, &reset_attr);
		dev->reset_fn = 0;
	}
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_capabilities_sysfs(pdev);

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);

	pci_remove_resource_files(pdev);

	if (pdev->rom_attr) {
		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
		kfree(pdev->rom_attr);
		pdev->rom_attr = NULL;
	}

	pci_remove_firmware_label_files(pdev);
}

/*
 * Run as a late initcall: walk every PCI device already discovered during
 * boot and create its sysfs files.  sysfs_initialized gates file creation
 * until this point.
 */
static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&vga_attr.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &vga_attr.attr)
		if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
			return 0;

	return a->mode;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_remove_attr.attr,
	&dev_rescan_attr.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

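/*
 * Bridge and PCIe specific attributes are only exposed when the device
 * is actually a bridge or a PCIe device, respectively.
 */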
static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	NULL,
};

static const struct attribute_group pci_bridge_group = {
	.attrs = pci_bridge_attrs,
};

const struct attribute_group *pci_bridge_groups[] = {
	&pci_bridge_group,
	NULL,
};

static const struct attribute_group pcie_dev_group = {
	.attrs = pcie_dev_attrs,
};

const struct attribute_group *pcie_dev_groups[] = {
	&pcie_dev_group,
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

#ifdef CONFIG_PCI_IOV
static struct attribute *sriov_dev_attrs[] = {
	&sriov_totalvfs_attr.attr,
	&sriov_numvfs_attr.attr,
	&sriov_offset_attr.attr,
	&sriov_stride_attr.attr,
	&sriov_vf_device_attr.attr,
	&sriov_drivers_autoprobe_attr.attr,
	NULL,
};

static umode_t sriov_attrs_are_visible(struct kobject *kobj,
				       struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (!dev_is_pf(dev))
		return 0;

	return a->mode;
}

static const struct attribute_group sriov_dev_attr_group = {
	.attrs = sriov_dev_attrs,
	.is_visible = sriov_attrs_are_visible,
};
#endif /* CONFIG_PCI_IOV */

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
#endif
	NULL,
};

const struct device_type pci_dev_type = {
	.groups = pci_dev_attr_groups,
};