// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sprintf(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");

static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
					  cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	char *str = buf;
	int i;
	int max;
	resource_size_t start, end;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
			       (unsigned long long)start,
			       (unsigned long long)end,
			       (unsigned long long)res->flags);
	}
	return (str - buf);
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%s\n", PCIE_SPEED2STR(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	const char *speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	switch (linkstat & PCI_EXP_LNKSTA_CLS) {
	case PCI_EXP_LNKSTA_CLS_16_0GB:
		speed = "16 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_8_0GB:
		speed = "8 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_5_0GB:
		speed = "5 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_2_5GB:
		speed = "2.5 GT/s";
		break;
	default:
		speed = "Unknown speed";
	}

	return sprintf(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n",
		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
		       pci_dev->vendor, pci_dev->device,
		       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
		       (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!val) {
		if (pci_is_enabled(pdev))
			pci_disable_device(pdev);
		else
			result = -EIO;
	} else
		result = pci_enable_device(pdev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = kstrtoint(buf, 0, &node);
	if (ret)
		return ret;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d.  Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sprintf(buf, "%u\n", subordinate ?
		       !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			   : !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);

static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};

static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_rescan_attr = __ATTR(rescan,
							(S_IWUSR|S_IWGRP),
							NULL, dev_rescan_store);

static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static struct device_attribute dev_remove_attr = __ATTR(remove,
							(S_IWUSR|S_IWGRP),
							NULL, remove_store);

static ssize_t dev_bus_rescan_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	if (pdev->d3cold_allowed)
		pci_d3cold_enable(pdev);
	else
		pci_d3cold_disable(pdev);

	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sprintf(buf, "%pOF", np);
}
static DEVICE_ATTR_RO(devspec);
#endif

#ifdef CONFIG_PCI_IOV
static ssize_t sriov_totalvfs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}


static ssize_t sriov_numvfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
}

/*
 * num_vfs > 0; number of VFs to enable
 * num_vfs = 0; disable all VFs
 *
 * Note: SRIOV spec doesn't allow partial VF
 *       disable, so it's all or none.
 */
static ssize_t sriov_numvfs_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;
	u16 num_vfs;

	ret = kstrtou16(buf, 0, &num_vfs);
	if (ret < 0)
		return ret;

	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -ERANGE;

	device_lock(&pdev->dev);

	if (num_vfs == pdev->sriov->num_VFs)
		goto exit;

	/* is PF driver loaded w/callback */
	if (!pdev->driver || !pdev->driver->sriov_configure) {
		pci_info(pdev, "Driver doesn't support SRIOV configuration via sysfs\n");
		ret = -ENOENT;
		goto exit;
	}

	if (num_vfs == 0) {
		/* disable VFs */
		ret = pdev->driver->sriov_configure(pdev, 0);
		goto exit;
	}

	/* enable VFs */
	if (pdev->sriov->num_VFs) {
		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
			 pdev->sriov->num_VFs, num_vfs);
		ret = -EBUSY;
		goto exit;
	}

	ret = pdev->driver->sriov_configure(pdev, num_vfs);
	if (ret < 0)
		goto exit;

	if (ret != num_vfs)
		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
			 num_vfs, ret);

exit:
	device_unlock(&pdev->dev);

	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sriov_offset_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->offset);
}

static ssize_t sriov_stride_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->stride);
}

static ssize_t sriov_vf_device_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%x\n", pdev->sriov->vf_device);
}

static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
}

static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool drivers_autoprobe;

	if (kstrtobool(buf, &drivers_autoprobe) < 0)
		return -EINVAL;

	pdev->sriov->drivers_autoprobe = drivers_autoprobe;

	return count;
}

static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
		__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
		       sriov_numvfs_show, sriov_numvfs_store);
static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset);
static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride);
static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device);
static struct device_attribute sriov_drivers_autoprobe_attr =
		__ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
		       sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
#endif /* CONFIG_PCI_IOV */

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sprintf(buf, "%u\n", (pdev == vga_dev));

	return sprintf(buf, "%u\n",
		!!(pdev->resource[PCI_ROM_RESOURCE].flags &
		   IORESOURCE_ROM_SHADOW));
}
static struct device_attribute vga_attr = __ATTR_RO(boot_vga);

static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space. Returns -ENOSYS if the operation isn't supported
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
	return;
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)

int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
	    start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		pdev->res_attr_wc[num] = res_attr;
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		pdev->res_attr[num] = res_attr;
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = S_IRUSR | S_IWUSR;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval)
		kfree(res_attr);

	return retval;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @dev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_ROM_RESOURCE; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !HAVE_PCI_MMAP */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif /* HAVE_PCI_MMAP */

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of byte in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}

static const struct bin_attribute pci_config_attr = {
	.attr =	{
		.name = "config",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = PCI_CFG_SPACE_SIZE,
	.read = pci_read_config,
	.write = pci_write_config,
};

static const struct bin_attribute pcie_config_attr = {
	.attr =	{
		.name = "config",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = PCI_CFG_SPACE_EXP_SIZE,
	.read = pci_read_config,
	.write = pci_write_config,
};

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	if (val != 1)
		return -EINVAL;

	result = pci_reset_function(pdev);
	if (result < 0)
		return result;

	return count;
}

static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);

static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
	int retval;

	pcie_vpd_create_sysfs_dev_files(dev);
	pcie_aspm_create_sysfs_dev_files(dev);

	if (dev->reset_fn) {
		retval = device_create_file(&dev->dev, &reset_attr);
		if (retval)
			goto error;
	}
	return 0;

error:
	pcie_aspm_remove_sysfs_dev_files(dev);
	pcie_vpd_remove_sysfs_dev_files(dev);
	return retval;
}

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	int retval;
	int rom_size;
	struct bin_attribute *attr;

	if (!sysfs_initialized)
		return -EACCES;

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
	if (retval)
		goto err;

	retval = pci_create_resource_files(pdev);
	if (retval)
		goto err_config_file;

	/* If the device has a ROM, try to expose it in sysfs. */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (rom_size) {
		attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
		if (!attr) {
			retval = -ENOMEM;
			goto err_resource_files;
		}
		sysfs_bin_attr_init(attr);
		attr->size = rom_size;
		attr->attr.name = "rom";
		attr->attr.mode = S_IRUSR | S_IWUSR;
		attr->read = pci_read_rom;
		attr->write = pci_write_rom;
		retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
		if (retval) {
			kfree(attr);
			goto err_resource_files;
		}
		pdev->rom_attr = attr;
	}

	/* add sysfs entries for various capabilities */
	retval = pci_create_capabilities_sysfs(pdev);
	if (retval)
		goto err_rom_file;

	pci_create_firmware_label_files(pdev);

	return 0;

err_rom_file:
	if (pdev->rom_attr) {
		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
		kfree(pdev->rom_attr);
		pdev->rom_attr = NULL;
	}
err_resource_files:
	pci_remove_resource_files(pdev);
err_config_file:
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
err:
	return retval;
}

static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
	pcie_vpd_remove_sysfs_dev_files(dev);
	pcie_aspm_remove_sysfs_dev_files(dev);
	if (dev->reset_fn) {
		device_remove_file(&dev->dev, &reset_attr);
		dev->reset_fn = 0;
	}
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_capabilities_sysfs(pdev);

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);

	pci_remove_resource_files(pdev);

	if (pdev->rom_attr) {
		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
		kfree(pdev->rom_attr);
		pdev->rom_attr = NULL;
	}

	pci_remove_firmware_label_files(pdev);
}

static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&vga_attr.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &vga_attr.attr)
		if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
			return 0;

	return a->mode;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_remove_attr.attr,
	&dev_rescan_attr.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	NULL,
};

static const struct attribute_group pci_bridge_group = {
	.attrs = pci_bridge_attrs,
};

const struct attribute_group *pci_bridge_groups[] = {
	&pci_bridge_group,
	NULL,
};

static const struct attribute_group pcie_dev_group = {
	.attrs = pcie_dev_attrs,
};

const struct attribute_group *pcie_dev_groups[] = {
	&pcie_dev_group,
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

#ifdef CONFIG_PCI_IOV
static struct attribute *sriov_dev_attrs[] = {
	&sriov_totalvfs_attr.attr,
	&sriov_numvfs_attr.attr,
	&sriov_offset_attr.attr,
	&sriov_stride_attr.attr,
	&sriov_vf_device_attr.attr,
	&sriov_drivers_autoprobe_attr.attr,
	NULL,
};

static umode_t sriov_attrs_are_visible(struct kobject *kobj,
				       struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (!dev_is_pf(dev))
		return 0;

	return a->mode;
}

static const struct attribute_group sriov_dev_attr_group = {
	.attrs = sriov_dev_attrs,
	.is_visible = sriov_attrs_are_visible,
};
#endif /* CONFIG_PCI_IOV */

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
	NULL,
};

const struct device_type pci_dev_type = {
	.groups = pci_dev_attr_groups,
};