1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
4 * (C) Copyright 2002-2004 IBM Corp.
5 * (C) Copyright 2003 Matthew Wilcox
6 * (C) Copyright 2003 Hewlett-Packard
7 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
8 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
9 *
10 * File attributes for PCI devices
11 *
12 * Modeled after usb's driverfs.c
13 */
14
15 #include <linux/bitfield.h>
16 #include <linux/cleanup.h>
17 #include <linux/kernel.h>
18 #include <linux/sched.h>
19 #include <linux/pci.h>
20 #include <linux/stat.h>
21 #include <linux/export.h>
22 #include <linux/topology.h>
23 #include <linux/mm.h>
24 #include <linux/fs.h>
25 #include <linux/capability.h>
26 #include <linux/security.h>
27 #include <linux/slab.h>
28 #include <linux/vgaarb.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/msi.h>
31 #include <linux/of.h>
32 #include <linux/aperture.h>
33 #include "pci.h"
34
35 #ifndef ARCH_PCI_DEV_GROUPS
36 #define ARCH_PCI_DEV_GROUPS
37 #endif
38
39 static int sysfs_initialized; /* = 0 */
40
41 /* show configuration fields */
42 #define pci_config_attr(field, format_string) \
43 static ssize_t \
44 field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
45 { \
46 struct pci_dev *pdev; \
47 \
48 pdev = to_pci_dev(dev); \
49 return sysfs_emit(buf, format_string, pdev->field); \
50 } \
51 static DEVICE_ATTR_RO(field)
52
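/*
 * Each invocation below generates a read-only sysfs attribute for the named
 * struct pci_dev field: e.g. pci_config_attr(vendor, "0x%04x\n") expands to
 * vendor_show() plus dev_attr_vendor, which is listed in pci_dev_attrs[].
 */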
53 pci_config_attr(vendor, "0x%04x\n");
54 pci_config_attr(device, "0x%04x\n");
55 pci_config_attr(subsystem_vendor, "0x%04x\n");
56 pci_config_attr(subsystem_device, "0x%04x\n");
57 pci_config_attr(revision, "0x%02x\n");
58 pci_config_attr(class, "0x%06x\n");
59
60 static ssize_t irq_show(struct device *dev,
61 struct device_attribute *attr,
62 char *buf)
63 {
64 struct pci_dev *pdev = to_pci_dev(dev);
65
66 #ifdef CONFIG_PCI_MSI
67 /*
68 * For MSI, show the first MSI IRQ; for all other cases including
69 * MSI-X, show the legacy INTx IRQ.
70 */
71 if (pdev->msi_enabled)
72 return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
73 #endif
74
75 return sysfs_emit(buf, "%u\n", pdev->irq);
76 }
77 static DEVICE_ATTR_RO(irq);
78
79 static ssize_t broken_parity_status_show(struct device *dev,
80 struct device_attribute *attr,
81 char *buf)
82 {
83 struct pci_dev *pdev = to_pci_dev(dev);
84 return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
85 }
86
87 static ssize_t broken_parity_status_store(struct device *dev,
88 struct device_attribute *attr,
89 const char *buf, size_t count)
90 {
91 struct pci_dev *pdev = to_pci_dev(dev);
92 unsigned long val;
93
94 if (kstrtoul(buf, 0, &val) < 0)
95 return -EINVAL;
96
97 pdev->broken_parity_status = !!val;
98
99 return count;
100 }
101 static DEVICE_ATTR_RW(broken_parity_status);
102
103 static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
104 struct device_attribute *attr, char *buf)
105 {
106 const struct cpumask *mask;
107
108 #ifdef CONFIG_NUMA
109 if (dev_to_node(dev) == NUMA_NO_NODE)
110 mask = cpu_online_mask;
111 else
112 mask = cpumask_of_node(dev_to_node(dev));
113 #else
114 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
115 #endif
116 return cpumap_print_to_pagebuf(list, buf, mask);
117 }
118
119 static ssize_t local_cpus_show(struct device *dev,
120 struct device_attribute *attr, char *buf)
121 {
122 return pci_dev_show_local_cpu(dev, false, attr, buf);
123 }
124 static DEVICE_ATTR_RO(local_cpus);
125
126 static ssize_t local_cpulist_show(struct device *dev,
127 struct device_attribute *attr, char *buf)
128 {
129 return pci_dev_show_local_cpu(dev, true, attr, buf);
130 }
131 static DEVICE_ATTR_RO(local_cpulist);
132
133 /*
134 * PCI Bus Class Devices
135 */
136 static ssize_t cpuaffinity_show(struct device *dev,
137 struct device_attribute *attr, char *buf)
138 {
139 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
140
141 return cpumap_print_to_pagebuf(false, buf, cpumask);
142 }
143 static DEVICE_ATTR_RO(cpuaffinity);
144
145 static ssize_t cpulistaffinity_show(struct device *dev,
146 struct device_attribute *attr, char *buf)
147 {
148 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
149
150 return cpumap_print_to_pagebuf(true, buf, cpumask);
151 }
152 static DEVICE_ATTR_RO(cpulistaffinity);
153
154 static ssize_t power_state_show(struct device *dev,
155 struct device_attribute *attr, char *buf)
156 {
157 struct pci_dev *pdev = to_pci_dev(dev);
158
159 return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
160 }
161 static DEVICE_ATTR_RO(power_state);
162
163 /* show resources */
164 static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
165 char *buf)
166 {
167 struct pci_dev *pci_dev = to_pci_dev(dev);
168 int i;
169 int max;
170 resource_size_t start, end;
171 size_t len = 0;
172
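/* Bridges report their window resources too; other devices stop after the standard BARs and the ROM */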
173 if (pci_dev->subordinate)
174 max = DEVICE_COUNT_RESOURCE;
175 else
176 max = PCI_BRIDGE_RESOURCES;
177
178 for (i = 0; i < max; i++) {
179 struct resource *res = &pci_dev->resource[i];
180 pci_resource_to_user(pci_dev, i, res, &start, &end);
181 len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
182 (unsigned long long)start,
183 (unsigned long long)end,
184 (unsigned long long)res->flags);
185 }
186 return len;
187 }
188 static DEVICE_ATTR_RO(resource);
189
190 static ssize_t max_link_speed_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
192 {
193 struct pci_dev *pdev = to_pci_dev(dev);
194
195 return sysfs_emit(buf, "%s\n",
196 pci_speed_string(pcie_get_speed_cap(pdev)));
197 }
198 static DEVICE_ATTR_RO(max_link_speed);
199
200 static ssize_t max_link_width_show(struct device *dev,
201 struct device_attribute *attr, char *buf)
202 {
203 struct pci_dev *pdev = to_pci_dev(dev);
204
205 return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
206 }
207 static DEVICE_ATTR_RO(max_link_width);
208
209 static ssize_t current_link_speed_show(struct device *dev,
210 struct device_attribute *attr, char *buf)
211 {
212 struct pci_dev *pci_dev = to_pci_dev(dev);
213 u16 linkstat;
214 int err;
215 enum pci_bus_speed speed;
216
217 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
218 if (err)
219 return -EINVAL;
220
221 speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
222
223 return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
224 }
225 static DEVICE_ATTR_RO(current_link_speed);
226
227 static ssize_t current_link_width_show(struct device *dev,
228 struct device_attribute *attr, char *buf)
229 {
230 struct pci_dev *pci_dev = to_pci_dev(dev);
231 u16 linkstat;
232 int err;
233
234 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
235 if (err)
236 return -EINVAL;
237
238 return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
239 }
240 static DEVICE_ATTR_RO(current_link_width);
241
242 static ssize_t secondary_bus_number_show(struct device *dev,
243 struct device_attribute *attr,
244 char *buf)
245 {
246 struct pci_dev *pci_dev = to_pci_dev(dev);
247 u8 sec_bus;
248 int err;
249
250 err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
251 if (err)
252 return -EINVAL;
253
254 return sysfs_emit(buf, "%u\n", sec_bus);
255 }
256 static DEVICE_ATTR_RO(secondary_bus_number);
257
258 static ssize_t subordinate_bus_number_show(struct device *dev,
259 struct device_attribute *attr,
260 char *buf)
261 {
262 struct pci_dev *pci_dev = to_pci_dev(dev);
263 u8 sub_bus;
264 int err;
265
266 err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
267 if (err)
268 return -EINVAL;
269
270 return sysfs_emit(buf, "%u\n", sub_bus);
271 }
272 static DEVICE_ATTR_RO(subordinate_bus_number);
273
274 static ssize_t ari_enabled_show(struct device *dev,
275 struct device_attribute *attr,
276 char *buf)
277 {
278 struct pci_dev *pci_dev = to_pci_dev(dev);
279
280 return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
281 }
282 static DEVICE_ATTR_RO(ari_enabled);
283
284 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
285 char *buf)
286 {
287 struct pci_dev *pci_dev = to_pci_dev(dev);
288
289 return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
290 pci_dev->vendor, pci_dev->device,
291 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
292 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
293 (u8)(pci_dev->class));
294 }
295 static DEVICE_ATTR_RO(modalias);
296
297 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
298 const char *buf, size_t count)
299 {
300 struct pci_dev *pdev = to_pci_dev(dev);
301 unsigned long val;
302 ssize_t result = 0;
303
304 /* this can crash the machine when done on the "wrong" device */
305 if (!capable(CAP_SYS_ADMIN))
306 return -EPERM;
307
308 if (kstrtoul(buf, 0, &val) < 0)
309 return -EINVAL;
310
311 device_lock(dev);
312 if (dev->driver)
313 result = -EBUSY;
314 else if (val)
315 result = pci_enable_device(pdev);
316 else if (pci_is_enabled(pdev))
317 pci_disable_device(pdev);
318 else
319 result = -EIO;
320 device_unlock(dev);
321
322 return result < 0 ? result : count;
323 }
324
325 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
326 char *buf)
327 {
328 struct pci_dev *pdev;
329
330 pdev = to_pci_dev(dev);
331 return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
332 }
333 static DEVICE_ATTR_RW(enable);
334
335 #ifdef CONFIG_NUMA
336 static ssize_t numa_node_store(struct device *dev,
337 struct device_attribute *attr, const char *buf,
338 size_t count)
339 {
340 struct pci_dev *pdev = to_pci_dev(dev);
341 int node;
342
343 if (!capable(CAP_SYS_ADMIN))
344 return -EPERM;
345
346 if (kstrtoint(buf, 0, &node) < 0)
347 return -EINVAL;
348
349 if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
350 return -EINVAL;
351
352 if (node != NUMA_NO_NODE && !node_online(node))
353 return -EINVAL;
354
355 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
356 pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
357 node);
358
359 dev->numa_node = node;
360 return count;
361 }
362
363 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
364 char *buf)
365 {
366 return sysfs_emit(buf, "%d\n", dev->numa_node);
367 }
368 static DEVICE_ATTR_RW(numa_node);
369 #endif
370
371 static ssize_t dma_mask_bits_show(struct device *dev,
372 struct device_attribute *attr, char *buf)
373 {
374 struct pci_dev *pdev = to_pci_dev(dev);
375
376 return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
377 }
378 static DEVICE_ATTR_RO(dma_mask_bits);
379
380 static ssize_t consistent_dma_mask_bits_show(struct device *dev,
381 struct device_attribute *attr,
382 char *buf)
383 {
384 return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
385 }
386 static DEVICE_ATTR_RO(consistent_dma_mask_bits);
387
388 static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
389 char *buf)
390 {
391 struct pci_dev *pdev = to_pci_dev(dev);
392 struct pci_bus *subordinate = pdev->subordinate;
393
394 return sysfs_emit(buf, "%u\n", subordinate ?
395 !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
396 : !pdev->no_msi);
397 }
398
399 static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
400 const char *buf, size_t count)
401 {
402 struct pci_dev *pdev = to_pci_dev(dev);
403 struct pci_bus *subordinate = pdev->subordinate;
404 unsigned long val;
405
406 if (!capable(CAP_SYS_ADMIN))
407 return -EPERM;
408
409 if (kstrtoul(buf, 0, &val) < 0)
410 return -EINVAL;
411
412 /*
413 * "no_msi" and "bus_flags" only affect what happens when a driver
414 * requests MSI or MSI-X. They don't affect any drivers that have
415 * already requested MSI or MSI-X.
416 */
417 if (!subordinate) {
418 pdev->no_msi = !val;
419 pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
420 val ? "allowed" : "disallowed");
421 return count;
422 }
423
424 if (val)
425 subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
426 else
427 subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
428
429 dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
430 val ? "allowed" : "disallowed");
431 return count;
432 }
433 static DEVICE_ATTR_RW(msi_bus);
434
435 static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count)
436 {
437 unsigned long val;
438 struct pci_bus *b = NULL;
439
440 if (kstrtoul(buf, 0, &val) < 0)
441 return -EINVAL;
442
443 if (val) {
444 pci_lock_rescan_remove();
445 while ((b = pci_find_next_bus(b)) != NULL)
446 pci_rescan_bus(b);
447 pci_unlock_rescan_remove();
448 }
449 return count;
450 }
451 static BUS_ATTR_WO(rescan);
452
453 static struct attribute *pci_bus_attrs[] = {
454 &bus_attr_rescan.attr,
455 NULL,
456 };
457
458 static const struct attribute_group pci_bus_group = {
459 .attrs = pci_bus_attrs,
460 };
461
462 const struct attribute_group *pci_bus_groups[] = {
463 &pci_bus_group,
464 NULL,
465 };
466
467 static ssize_t dev_rescan_store(struct device *dev,
468 struct device_attribute *attr, const char *buf,
469 size_t count)
470 {
471 unsigned long val;
472 struct pci_dev *pdev = to_pci_dev(dev);
473
474 if (kstrtoul(buf, 0, &val) < 0)
475 return -EINVAL;
476
477 if (val) {
478 pci_lock_rescan_remove();
479 pci_rescan_bus(pdev->bus);
480 pci_unlock_rescan_remove();
481 }
482 return count;
483 }
484 static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
485 dev_rescan_store);
486
487 static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
488 const char *buf, size_t count)
489 {
490 unsigned long val;
491
492 if (kstrtoul(buf, 0, &val) < 0)
493 return -EINVAL;
494
495 if (val && device_remove_file_self(dev, attr))
496 pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
497 return count;
498 }
499 static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
500 remove_store);
501
502 static ssize_t bus_rescan_store(struct device *dev,
503 struct device_attribute *attr,
504 const char *buf, size_t count)
505 {
506 unsigned long val;
507 struct pci_bus *bus = to_pci_bus(dev);
508
509 if (kstrtoul(buf, 0, &val) < 0)
510 return -EINVAL;
511
512 if (val) {
513 pci_lock_rescan_remove();
514 if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
515 pci_rescan_bus_bridge_resize(bus->self);
516 else
517 pci_rescan_bus(bus);
518 pci_unlock_rescan_remove();
519 }
520 return count;
521 }
522 static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
523 bus_rescan_store);
524
525 static ssize_t reset_subordinate_store(struct device *dev,
526 struct device_attribute *attr,
527 const char *buf, size_t count)
528 {
529 struct pci_dev *pdev = to_pci_dev(dev);
530 struct pci_bus *bus = pdev->subordinate;
531 unsigned long val;
532
533 if (!capable(CAP_SYS_ADMIN))
534 return -EPERM;
535
536 if (kstrtoul(buf, 0, &val) < 0)
537 return -EINVAL;
538
539 if (val) {
540 int ret = __pci_reset_bus(bus);
541
542 if (ret)
543 return ret;
544 }
545
546 return count;
547 }
548 static DEVICE_ATTR_WO(reset_subordinate);
549
550 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
551 static ssize_t d3cold_allowed_store(struct device *dev,
552 struct device_attribute *attr,
553 const char *buf, size_t count)
554 {
555 struct pci_dev *pdev = to_pci_dev(dev);
556 unsigned long val;
557
558 if (kstrtoul(buf, 0, &val) < 0)
559 return -EINVAL;
560
561 pdev->d3cold_allowed = !!val;
562 pci_bridge_d3_update(pdev);
563
564 pm_runtime_resume(dev);
565
566 return count;
567 }
568
569 static ssize_t d3cold_allowed_show(struct device *dev,
570 struct device_attribute *attr, char *buf)
571 {
572 struct pci_dev *pdev = to_pci_dev(dev);
573 return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
574 }
575 static DEVICE_ATTR_RW(d3cold_allowed);
576 #endif
577
578 #ifdef CONFIG_OF
579 static ssize_t devspec_show(struct device *dev,
580 struct device_attribute *attr, char *buf)
581 {
582 struct pci_dev *pdev = to_pci_dev(dev);
583 struct device_node *np = pci_device_to_OF_node(pdev);
584
585 if (np == NULL)
586 return 0;
587 return sysfs_emit(buf, "%pOF\n", np);
588 }
589 static DEVICE_ATTR_RO(devspec);
590 #endif
591
592 static ssize_t driver_override_store(struct device *dev,
593 struct device_attribute *attr,
594 const char *buf, size_t count)
595 {
596 struct pci_dev *pdev = to_pci_dev(dev);
597 int ret;
598
599 ret = driver_set_override(dev, &pdev->driver_override, buf, count);
600 if (ret)
601 return ret;
602
603 return count;
604 }
605
606 static ssize_t driver_override_show(struct device *dev,
607 struct device_attribute *attr, char *buf)
608 {
609 struct pci_dev *pdev = to_pci_dev(dev);
610 ssize_t len;
611
612 device_lock(dev);
613 len = sysfs_emit(buf, "%s\n", pdev->driver_override);
614 device_unlock(dev);
615 return len;
616 }
617 static DEVICE_ATTR_RW(driver_override);
618
619 static struct attribute *pci_dev_attrs[] = {
620 &dev_attr_power_state.attr,
621 &dev_attr_resource.attr,
622 &dev_attr_vendor.attr,
623 &dev_attr_device.attr,
624 &dev_attr_subsystem_vendor.attr,
625 &dev_attr_subsystem_device.attr,
626 &dev_attr_revision.attr,
627 &dev_attr_class.attr,
628 &dev_attr_irq.attr,
629 &dev_attr_local_cpus.attr,
630 &dev_attr_local_cpulist.attr,
631 &dev_attr_modalias.attr,
632 #ifdef CONFIG_NUMA
633 &dev_attr_numa_node.attr,
634 #endif
635 &dev_attr_dma_mask_bits.attr,
636 &dev_attr_consistent_dma_mask_bits.attr,
637 &dev_attr_enable.attr,
638 &dev_attr_broken_parity_status.attr,
639 &dev_attr_msi_bus.attr,
640 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
641 &dev_attr_d3cold_allowed.attr,
642 #endif
643 #ifdef CONFIG_OF
644 &dev_attr_devspec.attr,
645 #endif
646 &dev_attr_driver_override.attr,
647 &dev_attr_ari_enabled.attr,
648 NULL,
649 };
650
651 static struct attribute *pci_bridge_attrs[] = {
652 &dev_attr_subordinate_bus_number.attr,
653 &dev_attr_secondary_bus_number.attr,
654 &dev_attr_reset_subordinate.attr,
655 NULL,
656 };
657
658 static struct attribute *pcie_dev_attrs[] = {
659 &dev_attr_current_link_speed.attr,
660 &dev_attr_current_link_width.attr,
661 &dev_attr_max_link_width.attr,
662 &dev_attr_max_link_speed.attr,
663 NULL,
664 };
665
666 static struct attribute *pcibus_attrs[] = {
667 &dev_attr_bus_rescan.attr,
668 &dev_attr_cpuaffinity.attr,
669 &dev_attr_cpulistaffinity.attr,
670 NULL,
671 };
672
673 static const struct attribute_group pcibus_group = {
674 .attrs = pcibus_attrs,
675 };
676
677 const struct attribute_group *pcibus_groups[] = {
678 &pcibus_group,
679 NULL,
680 };
681
682 static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
683 char *buf)
684 {
685 struct pci_dev *pdev = to_pci_dev(dev);
686 struct pci_dev *vga_dev = vga_default_device();
687
688 if (vga_dev)
689 return sysfs_emit(buf, "%u\n", (pdev == vga_dev));
690
691 return sysfs_emit(buf, "%u\n",
692 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
693 IORESOURCE_ROM_SHADOW));
694 }
695 static DEVICE_ATTR_RO(boot_vga);
696
697 static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
698 const struct bin_attribute *bin_attr, char *buf,
699 loff_t off, size_t count)
700 {
701 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
702 unsigned int size = 64;
703 loff_t init_off = off;
704 u8 *data = (u8 *) buf;
705
706 /* Several chips lock up trying to read undefined config space */
707 if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
708 size = dev->cfg_size;
709 else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
710 size = 128;
711
712 if (off > size)
713 return 0;
714 if (off + count > size) {
715 size -= off;
716 count = size;
717 } else {
718 size = count;
719 }
720
721 pci_config_pm_runtime_get(dev);
722
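/* Read a leading byte and/or word until dword-aligned, then whole dwords, then any word/byte tail */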
723 if ((off & 1) && size) {
724 u8 val;
725 pci_user_read_config_byte(dev, off, &val);
726 data[off - init_off] = val;
727 off++;
728 size--;
729 }
730
731 if ((off & 3) && size > 2) {
732 u16 val;
733 pci_user_read_config_word(dev, off, &val);
734 data[off - init_off] = val & 0xff;
735 data[off - init_off + 1] = (val >> 8) & 0xff;
736 off += 2;
737 size -= 2;
738 }
739
740 while (size > 3) {
741 u32 val;
742 pci_user_read_config_dword(dev, off, &val);
743 data[off - init_off] = val & 0xff;
744 data[off - init_off + 1] = (val >> 8) & 0xff;
745 data[off - init_off + 2] = (val >> 16) & 0xff;
746 data[off - init_off + 3] = (val >> 24) & 0xff;
747 off += 4;
748 size -= 4;
749 cond_resched();
750 }
751
752 if (size >= 2) {
753 u16 val;
754 pci_user_read_config_word(dev, off, &val);
755 data[off - init_off] = val & 0xff;
756 data[off - init_off + 1] = (val >> 8) & 0xff;
757 off += 2;
758 size -= 2;
759 }
760
761 if (size > 0) {
762 u8 val;
763 pci_user_read_config_byte(dev, off, &val);
764 data[off - init_off] = val;
765 }
766
767 pci_config_pm_runtime_put(dev);
768
769 return count;
770 }
771
772 static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
773 const struct bin_attribute *bin_attr, char *buf,
774 loff_t off, size_t count)
775 {
776 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
777 unsigned int size = count;
778 loff_t init_off = off;
779 u8 *data = (u8 *) buf;
780 int ret;
781
782 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
783 if (ret)
784 return ret;
785
786 if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
787 count)) {
788 pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
789 current->comm, off);
790 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
791 }
792
793 if (off > dev->cfg_size)
794 return 0;
795 if (off + count > dev->cfg_size) {
796 size = dev->cfg_size - off;
797 count = size;
798 }
799
800 pci_config_pm_runtime_get(dev);
801
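/* Same alignment handling as pci_read_config(): leading byte/word, aligned dwords, then the tail */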
802 if ((off & 1) && size) {
803 pci_user_write_config_byte(dev, off, data[off - init_off]);
804 off++;
805 size--;
806 }
807
808 if ((off & 3) && size > 2) {
809 u16 val = data[off - init_off];
810 val |= (u16) data[off - init_off + 1] << 8;
811 pci_user_write_config_word(dev, off, val);
812 off += 2;
813 size -= 2;
814 }
815
816 while (size > 3) {
817 u32 val = data[off - init_off];
818 val |= (u32) data[off - init_off + 1] << 8;
819 val |= (u32) data[off - init_off + 2] << 16;
820 val |= (u32) data[off - init_off + 3] << 24;
821 pci_user_write_config_dword(dev, off, val);
822 off += 4;
823 size -= 4;
824 }
825
826 if (size >= 2) {
827 u16 val = data[off - init_off];
828 val |= (u16) data[off - init_off + 1] << 8;
829 pci_user_write_config_word(dev, off, val);
830 off += 2;
831 size -= 2;
832 }
833
834 if (size)
835 pci_user_write_config_byte(dev, off, data[off - init_off]);
836
837 pci_config_pm_runtime_put(dev);
838
839 return count;
840 }
841 static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
842
843 static const struct bin_attribute *const pci_dev_config_attrs[] = {
844 &bin_attr_config,
845 NULL,
846 };
847
848 static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
849 const struct bin_attribute *a,
850 int n)
851 {
852 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
853
854 if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
855 return PCI_CFG_SPACE_EXP_SIZE;
856 return PCI_CFG_SPACE_SIZE;
857 }
858
859 static const struct attribute_group pci_dev_config_attr_group = {
860 .bin_attrs_new = pci_dev_config_attrs,
861 .bin_size = pci_dev_config_attr_bin_size,
862 };
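/*
 * The resulting "config" file shows up in each device's sysfs directory,
 * e.g. /sys/bus/pci/devices/0000:00:00.0/config (the address is only an
 * example); lspci and similar tools read the device's config space there.
 */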
863
864 /*
865 * llseek operation for mmappable PCI resources.
866 * May be left unused if the arch doesn't provide them.
867 */
868 static __maybe_unused loff_t
869 pci_llseek_resource(struct file *filep,
870 struct kobject *kobj __always_unused,
871 const struct bin_attribute *attr,
872 loff_t offset, int whence)
873 {
874 return fixed_size_llseek(filep, offset, whence, attr->size);
875 }
876
877 #ifdef HAVE_PCI_LEGACY
878 /**
879 * pci_read_legacy_io - read byte(s) from legacy I/O port space
880 * @filp: open sysfs file
881 * @kobj: kobject corresponding to file to read from
882 * @bin_attr: struct bin_attribute for this file
883 * @buf: buffer to store results
884 * @off: offset into legacy I/O port space
885 * @count: number of bytes to read
886 *
887 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
888 * callback routine (pci_legacy_read).
889 */
890 static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
891 const struct bin_attribute *bin_attr,
892 char *buf, loff_t off, size_t count)
893 {
894 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
895
896 /* Only support 1, 2 or 4 byte accesses */
897 if (count != 1 && count != 2 && count != 4)
898 return -EINVAL;
899
900 return pci_legacy_read(bus, off, (u32 *)buf, count);
901 }
902
903 /**
904 * pci_write_legacy_io - write byte(s) to legacy I/O port space
905 * @filp: open sysfs file
906 * @kobj: kobject corresponding to file to write to
907 * @bin_attr: struct bin_attribute for this file
908 * @buf: buffer containing value to be written
909 * @off: offset into legacy I/O port space
910 * @count: number of bytes to write
911 *
912 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
913 * callback routine (pci_legacy_write).
914 */
915 static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
916 const struct bin_attribute *bin_attr,
917 char *buf, loff_t off, size_t count)
918 {
919 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
920
921 /* Only support 1, 2 or 4 byte accesses */
922 if (count != 1 && count != 2 && count != 4)
923 return -EINVAL;
924
925 return pci_legacy_write(bus, off, *(u32 *)buf, count);
926 }
927
928 /**
929 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
930 * @filp: open sysfs file
931 * @kobj: kobject corresponding to device to be mapped
932 * @attr: struct bin_attribute for this file
933 * @vma: struct vm_area_struct passed to mmap
934 *
935 * Uses an arch specific callback, pci_mmap_legacy_page_range(), to mmap
936 * legacy memory space (first meg of bus space) into application virtual
937 * memory space.
938 */
939 static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
940 const struct bin_attribute *attr,
941 struct vm_area_struct *vma)
942 {
943 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
944
945 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
946 }
947
948 /**
949 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
950 * @filp: open sysfs file
951 * @kobj: kobject corresponding to device to be mapped
952 * @attr: struct bin_attribute for this file
953 * @vma: struct vm_area_struct passed to mmap
954 *
955 * Uses an arch specific callback, pci_mmap_legacy_page_range(), to mmap
956 * legacy IO space (first meg of bus space) into application virtual
957 * memory space. Returns -ENOSYS if the operation isn't supported
958 */
959 static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
960 const struct bin_attribute *attr,
961 struct vm_area_struct *vma)
962 {
963 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
964
965 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
966 }
967
968 /**
969 * pci_adjust_legacy_attr - adjustment of legacy file attributes
970 * @b: bus to create files under
971 * @mmap_type: I/O port or memory
972 *
973 * Stub implementation. Can be overridden by arch if necessary.
974 */
975 void __weak pci_adjust_legacy_attr(struct pci_bus *b,
976 enum pci_mmap_state mmap_type)
977 {
978 }
979
980 /**
981 * pci_create_legacy_files - create legacy I/O port and memory files
982 * @b: bus to create files under
983 *
984 * Some platforms allow access to legacy I/O port and ISA memory space on
985 * a per-bus basis. This routine creates the files and ties them into
986 * their associated read, write and mmap files from pci-sysfs.c
987 *
988 * On error unwind, but don't propagate the error to the caller
989 * as it is ok to set up the PCI bus without these files.
990 */
991 void pci_create_legacy_files(struct pci_bus *b)
992 {
993 int error;
994
995 if (!sysfs_initialized)
996 return;
997
998 b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
999 GFP_ATOMIC);
1000 if (!b->legacy_io)
1001 goto kzalloc_err;
1002
1003 sysfs_bin_attr_init(b->legacy_io);
1004 b->legacy_io->attr.name = "legacy_io";
1005 b->legacy_io->size = 0xffff;
1006 b->legacy_io->attr.mode = 0600;
1007 b->legacy_io->read_new = pci_read_legacy_io;
1008 b->legacy_io->write_new = pci_write_legacy_io;
1009 /* See pci_create_attr() for motivation */
1010 b->legacy_io->llseek = pci_llseek_resource;
1011 b->legacy_io->mmap = pci_mmap_legacy_io;
1012 b->legacy_io->f_mapping = iomem_get_mapping;
1013 pci_adjust_legacy_attr(b, pci_mmap_io);
1014 error = device_create_bin_file(&b->dev, b->legacy_io);
1015 if (error)
1016 goto legacy_io_err;
1017
1018 /* Allocated above after the legacy_io struct */
1019 b->legacy_mem = b->legacy_io + 1;
1020 sysfs_bin_attr_init(b->legacy_mem);
1021 b->legacy_mem->attr.name = "legacy_mem";
1022 b->legacy_mem->size = 1024*1024;
1023 b->legacy_mem->attr.mode = 0600;
1024 b->legacy_mem->mmap = pci_mmap_legacy_mem;
1025 /* See pci_create_attr() for motivation */
1026 b->legacy_mem->llseek = pci_llseek_resource;
1027 b->legacy_mem->f_mapping = iomem_get_mapping;
1028 pci_adjust_legacy_attr(b, pci_mmap_mem);
1029 error = device_create_bin_file(&b->dev, b->legacy_mem);
1030 if (error)
1031 goto legacy_mem_err;
1032
1033 return;
1034
1035 legacy_mem_err:
1036 device_remove_bin_file(&b->dev, b->legacy_io);
1037 legacy_io_err:
1038 kfree(b->legacy_io);
1039 b->legacy_io = NULL;
1040 kzalloc_err:
1041 dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
1042 }
1043
1044 void pci_remove_legacy_files(struct pci_bus *b)
1045 {
1046 if (b->legacy_io) {
1047 device_remove_bin_file(&b->dev, b->legacy_io);
1048 device_remove_bin_file(&b->dev, b->legacy_mem);
1049 kfree(b->legacy_io); /* both are allocated here */
1050 }
1051 }
1052 #endif /* HAVE_PCI_LEGACY */
1053
1054 #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
1055 /**
1056 * pci_mmap_resource - map a PCI resource into user memory space
1057 * @kobj: kobject for mapping
1058 * @attr: struct bin_attribute for the file being mapped
1059 * @vma: struct vm_area_struct passed into the mmap
1060 * @write_combine: 1 for write_combine mapping
1061 *
1062 * Use the regular PCI mapping routines to map a PCI resource into userspace.
1063 */
1064 static int pci_mmap_resource(struct kobject *kobj, const struct bin_attribute *attr,
1065 struct vm_area_struct *vma, int write_combine)
1066 {
1067 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1068 int bar = (unsigned long)attr->private;
1069 enum pci_mmap_state mmap_type;
1070 struct resource *res = &pdev->resource[bar];
1071 int ret;
1072
1073 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1074 if (ret)
1075 return ret;
1076
1077 if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
1078 return -EINVAL;
1079
1080 if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
1081 return -EINVAL;
1082
1083 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
1084
1085 return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
1086 }
1087
1088 static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
1089 const struct bin_attribute *attr,
1090 struct vm_area_struct *vma)
1091 {
1092 return pci_mmap_resource(kobj, attr, vma, 0);
1093 }
1094
1095 static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
1096 const struct bin_attribute *attr,
1097 struct vm_area_struct *vma)
1098 {
1099 return pci_mmap_resource(kobj, attr, vma, 1);
1100 }
1101
1102 static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
1103 const struct bin_attribute *attr, char *buf,
1104 loff_t off, size_t count, bool write)
1105 {
1106 #ifdef CONFIG_HAS_IOPORT
1107 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1108 int bar = (unsigned long)attr->private;
1109 unsigned long port = off;
1110
1111 port += pci_resource_start(pdev, bar);
1112
1113 if (port > pci_resource_end(pdev, bar))
1114 return 0;
1115
1116 if (port + count - 1 > pci_resource_end(pdev, bar))
1117 return -EINVAL;
1118
1119 switch (count) {
1120 case 1:
1121 if (write)
1122 outb(*(u8 *)buf, port);
1123 else
1124 *(u8 *)buf = inb(port);
1125 return 1;
1126 case 2:
1127 if (write)
1128 outw(*(u16 *)buf, port);
1129 else
1130 *(u16 *)buf = inw(port);
1131 return 2;
1132 case 4:
1133 if (write)
1134 outl(*(u32 *)buf, port);
1135 else
1136 *(u32 *)buf = inl(port);
1137 return 4;
1138 }
1139 return -EINVAL;
1140 #else
1141 return -ENXIO;
1142 #endif
1143 }
1144
1145 static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
1146 const struct bin_attribute *attr, char *buf,
1147 loff_t off, size_t count)
1148 {
1149 return pci_resource_io(filp, kobj, attr, buf, off, count, false);
1150 }
1151
1152 static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
1153 const struct bin_attribute *attr, char *buf,
1154 loff_t off, size_t count)
1155 {
1156 int ret;
1157
1158 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1159 if (ret)
1160 return ret;
1161
1162 return pci_resource_io(filp, kobj, attr, buf, off, count, true);
1163 }
1164
1165 /**
1166 * pci_remove_resource_files - cleanup resource files
1167 * @pdev: dev to cleanup
1168 *
1169 * If we created resource files for @pdev, remove them from sysfs and
1170 * free their resources.
1171 */
1172 static void pci_remove_resource_files(struct pci_dev *pdev)
1173 {
1174 int i;
1175
1176 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1177 struct bin_attribute *res_attr;
1178
1179 res_attr = pdev->res_attr[i];
1180 if (res_attr) {
1181 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1182 kfree(res_attr);
1183 }
1184
1185 res_attr = pdev->res_attr_wc[i];
1186 if (res_attr) {
1187 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1188 kfree(res_attr);
1189 }
1190 }
1191 }
1192
1193 static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
1194 {
1195 /* allocate attribute structure, piggyback attribute name */
1196 int name_len = write_combine ? 13 : 10;
1197 struct bin_attribute *res_attr;
1198 char *res_attr_name;
1199 int retval;
1200
1201 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
1202 if (!res_attr)
1203 return -ENOMEM;
1204
1205 res_attr_name = (char *)(res_attr + 1);
1206
1207 sysfs_bin_attr_init(res_attr);
1208 if (write_combine) {
1209 sprintf(res_attr_name, "resource%d_wc", num);
1210 res_attr->mmap = pci_mmap_resource_wc;
1211 } else {
1212 sprintf(res_attr_name, "resource%d", num);
1213 if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
1214 res_attr->read_new = pci_read_resource_io;
1215 res_attr->write_new = pci_write_resource_io;
1216 if (arch_can_pci_mmap_io())
1217 res_attr->mmap = pci_mmap_resource_uc;
1218 } else {
1219 res_attr->mmap = pci_mmap_resource_uc;
1220 }
1221 }
1222 if (res_attr->mmap) {
1223 res_attr->f_mapping = iomem_get_mapping;
1224 /*
1225 * generic_file_llseek() consults f_mapping->host to determine
1226 * the file size. As iomem_inode knows nothing about the
1227 * attribute, it's not going to work, so override it as well.
1228 */
1229 res_attr->llseek = pci_llseek_resource;
1230 }
1231 res_attr->attr.name = res_attr_name;
1232 res_attr->attr.mode = 0600;
1233 res_attr->size = pci_resource_len(pdev, num);
1234 res_attr->private = (void *)(unsigned long)num;
1235 retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
1236 if (retval) {
1237 kfree(res_attr);
1238 return retval;
1239 }
1240
1241 if (write_combine)
1242 pdev->res_attr_wc[num] = res_attr;
1243 else
1244 pdev->res_attr[num] = res_attr;
1245
1246 return 0;
1247 }
1248
1249 /**
1250 * pci_create_resource_files - create resource files in sysfs for @dev
1251 * @pdev: dev in question
1252 *
1253 * Walk the resources in @pdev creating files for each resource available.
1254 */
1255 static int pci_create_resource_files(struct pci_dev *pdev)
1256 {
1257 int i;
1258 int retval;
1259
1260 /* Skip devices with non-mappable BARs */
1261 if (pdev->non_mappable_bars)
1262 return 0;
1263
1264 /* Expose the PCI resources from this device as files */
1265 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1266
1267 /* skip empty resources */
1268 if (!pci_resource_len(pdev, i))
1269 continue;
1270
1271 retval = pci_create_attr(pdev, i, 0);
1272 /* for prefetchable resources, create a WC mappable file */
1273 if (!retval && arch_can_pci_mmap_wc() &&
1274 pdev->resource[i].flags & IORESOURCE_PREFETCH)
1275 retval = pci_create_attr(pdev, i, 1);
1276 if (retval) {
1277 pci_remove_resource_files(pdev);
1278 return retval;
1279 }
1280 }
1281 return 0;
1282 }
1283 #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
1284 int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
1285 void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
1286 #endif
1287
1288 /**
1289 * pci_write_rom - used to enable or disable access to the PCI ROM
1290 * @filp: sysfs file
1291 * @kobj: kernel object handle
1292 * @bin_attr: struct bin_attribute for this file
1293 * @buf: user input
1294 * @off: file offset
1295 * @count: number of bytes in input
1296 *
1297 * writing anything except 0 enables it
1298 */
1299 static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
1300 const struct bin_attribute *bin_attr, char *buf,
1301 loff_t off, size_t count)
1302 {
1303 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1304
1305 if ((off == 0) && (*buf == '0') && (count == 2))
1306 pdev->rom_attr_enabled = 0;
1307 else
1308 pdev->rom_attr_enabled = 1;
1309
1310 return count;
1311 }
1312
1313 /**
1314 * pci_read_rom - read a PCI ROM
1315 * @filp: sysfs file
1316 * @kobj: kernel object handle
1317 * @bin_attr: struct bin_attribute for this file
1318 * @buf: where to put the data we read from the ROM
1319 * @off: file offset
1320 * @count: number of bytes to read
1321 *
1322 * Put @count bytes starting at @off into @buf from the ROM in the PCI
1323 * device corresponding to @kobj.
1324 */
1325 static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
1326 const struct bin_attribute *bin_attr, char *buf,
1327 loff_t off, size_t count)
1328 {
1329 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1330 void __iomem *rom;
1331 size_t size;
1332
1333 if (!pdev->rom_attr_enabled)
1334 return -EINVAL;
1335
1336 rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
1337 if (!rom || !size)
1338 return -EIO;
1339
1340 if (off >= size)
1341 count = 0;
1342 else {
1343 if (off + count > size)
1344 count = size - off;
1345
1346 memcpy_fromio(buf, rom + off, count);
1347 }
1348 pci_unmap_rom(pdev, rom);
1349
1350 return count;
1351 }
1352 static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
1353
1354 static const struct bin_attribute *const pci_dev_rom_attrs[] = {
1355 &bin_attr_rom,
1356 NULL,
1357 };
1358
1359 static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
1360 const struct bin_attribute *a, int n)
1361 {
1362 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1363
1364 /* If the device has a ROM, try to expose it in sysfs. */
1365 if (!pci_resource_end(pdev, PCI_ROM_RESOURCE))
1366 return 0;
1367
1368 return a->attr.mode;
1369 }
1370
1371 static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
1372 const struct bin_attribute *a, int n)
1373 {
1374 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1375
1376 return pci_resource_len(pdev, PCI_ROM_RESOURCE);
1377 }
1378
1379 static const struct attribute_group pci_dev_rom_attr_group = {
1380 .bin_attrs_new = pci_dev_rom_attrs,
1381 .is_bin_visible = pci_dev_rom_attr_is_visible,
1382 .bin_size = pci_dev_rom_attr_bin_size,
1383 };
1384
1385 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
1386 const char *buf, size_t count)
1387 {
1388 struct pci_dev *pdev = to_pci_dev(dev);
1389 unsigned long val;
1390 ssize_t result;
1391
1392 if (kstrtoul(buf, 0, &val) < 0)
1393 return -EINVAL;
1394
1395 if (val != 1)
1396 return -EINVAL;
1397
1398 pm_runtime_get_sync(dev);
1399 result = pci_reset_function(pdev);
1400 pm_runtime_put(dev);
1401 if (result < 0)
1402 return result;
1403
1404 return count;
1405 }
1406 static DEVICE_ATTR_WO(reset);
1407
1408 static struct attribute *pci_dev_reset_attrs[] = {
1409 &dev_attr_reset.attr,
1410 NULL,
1411 };
1412
1413 static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
1414 struct attribute *a, int n)
1415 {
1416 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1417
1418 if (!pci_reset_supported(pdev))
1419 return 0;
1420
1421 return a->mode;
1422 }
1423
1424 static const struct attribute_group pci_dev_reset_attr_group = {
1425 .attrs = pci_dev_reset_attrs,
1426 .is_visible = pci_dev_reset_attr_is_visible,
1427 };
1428
1429 static ssize_t reset_method_show(struct device *dev,
1430 struct device_attribute *attr, char *buf)
1431 {
1432 struct pci_dev *pdev = to_pci_dev(dev);
1433 ssize_t len = 0;
1434 int i, m;
1435
1436 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
1437 m = pdev->reset_methods[i];
1438 if (!m)
1439 break;
1440
1441 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
1442 pci_reset_fn_methods[m].name);
1443 }
1444
1445 if (len)
1446 len += sysfs_emit_at(buf, len, "\n");
1447
1448 return len;
1449 }
1450
1451 static int reset_method_lookup(const char *name)
1452 {
1453 int m;
1454
1455 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
1456 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
1457 return m;
1458 }
1459
1460 return 0; /* not found */
1461 }
1462
1463 static ssize_t reset_method_store(struct device *dev,
1464 struct device_attribute *attr,
1465 const char *buf, size_t count)
1466 {
1467 struct pci_dev *pdev = to_pci_dev(dev);
1468 char *tmp_options, *name;
1469 int m, n;
1470 u8 reset_methods[PCI_NUM_RESET_METHODS] = {};
1471
1472 if (sysfs_streq(buf, "")) {
1473 pdev->reset_methods[0] = 0;
1474 pci_warn(pdev, "All device reset methods disabled by user");
1475 return count;
1476 }
1477
1478 pm_runtime_get_sync(dev);
1479 struct device *pmdev __free(pm_runtime_put) = dev;
1480
1481 if (sysfs_streq(buf, "default")) {
1482 pci_init_reset_methods(pdev);
1483 return count;
1484 }
1485
1486 char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
1487 if (!options)
1488 return -ENOMEM;
1489
1490 n = 0;
1491 tmp_options = options;
1492 while ((name = strsep(&tmp_options, " ")) != NULL) {
1493 if (sysfs_streq(name, ""))
1494 continue;
1495
1496 name = strim(name);
1497
1498 /* Leave previous methods unchanged if input is invalid */
1499 m = reset_method_lookup(name);
1500 if (!m) {
1501 pci_err(pdev, "Invalid reset method '%s'", name);
1502 return -EINVAL;
1503 }
1504
1505 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
1506 pci_err(pdev, "Unsupported reset method '%s'", name);
1507 return -EINVAL;
1508 }
1509
1510 if (n == PCI_NUM_RESET_METHODS - 1) {
1511 pci_err(pdev, "Too many reset methods\n");
1512 return -EINVAL;
1513 }
1514
1515 reset_methods[n++] = m;
1516 }
1517
1518 reset_methods[n] = 0;
1519
1520 /* Warn if dev-specific supported but not highest priority */
1521 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
1522 reset_methods[0] != 1)
1523 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
1524 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
1525 return count;
1526 }
1527 static DEVICE_ATTR_RW(reset_method);
1528
1529 static struct attribute *pci_dev_reset_method_attrs[] = {
1530 &dev_attr_reset_method.attr,
1531 NULL,
1532 };
1533
1534 static const struct attribute_group pci_dev_reset_method_attr_group = {
1535 .attrs = pci_dev_reset_method_attrs,
1536 .is_visible = pci_dev_reset_attr_is_visible,
1537 };
1538
1539 static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
1540 {
1541 struct pci_dev *pdev = to_pci_dev(dev);
1542 ssize_t ret;
1543
1544 pci_config_pm_runtime_get(pdev);
1545
1546 ret = sysfs_emit(buf, "%016llx\n",
1547 (u64)pci_rebar_get_possible_sizes(pdev, n));
1548
1549 pci_config_pm_runtime_put(pdev);
1550
1551 return ret;
1552 }
1553
1554 static ssize_t __resource_resize_store(struct device *dev, int n,
1555 const char *buf, size_t count)
1556 {
1557 struct pci_dev *pdev = to_pci_dev(dev);
1558 unsigned long size, flags;
1559 int ret, i;
1560 u16 cmd;
1561
1562 if (kstrtoul(buf, 0, &size) < 0)
1563 return -EINVAL;
1564
1565 device_lock(dev);
1566 if (dev->driver || pci_num_vf(pdev)) {
1567 ret = -EBUSY;
1568 goto unlock;
1569 }
1570
1571 pci_config_pm_runtime_get(pdev);
1572
1573 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
1574 ret = aperture_remove_conflicting_pci_devices(pdev,
1575 "resourceN_resize");
1576 if (ret)
1577 goto pm_put;
1578 }
1579
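/*
 * Turn off memory decoding and drop the resource files while the BAR layout
 * changes: release all resources of the same type, resize BAR n, let the
 * core reassign the bus, recreate the files, then restore PCI_COMMAND.
 */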
1580 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1581 pci_write_config_word(pdev, PCI_COMMAND,
1582 cmd & ~PCI_COMMAND_MEMORY);
1583
1584 flags = pci_resource_flags(pdev, n);
1585
1586 pci_remove_resource_files(pdev);
1587
1588 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
1589 if (pci_resource_len(pdev, i) &&
1590 pci_resource_flags(pdev, i) == flags)
1591 pci_release_resource(pdev, i);
1592 }
1593
1594 ret = pci_resize_resource(pdev, n, size);
1595
1596 pci_assign_unassigned_bus_resources(pdev->bus);
1597
1598 if (pci_create_resource_files(pdev))
1599 pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");
1600
1601 pci_write_config_word(pdev, PCI_COMMAND, cmd);
1602 pm_put:
1603 pci_config_pm_runtime_put(pdev);
1604 unlock:
1605 device_unlock(dev);
1606
1607 return ret ? ret : count;
1608 }
1609
1610 #define pci_dev_resource_resize_attr(n) \
1611 static ssize_t resource##n##_resize_show(struct device *dev, \
1612 struct device_attribute *attr, \
1613 char *buf) \
1614 { \
1615 return __resource_resize_show(dev, n, buf); \
1616 } \
1617 static ssize_t resource##n##_resize_store(struct device *dev, \
1618 struct device_attribute *attr,\
1619 const char *buf, size_t count)\
1620 { \
1621 return __resource_resize_store(dev, n, buf, count); \
1622 } \
1623 static DEVICE_ATTR_RW(resource##n##_resize)
1624
1625 pci_dev_resource_resize_attr(0);
1626 pci_dev_resource_resize_attr(1);
1627 pci_dev_resource_resize_attr(2);
1628 pci_dev_resource_resize_attr(3);
1629 pci_dev_resource_resize_attr(4);
1630 pci_dev_resource_resize_attr(5);
1631
1632 static struct attribute *resource_resize_attrs[] = {
1633 &dev_attr_resource0_resize.attr,
1634 &dev_attr_resource1_resize.attr,
1635 &dev_attr_resource2_resize.attr,
1636 &dev_attr_resource3_resize.attr,
1637 &dev_attr_resource4_resize.attr,
1638 &dev_attr_resource5_resize.attr,
1639 NULL,
1640 };
1641
1642 static umode_t resource_resize_is_visible(struct kobject *kobj,
1643 struct attribute *a, int n)
1644 {
1645 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1646
1647 return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
1648 }
1649
1650 static const struct attribute_group pci_dev_resource_resize_group = {
1651 .attrs = resource_resize_attrs,
1652 .is_visible = resource_resize_is_visible,
1653 };
1654
1655 int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
1656 {
1657 if (!sysfs_initialized)
1658 return -EACCES;
1659
1660 return pci_create_resource_files(pdev);
1661 }
1662
1663 /**
1664 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
1665 * @pdev: device whose entries we should free
1666 *
1667 * Cleanup when @pdev is removed from sysfs.
1668 */
1669 void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
1670 {
1671 if (!sysfs_initialized)
1672 return;
1673
1674 pci_remove_resource_files(pdev);
1675 }
1676
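/*
 * Devices and buses enumerated before this initcall could not get their
 * resource and legacy files (sysfs_initialized was still 0), so create
 * them here for everything that already exists.
 */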
1677 static int __init pci_sysfs_init(void)
1678 {
1679 struct pci_dev *pdev = NULL;
1680 struct pci_bus *pbus = NULL;
1681 int retval;
1682
1683 sysfs_initialized = 1;
1684 for_each_pci_dev(pdev) {
1685 retval = pci_create_sysfs_dev_files(pdev);
1686 if (retval) {
1687 pci_dev_put(pdev);
1688 return retval;
1689 }
1690 }
1691
1692 while ((pbus = pci_find_next_bus(pbus)))
1693 pci_create_legacy_files(pbus);
1694
1695 return 0;
1696 }
1697 late_initcall(pci_sysfs_init);
1698
1699 static struct attribute *pci_dev_dev_attrs[] = {
1700 &dev_attr_boot_vga.attr,
1701 NULL,
1702 };
1703
1704 static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1705 struct attribute *a, int n)
1706 {
1707 struct device *dev = kobj_to_dev(kobj);
1708 struct pci_dev *pdev = to_pci_dev(dev);
1709
1710 if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
1711 return a->mode;
1712
1713 return 0;
1714 }
1715
1716 static struct attribute *pci_dev_hp_attrs[] = {
1717 &dev_attr_remove.attr,
1718 &dev_attr_dev_rescan.attr,
1719 NULL,
1720 };
1721
1722 static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
1723 struct attribute *a, int n)
1724 {
1725 struct device *dev = kobj_to_dev(kobj);
1726 struct pci_dev *pdev = to_pci_dev(dev);
1727
1728 if (pdev->is_virtfn)
1729 return 0;
1730
1731 return a->mode;
1732 }
1733
1734 static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
1735 struct attribute *a, int n)
1736 {
1737 struct device *dev = kobj_to_dev(kobj);
1738 struct pci_dev *pdev = to_pci_dev(dev);
1739
1740 if (pci_is_bridge(pdev))
1741 return a->mode;
1742
1743 return 0;
1744 }
1745
1746 static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
1747 struct attribute *a, int n)
1748 {
1749 struct device *dev = kobj_to_dev(kobj);
1750 struct pci_dev *pdev = to_pci_dev(dev);
1751
1752 if (pci_is_pcie(pdev))
1753 return a->mode;
1754
1755 return 0;
1756 }
1757
1758 static const struct attribute_group pci_dev_group = {
1759 .attrs = pci_dev_attrs,
1760 };
1761
1762 const struct attribute_group *pci_dev_groups[] = {
1763 &pci_dev_group,
1764 &pci_dev_config_attr_group,
1765 &pci_dev_rom_attr_group,
1766 &pci_dev_reset_attr_group,
1767 &pci_dev_reset_method_attr_group,
1768 &pci_dev_vpd_attr_group,
1769 #ifdef CONFIG_DMI
1770 &pci_dev_smbios_attr_group,
1771 #endif
1772 #ifdef CONFIG_ACPI
1773 &pci_dev_acpi_attr_group,
1774 #endif
1775 &pci_dev_resource_resize_group,
1776 ARCH_PCI_DEV_GROUPS
1777 NULL,
1778 };
1779
1780 static const struct attribute_group pci_dev_hp_attr_group = {
1781 .attrs = pci_dev_hp_attrs,
1782 .is_visible = pci_dev_hp_attrs_are_visible,
1783 };
1784
1785 static const struct attribute_group pci_dev_attr_group = {
1786 .attrs = pci_dev_dev_attrs,
1787 .is_visible = pci_dev_attrs_are_visible,
1788 };
1789
1790 static const struct attribute_group pci_bridge_attr_group = {
1791 .attrs = pci_bridge_attrs,
1792 .is_visible = pci_bridge_attrs_are_visible,
1793 };
1794
1795 static const struct attribute_group pcie_dev_attr_group = {
1796 .attrs = pcie_dev_attrs,
1797 .is_visible = pcie_dev_attrs_are_visible,
1798 };
1799
1800 const struct attribute_group *pci_dev_attr_groups[] = {
1801 &pci_dev_attr_group,
1802 &pci_dev_hp_attr_group,
1803 #ifdef CONFIG_PCI_IOV
1804 &sriov_pf_dev_attr_group,
1805 &sriov_vf_dev_attr_group,
1806 #endif
1807 &pci_bridge_attr_group,
1808 &pcie_dev_attr_group,
1809 #ifdef CONFIG_PCIEAER
1810 &aer_stats_attr_group,
1811 &aer_attr_group,
1812 #endif
1813 #ifdef CONFIG_PCIEASPM
1814 &aspm_ctrl_attr_group,
1815 #endif
1816 #ifdef CONFIG_PCI_DOE
1817 &pci_doe_sysfs_group,
1818 #endif
1819 NULL,
1820 };
1821