xref: /linux/drivers/pci/pci-sysfs.c (revision 40286d6379aacfcc053253ef78dc78b09addffda)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
4  * (C) Copyright 2002-2004 IBM Corp.
5  * (C) Copyright 2003 Matthew Wilcox
6  * (C) Copyright 2003 Hewlett-Packard
7  * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
8  * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
9  *
10  * File attributes for PCI devices
11  *
12  * Modeled after usb's driverfs.c
13  */
14 
15 #include <linux/bitfield.h>
16 #include <linux/cleanup.h>
17 #include <linux/kernel.h>
18 #include <linux/sched.h>
19 #include <linux/pci.h>
20 #include <linux/stat.h>
21 #include <linux/export.h>
22 #include <linux/topology.h>
23 #include <linux/mm.h>
24 #include <linux/fs.h>
25 #include <linux/capability.h>
26 #include <linux/security.h>
27 #include <linux/slab.h>
28 #include <linux/vgaarb.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/msi.h>
31 #include <linux/of.h>
32 #include <linux/aperture.h>
33 #include <linux/unaligned.h>
34 #include "pci.h"
35 
36 #ifndef ARCH_PCI_DEV_GROUPS
37 #define ARCH_PCI_DEV_GROUPS
38 #endif
39 
/* Nonzero once PCI sysfs setup has completed; gates pci_create_legacy_files() */
static int sysfs_initialized;	/* = 0 */
41 
/* show configuration fields */
/*
 * Generate a read-only sysfs show function plus its device_attribute for a
 * field cached in struct pci_dev (no config space access at read time).
 */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf)				\
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sysfs_emit(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

/* Read-only attributes backed by cached struct pci_dev fields */
pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
60 
61 static ssize_t irq_show(struct device *dev,
62 			struct device_attribute *attr,
63 			char *buf)
64 {
65 	struct pci_dev *pdev = to_pci_dev(dev);
66 
67 #ifdef CONFIG_PCI_MSI
68 	/*
69 	 * For MSI, show the first MSI IRQ; for all other cases including
70 	 * MSI-X, show the legacy INTx IRQ.
71 	 */
72 	if (pdev->msi_enabled)
73 		return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
74 #endif
75 
76 	return sysfs_emit(buf, "%u\n", pdev->irq);
77 }
78 static DEVICE_ATTR_RO(irq);
79 
80 static ssize_t broken_parity_status_show(struct device *dev,
81 					 struct device_attribute *attr,
82 					 char *buf)
83 {
84 	struct pci_dev *pdev = to_pci_dev(dev);
85 	return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
86 }
87 
88 static ssize_t broken_parity_status_store(struct device *dev,
89 					  struct device_attribute *attr,
90 					  const char *buf, size_t count)
91 {
92 	struct pci_dev *pdev = to_pci_dev(dev);
93 	unsigned long val;
94 
95 	if (kstrtoul(buf, 0, &val) < 0)
96 		return -EINVAL;
97 
98 	pdev->broken_parity_status = !!val;
99 
100 	return count;
101 }
102 static DEVICE_ATTR_RW(broken_parity_status);
103 
/*
 * Show the set of CPUs considered local to this device: the CPUs of its NUMA
 * node when one is assigned, otherwise all online CPUs (or, without NUMA
 * support, the CPUs of the device's bus).  @list selects human-readable
 * range-list output instead of the hex bitmap.
 */
static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	if (dev_to_node(dev) == NUMA_NO_NODE)
		mask = cpu_online_mask;
	else
		mask = cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}
119 
static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	/* Hex bitmap form */
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	/* Human-readable range-list form (e.g. "0-3,8") */
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);
133 
134 /*
135  * PCI Bus Class Devices
136  */
137 static ssize_t cpuaffinity_show(struct device *dev,
138 				struct device_attribute *attr, char *buf)
139 {
140 	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
141 
142 	return cpumap_print_to_pagebuf(false, buf, cpumask);
143 }
144 static DEVICE_ATTR_RO(cpuaffinity);
145 
146 static ssize_t cpulistaffinity_show(struct device *dev,
147 				    struct device_attribute *attr, char *buf)
148 {
149 	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
150 
151 	return cpumap_print_to_pagebuf(true, buf, cpumask);
152 }
153 static DEVICE_ATTR_RO(cpulistaffinity);
154 
155 static ssize_t power_state_show(struct device *dev,
156 				struct device_attribute *attr, char *buf)
157 {
158 	struct pci_dev *pdev = to_pci_dev(dev);
159 
160 	return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
161 }
162 static DEVICE_ATTR_RO(power_state);
163 
/* show resources */
/*
 * One line per resource: start, end, flags as fixed-width hex.  Bridge
 * windows are included only for devices that actually have a subordinate
 * bus; unset/disabled windows are reported as all zeroes to keep the
 * long-standing user-visible format stable.
 */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int i;
	int max;
	resource_size_t start, end;
	size_t len = 0;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res =  &pci_dev->resource[i];
		struct resource zerores = {};

		/* For backwards compatibility */
		if (pci_resource_is_bridge_win(i) &&
		    res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
			res = &zerores;

		/* Translate to the user-visible (bus) address view */
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
				     (unsigned long long)start,
				     (unsigned long long)end,
				     (unsigned long long)res->flags);
	}
	return len;
}
static DEVICE_ATTR_RO(resource);
197 
198 static ssize_t max_link_speed_show(struct device *dev,
199 				   struct device_attribute *attr, char *buf)
200 {
201 	struct pci_dev *pdev = to_pci_dev(dev);
202 
203 	return sysfs_emit(buf, "%s\n",
204 			  pci_speed_string(pcie_get_speed_cap(pdev)));
205 }
206 static DEVICE_ATTR_RO(max_link_speed);
207 
/* Maximum link width (number of lanes) the device is capable of */
static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t ret;

	/* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
	pci_config_pm_runtime_get(pdev);
	ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
	pci_config_pm_runtime_put(pdev);

	return ret;
}
static DEVICE_ATTR_RO(max_link_width);
222 
223 static ssize_t current_link_speed_show(struct device *dev,
224 				       struct device_attribute *attr, char *buf)
225 {
226 	struct pci_dev *pci_dev = to_pci_dev(dev);
227 	u16 linkstat;
228 	int err;
229 	enum pci_bus_speed speed;
230 
231 	pci_config_pm_runtime_get(pci_dev);
232 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
233 	pci_config_pm_runtime_put(pci_dev);
234 
235 	if (err)
236 		return -EINVAL;
237 
238 	speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
239 
240 	return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
241 }
242 static DEVICE_ATTR_RO(current_link_speed);
243 
244 static ssize_t current_link_width_show(struct device *dev,
245 				       struct device_attribute *attr, char *buf)
246 {
247 	struct pci_dev *pci_dev = to_pci_dev(dev);
248 	u16 linkstat;
249 	int err;
250 
251 	pci_config_pm_runtime_get(pci_dev);
252 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
253 	pci_config_pm_runtime_put(pci_dev);
254 
255 	if (err)
256 		return -EINVAL;
257 
258 	return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
259 }
260 static DEVICE_ATTR_RO(current_link_width);
261 
262 static ssize_t secondary_bus_number_show(struct device *dev,
263 					 struct device_attribute *attr,
264 					 char *buf)
265 {
266 	struct pci_dev *pci_dev = to_pci_dev(dev);
267 	u8 sec_bus;
268 	int err;
269 
270 	pci_config_pm_runtime_get(pci_dev);
271 	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
272 	pci_config_pm_runtime_put(pci_dev);
273 
274 	if (err)
275 		return -EINVAL;
276 
277 	return sysfs_emit(buf, "%u\n", sec_bus);
278 }
279 static DEVICE_ATTR_RO(secondary_bus_number);
280 
281 static ssize_t subordinate_bus_number_show(struct device *dev,
282 					   struct device_attribute *attr,
283 					   char *buf)
284 {
285 	struct pci_dev *pci_dev = to_pci_dev(dev);
286 	u8 sub_bus;
287 	int err;
288 
289 	pci_config_pm_runtime_get(pci_dev);
290 	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
291 	pci_config_pm_runtime_put(pci_dev);
292 
293 	if (err)
294 		return -EINVAL;
295 
296 	return sysfs_emit(buf, "%u\n", sub_bus);
297 }
298 static DEVICE_ATTR_RO(subordinate_bus_number);
299 
300 static ssize_t ari_enabled_show(struct device *dev,
301 				struct device_attribute *attr,
302 				char *buf)
303 {
304 	struct pci_dev *pci_dev = to_pci_dev(dev);
305 
306 	return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
307 }
308 static DEVICE_ATTR_RO(ari_enabled);
309 
/*
 * Modalias string used for module autoloading.  NOTE(review): the format
 * presumably must stay in sync with the MODALIAS uevent and modpost's
 * file2alias — do not change it independently.
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
			  pci_dev->vendor, pci_dev->device,
			  pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			  (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
			  (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);
322 
/*
 * Enable (write 1) or disable (write 0) the device from userspace.
 * Refused while a driver is bound, since the driver then owns the
 * enable state.
 */
static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = 0;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/* Hold the device lock so a driver cannot bind underneath us */
	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;	/* disabling an already-disabled device */
	device_unlock(dev);

	return result < 0 ? result : count;
}
350 
351 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
352 			    char *buf)
353 {
354 	struct pci_dev *pdev;
355 
356 	pdev = to_pci_dev(dev);
357 	return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
358 }
359 static DEVICE_ATTR_RW(enable);
360 
361 #ifdef CONFIG_NUMA
/*
 * Override the device's NUMA node from userspace.  Intended as a
 * workaround for firmware that reports the wrong (or no) node, hence
 * the taint and FW_BUG alert on any actual change.
 */
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoint(buf, 0, &node) < 0)
		return -EINVAL;

	/* Only NUMA_NO_NODE or a valid node number is acceptable */
	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	/* No-op writes succeed without tainting the kernel */
	if (node == dev->numa_node)
		return count;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d.  Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}
391 
static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	/* NUMA_NO_NODE is reported as -1 */
	return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
398 #endif
399 
400 static ssize_t dma_mask_bits_show(struct device *dev,
401 				  struct device_attribute *attr, char *buf)
402 {
403 	struct pci_dev *pdev = to_pci_dev(dev);
404 
405 	return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
406 }
407 static DEVICE_ATTR_RO(dma_mask_bits);
408 
static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	/* Width in bits of the coherent (consistent) DMA mask */
	return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);
416 
417 static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
418 			    char *buf)
419 {
420 	struct pci_dev *pdev = to_pci_dev(dev);
421 	struct pci_bus *subordinate = pdev->subordinate;
422 
423 	return sysfs_emit(buf, "%u\n", subordinate ?
424 			  !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
425 			    : !pdev->no_msi);
426 }
427 
/*
 * Allow (write 1) or disallow (write 0) MSI/MSI-X.  On an ordinary device
 * this flips pdev->no_msi; on a bridge it flips PCI_BUS_FLAGS_NO_MSI for
 * the whole subordinate bus.
 */
static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);
463 
464 static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count)
465 {
466 	unsigned long val;
467 	struct pci_bus *b = NULL;
468 
469 	if (kstrtoul(buf, 0, &val) < 0)
470 		return -EINVAL;
471 
472 	if (val) {
473 		pci_lock_rescan_remove();
474 		while ((b = pci_find_next_bus(b)) != NULL)
475 			pci_rescan_bus(b);
476 		pci_unlock_rescan_remove();
477 	}
478 	return count;
479 }
480 static BUS_ATTR_WO(rescan);
481 
/* Attributes hung off the PCI bus type itself (/sys/bus/pci/...) */
static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

/* NULL-terminated group list consumed by the driver core */
const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};
495 
496 static ssize_t dev_rescan_store(struct device *dev,
497 				struct device_attribute *attr, const char *buf,
498 				size_t count)
499 {
500 	unsigned long val;
501 	struct pci_dev *pdev = to_pci_dev(dev);
502 
503 	if (kstrtoul(buf, 0, &val) < 0)
504 		return -EINVAL;
505 
506 	if (val) {
507 		pci_lock_rescan_remove();
508 		pci_rescan_bus(pdev->bus);
509 		pci_unlock_rescan_remove();
510 	}
511 	return count;
512 }
513 static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
514 							    dev_rescan_store);
515 
/*
 * Write 1 to hot-remove this device from the kernel's view.
 */
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * device_remove_file_self() deletes this very attribute first so the
	 * removal below cannot deadlock on the open sysfs file; it returns
	 * false when a concurrent writer already won the race, in which case
	 * the device teardown is left to that writer.
	 */
	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
				  remove_store);
530 
/* Per-bus "rescan": write 1 to rescan everything below this bus */
static ssize_t bus_rescan_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		/*
		 * An empty non-root bus suggests its bridge was never
		 * scanned; resize the bridge windows while rescanning.
		 */
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
							    bus_rescan_store);
553 
554 static ssize_t reset_subordinate_store(struct device *dev,
555 				struct device_attribute *attr,
556 				const char *buf, size_t count)
557 {
558 	struct pci_dev *pdev = to_pci_dev(dev);
559 	unsigned long val;
560 
561 	if (!capable(CAP_SYS_ADMIN))
562 		return -EPERM;
563 
564 	if (kstrtoul(buf, 0, &val) < 0)
565 		return -EINVAL;
566 
567 	if (val) {
568 		int ret = pci_try_reset_bridge(pdev);
569 
570 		if (ret)
571 			return ret;
572 	}
573 
574 	return count;
575 }
576 static DEVICE_ATTR_WO(reset_subordinate);
577 
578 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
/* Allow (1) or forbid (0) putting the device into D3cold at runtime */
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	/* Re-evaluate whether the upstream bridge may enter D3 */
	pci_bridge_d3_update(pdev);

	/*
	 * Resume the device so the new policy is applied on the next
	 * runtime suspend rather than left in a stale state.
	 */
	pm_runtime_resume(dev);

	return count;
}
596 
597 static ssize_t d3cold_allowed_show(struct device *dev,
598 				   struct device_attribute *attr, char *buf)
599 {
600 	struct pci_dev *pdev = to_pci_dev(dev);
601 	return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
602 }
603 static DEVICE_ATTR_RW(d3cold_allowed);
604 #endif
605 
606 #ifdef CONFIG_OF
607 static ssize_t devspec_show(struct device *dev,
608 			    struct device_attribute *attr, char *buf)
609 {
610 	struct pci_dev *pdev = to_pci_dev(dev);
611 	struct device_node *np = pci_device_to_OF_node(pdev);
612 
613 	if (np == NULL)
614 		return 0;
615 	return sysfs_emit(buf, "%pOF\n", np);
616 }
617 static DEVICE_ATTR_RO(devspec);
618 #endif
619 
/* Attributes present on every PCI device */
static struct attribute *pci_dev_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_ari_enabled.attr,
	NULL,
};

/* Bridge-specific attributes — presumably gated elsewhere to bridges only */
static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	&dev_attr_reset_subordinate.attr,
	NULL,
};

/* Link attributes, meaningful only for PCIe devices */
static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

/* Attributes of the pci_bus class device itself */
static struct attribute *pcibus_attrs[] = {
	&dev_attr_bus_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

/* NULL-terminated group list consumed by the driver core */
const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};
681 
682 static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
683 			     char *buf)
684 {
685 	struct pci_dev *pdev = to_pci_dev(dev);
686 	struct pci_dev *vga_dev = vga_default_device();
687 
688 	if (vga_dev)
689 		return sysfs_emit(buf, "%u\n", (pdev == vga_dev));
690 
691 	return sysfs_emit(buf, "%u\n",
692 			  !!(pdev->resource[PCI_ROM_RESOURCE].flags &
693 			     IORESOURCE_ROM_SHADOW));
694 }
695 static DEVICE_ATTR_RO(boot_vga);
696 
/*
 * PCIe Device Serial Number, most-significant byte first, formatted by
 * %phD as hyphen-separated hex bytes.
 */
static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u64 dsn;
	u8 bytes[8];

	/* 0 means the DSN capability is absent or empty */
	dsn = pci_get_dsn(pci_dev);
	if (!dsn)
		return -EIO;

	put_unaligned_be64(dsn, bytes);
	return sysfs_emit(buf, "%8phD\n", bytes);
}
static DEVICE_ATTR_ADMIN_RO(serial_number);
712 
/*
 * Read handler for the "config" binary attribute.  Copies config space
 * byte-accurately into @buf, using the widest naturally-aligned accesses
 * possible (byte/word lead-in, dword bulk, word/byte tail).
 */
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       const struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	/* Clamp to the readable window; "size" becomes bytes left to read */
	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	/* Lead-in byte to reach 16-bit alignment */
	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	/* Lead-in word to reach 32-bit alignment */
	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	/* Bulk of the transfer as aligned dwords */
	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
		/* Config reads can be slow; don't hog the CPU */
		cond_resched();
	}

	/* Trailing word, then trailing byte */
	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}
787 
/*
 * Write handler for the "config" binary attribute.  Mirrors
 * pci_read_config(): refused under kernel lockdown, taints the kernel on
 * writes into driver-exclusive config ranges, and writes with the widest
 * naturally-aligned accesses possible.
 */
static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	/* Warn (once) and taint if userspace pokes at kernel-owned ranges */
	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
				  count)) {
		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
			      current->comm, off);
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	}

	/* Clamp to the device's config space size */
	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	/* Lead-in byte to reach 16-bit alignment */
	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	/* Lead-in word to reach 32-bit alignment */
	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	/* Bulk of the transfer as aligned dwords (little-endian assembly) */
	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	/* Trailing word, then trailing byte */
	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size)
		pci_user_write_config_byte(dev, off, data[off - init_off]);

	pci_config_pm_runtime_put(dev);

	return count;
}
/* Raw config space node; size 0 here — supplied per device via .bin_size */
static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);

static const struct bin_attribute *const pci_dev_config_attrs[] = {
	&bin_attr_config,
	NULL,
};
863 
864 static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
865 					   const struct bin_attribute *a,
866 					   int n)
867 {
868 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
869 
870 	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
871 		return PCI_CFG_SPACE_EXP_SIZE;
872 	return PCI_CFG_SPACE_SIZE;
873 }
874 
/* Group exposing the "config" binary file with a per-device size */
static const struct attribute_group pci_dev_config_attr_group = {
	.bin_attrs = pci_dev_config_attrs,
	.bin_size = pci_dev_config_attr_bin_size,
};
879 
/*
 * llseek operation for mmappable PCI resources.
 * May be left unused if the arch doesn't provide them.
 */
static __maybe_unused loff_t
pci_llseek_resource(struct file *filep,
		    struct kobject *kobj __always_unused,
		    const struct bin_attribute *attr,
		    loff_t offset, int whence)
{
	/* Seeking is bounded by the fixed size of the resource file */
	return fixed_size_llseek(filep, offset, whence, attr->size);
}
892 
893 #ifdef HAVE_PCI_LEGACY
894 /**
895  * pci_read_legacy_io - read byte(s) from legacy I/O port space
896  * @filp: open sysfs file
897  * @kobj: kobject corresponding to file to read from
898  * @bin_attr: struct bin_attribute for this file
899  * @buf: buffer to store results
900  * @off: offset into legacy I/O port space
901  * @count: number of bytes to read
902  *
903  * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
904  * callback routine (pci_legacy_read).
905  */
906 static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
907 				  const struct bin_attribute *bin_attr,
908 				  char *buf, loff_t off, size_t count)
909 {
910 	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
911 
912 	/* Only support 1, 2 or 4 byte accesses */
913 	if (count != 1 && count != 2 && count != 4)
914 		return -EINVAL;
915 
916 	return pci_legacy_read(bus, off, (u32 *)buf, count);
917 }
918 
919 /**
920  * pci_write_legacy_io - write byte(s) to legacy I/O port space
921  * @filp: open sysfs file
922  * @kobj: kobject corresponding to file to read from
923  * @bin_attr: struct bin_attribute for this file
924  * @buf: buffer containing value to be written
925  * @off: offset into legacy I/O port space
926  * @count: number of bytes to write
927  *
928  * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
929  * callback routine (pci_legacy_write).
930  */
931 static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
932 				   const struct bin_attribute *bin_attr,
933 				   char *buf, loff_t off, size_t count)
934 {
935 	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
936 
937 	/* Only support 1, 2 or 4 byte accesses */
938 	if (count != 1 && count != 2 && count != 4)
939 		return -EINVAL;
940 
941 	return pci_legacy_write(bus, off, *(u32 *)buf, count);
942 }
943 
944 /**
945  * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
946  * @filp: open sysfs file
947  * @kobj: kobject corresponding to device to be mapped
948  * @attr: struct bin_attribute for this file
949  * @vma: struct vm_area_struct passed to mmap
950  *
951  * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
952  * legacy memory space (first meg of bus space) into application virtual
953  * memory space.
954  */
955 static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
956 			       const struct bin_attribute *attr,
957 			       struct vm_area_struct *vma)
958 {
959 	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
960 
961 	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
962 }
963 
964 /**
965  * pci_mmap_legacy_io - map legacy PCI IO into user memory space
966  * @filp: open sysfs file
967  * @kobj: kobject corresponding to device to be mapped
968  * @attr: struct bin_attribute for this file
969  * @vma: struct vm_area_struct passed to mmap
970  *
971  * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
972  * legacy IO space (first meg of bus space) into application virtual
973  * memory space. Returns -ENOSYS if the operation isn't supported
974  */
975 static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
976 			      const struct bin_attribute *attr,
977 			      struct vm_area_struct *vma)
978 {
979 	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
980 
981 	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
982 }
983 
/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
	/* Default: no adjustment needed */
}
995 
/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	/* Nothing to do before the sysfs infrastructure is up */
	if (!sysfs_initialized)
		return;

	/* One allocation holds both attributes: [0]=legacy_io, [1]=legacy_mem */
	b->legacy_io = kzalloc_objs(struct bin_attribute, 2, GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;	/* 64K of I/O port space */
	b->legacy_io->attr.mode = 0600;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	/* See pci_create_attr() for motivation */
	b->legacy_io->llseek = pci_llseek_resource;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	b->legacy_io->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;	/* first meg of bus space */
	b->legacy_mem->attr.mode = 0600;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	/* See pci_create_attr() for motivation */
	b->legacy_mem->llseek = pci_llseek_resource;
	b->legacy_mem->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}
1058 
1059 void pci_remove_legacy_files(struct pci_bus *b)
1060 {
1061 	if (b->legacy_io) {
1062 		device_remove_bin_file(&b->dev, b->legacy_io);
1063 		device_remove_bin_file(&b->dev, b->legacy_mem);
1064 		kfree(b->legacy_io); /* both are allocated here */
1065 	}
1066 }
1067 #endif /* HAVE_PCI_LEGACY */
1068 
1069 #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, const struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;	/* BAR index stashed at create time */
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];
	int ret;

	/* Refused entirely under kernel lockdown */
	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	/* Regions claimed exclusively by the kernel are not mappable */
	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	/* The requested range must lie within the BAR */
	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}
1102 
/* mmap handler for the uncached "resourceN" files (write_combine = 0). */
static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}
1109 
/* mmap handler for the "resourceN_wc" files (write_combine = 1). */
static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}
1116 
/*
 * pci_resource_io - read or write an I/O port BAR through sysfs
 *
 * @off is the byte offset into the BAR and @count must be exactly 1, 2 or
 * 4 (matching the inb/inw/inl port accessor widths); anything else yields
 * -EINVAL.  A transfer starting past the end of the BAR returns 0; one
 * that starts inside but would run past the end returns -EINVAL.  Without
 * CONFIG_HAS_IOPORT there are no port accessors, so fail with -ENXIO.
 */
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       const struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	/* Translate file offset to an absolute port number. */
	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
#else
	return -ENXIO;
#endif
}
1159 
/* sysfs read handler for I/O port BARs; see pci_resource_io(). */
static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
1166 
1167 static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
1168 				     const struct bin_attribute *attr, char *buf,
1169 				     loff_t off, size_t count)
1170 {
1171 	int ret;
1172 
1173 	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1174 	if (ret)
1175 		return ret;
1176 
1177 	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
1178 }
1179 
1180 /**
1181  * pci_remove_resource_files - cleanup resource files
1182  * @pdev: dev to cleanup
1183  *
1184  * If we created resource files for @pdev, remove them from sysfs and
1185  * free their resources.
1186  */
1187 static void pci_remove_resource_files(struct pci_dev *pdev)
1188 {
1189 	int i;
1190 
1191 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1192 		struct bin_attribute *res_attr;
1193 
1194 		res_attr = pdev->res_attr[i];
1195 		if (res_attr) {
1196 			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1197 			kfree(res_attr);
1198 		}
1199 
1200 		res_attr = pdev->res_attr_wc[i];
1201 		if (res_attr) {
1202 			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1203 			kfree(res_attr);
1204 		}
1205 	}
1206 }
1207 
1208 static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
1209 {
1210 	/* allocate attribute structure, piggyback attribute name */
1211 	int name_len = write_combine ? 13 : 10;
1212 	struct bin_attribute *res_attr;
1213 	char *res_attr_name;
1214 	int retval;
1215 
1216 	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
1217 	if (!res_attr)
1218 		return -ENOMEM;
1219 
1220 	res_attr_name = (char *)(res_attr + 1);
1221 
1222 	sysfs_bin_attr_init(res_attr);
1223 	if (write_combine) {
1224 		sprintf(res_attr_name, "resource%d_wc", num);
1225 		res_attr->mmap = pci_mmap_resource_wc;
1226 	} else {
1227 		sprintf(res_attr_name, "resource%d", num);
1228 		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
1229 			res_attr->read = pci_read_resource_io;
1230 			res_attr->write = pci_write_resource_io;
1231 			if (arch_can_pci_mmap_io())
1232 				res_attr->mmap = pci_mmap_resource_uc;
1233 		} else {
1234 			res_attr->mmap = pci_mmap_resource_uc;
1235 		}
1236 	}
1237 	if (res_attr->mmap) {
1238 		res_attr->f_mapping = iomem_get_mapping;
1239 		/*
1240 		 * generic_file_llseek() consults f_mapping->host to determine
1241 		 * the file size. As iomem_inode knows nothing about the
1242 		 * attribute, it's not going to work, so override it as well.
1243 		 */
1244 		res_attr->llseek = pci_llseek_resource;
1245 	}
1246 	res_attr->attr.name = res_attr_name;
1247 	res_attr->attr.mode = 0600;
1248 	res_attr->size = pci_resource_len(pdev, num);
1249 	res_attr->private = (void *)(unsigned long)num;
1250 	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
1251 	if (retval) {
1252 		kfree(res_attr);
1253 		return retval;
1254 	}
1255 
1256 	if (write_combine)
1257 		pdev->res_attr_wc[num] = res_attr;
1258 	else
1259 		pdev->res_attr[num] = res_attr;
1260 
1261 	return 0;
1262 }
1263 
1264 /**
1265  * pci_create_resource_files - create resource files in sysfs for @dev
1266  * @pdev: dev in question
1267  *
1268  * Walk the resources in @pdev creating files for each resource available.
1269  */
1270 static int pci_create_resource_files(struct pci_dev *pdev)
1271 {
1272 	int i;
1273 	int retval;
1274 
1275 	/* Skip devices with non-mappable BARs */
1276 	if (pdev->non_mappable_bars)
1277 		return 0;
1278 
1279 	/* Expose the PCI resources from this device as files */
1280 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1281 
1282 		/* skip empty resources */
1283 		if (!pci_resource_len(pdev, i))
1284 			continue;
1285 
1286 		retval = pci_create_attr(pdev, i, 0);
1287 		/* for prefetchable resources, create a WC mappable file */
1288 		if (!retval && arch_can_pci_mmap_wc() &&
1289 		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
1290 			retval = pci_create_attr(pdev, i, 1);
1291 		if (retval) {
1292 			pci_remove_resource_files(pdev);
1293 			return retval;
1294 		}
1295 	}
1296 	return 0;
1297 }
1298 #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
/* No mmap support on this config: provide weak no-op fallbacks. */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
1301 #endif
1302 
1303 /**
1304  * pci_write_rom - used to enable access to the PCI ROM display
1305  * @filp: sysfs file
1306  * @kobj: kernel object handle
1307  * @bin_attr: struct bin_attribute for this file
1308  * @buf: user input
1309  * @off: file offset
1310  * @count: number of byte in input
1311  *
1312  * writing anything except 0 enables it
1313  */
1314 static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
1315 			     const struct bin_attribute *bin_attr, char *buf,
1316 			     loff_t off, size_t count)
1317 {
1318 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1319 
1320 	if ((off ==  0) && (*buf == '0') && (count == 2))
1321 		pdev->rom_attr_enabled = 0;
1322 	else
1323 		pdev->rom_attr_enabled = 1;
1324 
1325 	return count;
1326 }
1327 
1328 /**
1329  * pci_read_rom - read a PCI ROM
1330  * @filp: sysfs file
1331  * @kobj: kernel object handle
1332  * @bin_attr: struct bin_attribute for this file
1333  * @buf: where to put the data we read from the ROM
1334  * @off: file offset
1335  * @count: number of bytes to read
1336  *
1337  * Put @count bytes starting at @off into @buf from the ROM in the PCI
1338  * device corresponding to @kobj.
1339  */
1340 static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
1341 			    const struct bin_attribute *bin_attr, char *buf,
1342 			    loff_t off, size_t count)
1343 {
1344 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1345 	void __iomem *rom;
1346 	size_t size;
1347 
1348 	if (!pdev->rom_attr_enabled)
1349 		return -EINVAL;
1350 
1351 	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
1352 	if (!rom || !size)
1353 		return -EIO;
1354 
1355 	if (off >= size)
1356 		count = 0;
1357 	else {
1358 		if (off + count > size)
1359 			count = size - off;
1360 
1361 		memcpy_fromio(buf, rom + off, count);
1362 	}
1363 	pci_unmap_rom(pdev, rom);
1364 
1365 	return count;
1366 }
/* "rom" is root-only (0600); reads require a prior enabling write. */
static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);

static const struct bin_attribute *const pci_dev_rom_attrs[] = {
	&bin_attr_rom,
	NULL,
};
1373 
1374 static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
1375 					   const struct bin_attribute *a, int n)
1376 {
1377 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1378 
1379 	/* If the device has a ROM, try to expose it in sysfs. */
1380 	if (!pci_resource_end(pdev, PCI_ROM_RESOURCE))
1381 		return 0;
1382 
1383 	return a->attr.mode;
1384 }
1385 
/* Report the ROM BAR length as the size of the "rom" sysfs file. */
static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
					const struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	return pci_resource_len(pdev, PCI_ROM_RESOURCE);
}
1393 
/* ROM attribute group: visibility and file size are decided per device. */
static const struct attribute_group pci_dev_rom_attr_group = {
	.bin_attrs = pci_dev_rom_attrs,
	.is_bin_visible = pci_dev_rom_attr_is_visible,
	.bin_size = pci_dev_rom_attr_bin_size,
};
1399 
1400 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
1401 			   const char *buf, size_t count)
1402 {
1403 	struct pci_dev *pdev = to_pci_dev(dev);
1404 	unsigned long val;
1405 	ssize_t result;
1406 
1407 	if (kstrtoul(buf, 0, &val) < 0)
1408 		return -EINVAL;
1409 
1410 	if (val != 1)
1411 		return -EINVAL;
1412 
1413 	pm_runtime_get_sync(dev);
1414 	result = pci_reset_function(pdev);
1415 	pm_runtime_put(dev);
1416 	if (result < 0)
1417 		return result;
1418 
1419 	return count;
1420 }
1421 static DEVICE_ATTR_WO(reset);
1422 
/* Attributes gated by pci_dev_reset_attr_is_visible() below. */
static struct attribute *pci_dev_reset_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};
1427 
1428 static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
1429 					     struct attribute *a, int n)
1430 {
1431 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1432 
1433 	if (!pci_reset_supported(pdev))
1434 		return 0;
1435 
1436 	return a->mode;
1437 }
1438 
/* Group exposing "reset" only when the device supports a reset. */
static const struct attribute_group pci_dev_reset_attr_group = {
	.attrs = pci_dev_reset_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};
1443 
1444 static ssize_t reset_method_show(struct device *dev,
1445 				 struct device_attribute *attr, char *buf)
1446 {
1447 	struct pci_dev *pdev = to_pci_dev(dev);
1448 	ssize_t len = 0;
1449 	int i, m;
1450 
1451 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
1452 		m = pdev->reset_methods[i];
1453 		if (!m)
1454 			break;
1455 
1456 		len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
1457 				     pci_reset_fn_methods[m].name);
1458 	}
1459 
1460 	if (len)
1461 		len += sysfs_emit_at(buf, len, "\n");
1462 
1463 	return len;
1464 }
1465 
1466 static int reset_method_lookup(const char *name)
1467 {
1468 	int m;
1469 
1470 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
1471 		if (sysfs_streq(name, pci_reset_fn_methods[m].name))
1472 			return m;
1473 	}
1474 
1475 	return 0;	/* not found */
1476 }
1477 
/*
 * reset_method_store - configure the priority-ordered reset method list.
 *
 * Accepted input: "" disables all methods; "default" re-probes the
 * default list; otherwise a space-separated list of method names,
 * highest priority first.  Invalid or unsupported names leave the
 * previous configuration untouched and return -EINVAL.
 */
static ssize_t reset_method_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	char *tmp_options, *name;
	int m, n;
	u8 reset_methods[PCI_NUM_RESET_METHODS] = {};

	if (sysfs_streq(buf, "")) {
		pdev->reset_methods[0] = 0;
		pci_warn(pdev, "All device reset methods disabled by user");
		return count;
	}

	/*
	 * NOTE(review): presumably a scope-based runtime-PM guard that keeps
	 * the device active for the PCI_RESET_PROBE calls below and is
	 * released on function exit -- confirm against its definition.
	 */
	PM_RUNTIME_ACQUIRE(dev, pm);
	if (PM_RUNTIME_ACQUIRE_ERR(&pm))
		return -ENXIO;

	if (sysfs_streq(buf, "default")) {
		pci_init_reset_methods(pdev);
		return count;
	}

	/* __free(kfree): freed automatically on every return path below */
	char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	n = 0;
	tmp_options = options;
	while ((name = strsep(&tmp_options, " ")) != NULL) {
		if (sysfs_streq(name, ""))
			continue;

		name = strim(name);

		/* Leave previous methods unchanged if input is invalid */
		m = reset_method_lookup(name);
		if (!m) {
			pci_err(pdev, "Invalid reset method '%s'", name);
			return -EINVAL;
		}

		if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
			pci_err(pdev, "Unsupported reset method '%s'", name);
			return -EINVAL;
		}

		if (n == PCI_NUM_RESET_METHODS - 1) {
			pci_err(pdev, "Too many reset methods\n");
			return -EINVAL;
		}

		reset_methods[n++] = m;
	}

	reset_methods[n] = 0;

	/* Warn if dev-specific supported but not highest priority */
	if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
	    reset_methods[0] != 1)
		pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
	memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
	return count;
}
static DEVICE_ATTR_RW(reset_method);
1544 
static struct attribute *pci_dev_reset_method_attrs[] = {
	&dev_attr_reset_method.attr,
	NULL,
};

/* Shares the "reset supported" visibility check with the reset group. */
static const struct attribute_group pci_dev_reset_method_attr_group = {
	.attrs = pci_dev_reset_method_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};
1554 
1555 static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
1556 {
1557 	struct pci_dev *pdev = to_pci_dev(dev);
1558 	ssize_t ret;
1559 
1560 	pci_config_pm_runtime_get(pdev);
1561 
1562 	ret = sysfs_emit(buf, "%016llx\n",
1563 			 pci_rebar_get_possible_sizes(pdev, n));
1564 
1565 	pci_config_pm_runtime_put(pdev);
1566 
1567 	return ret;
1568 }
1569 
/*
 * __resource_resize_store - resize BAR @n via the Resizable BAR mechanism.
 *
 * Refuses with -EBUSY while a driver is bound or SR-IOV VFs exist.
 * Memory decoding is disabled around the resize, the sysfs resource
 * files are dropped and recreated, and bus resources are reassigned
 * to accommodate the new size.
 */
static ssize_t __resource_resize_store(struct device *dev, int n,
				       const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;
	unsigned long size;
	int ret;
	u16 cmd;

	if (kstrtoul(buf, 0, &size) < 0)
		return -EINVAL;

	device_lock(dev);
	/* No resizing underneath a bound driver or with VFs enabled. */
	if (dev->driver || pci_num_vf(pdev)) {
		ret = -EBUSY;
		goto unlock;
	}

	pci_config_pm_runtime_get(pdev);

	/* VGA devices: evict conflicting generic framebuffer drivers first. */
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
		ret = aperture_remove_conflicting_pci_devices(pdev,
						"resourceN_resize");
		if (ret)
			goto pm_put;
	}

	/* Disable memory decoding while the BAR is being moved/resized. */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Sysfs files describe the old size; drop and recreate them. */
	pci_remove_resource_files(pdev);

	ret = pci_resize_resource(pdev, n, size, 0);

	pci_assign_unassigned_bus_resources(bus);

	if (pci_create_resource_files(pdev))
		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");

	/* Restore the original command register (re-enables decoding). */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
pm_put:
	pci_config_pm_runtime_put(pdev);
unlock:
	device_unlock(dev);

	return ret ? ret : count;
}
1618 
/*
 * Generate a resourceN_resize show/store pair that forwards to the
 * common __resource_resize_show()/__resource_resize_store() helpers
 * with BAR number @n.
 */
#define pci_dev_resource_resize_attr(n)					\
static ssize_t resource##n##_resize_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	return __resource_resize_show(dev, n, buf);			\
}									\
static ssize_t resource##n##_resize_store(struct device *dev,		\
					  struct device_attribute *attr,\
					  const char *buf, size_t count)\
{									\
	return __resource_resize_store(dev, n, buf, count);		\
}									\
static DEVICE_ATTR_RW(resource##n##_resize)

/* One attribute per standard BAR (0-5). */
pci_dev_resource_resize_attr(0);
pci_dev_resource_resize_attr(1);
pci_dev_resource_resize_attr(2);
pci_dev_resource_resize_attr(3);
pci_dev_resource_resize_attr(4);
pci_dev_resource_resize_attr(5);
1640 
/* Index order matches BAR numbers; is_visible relies on this via @n. */
static struct attribute *resource_resize_attrs[] = {
	&dev_attr_resource0_resize.attr,
	&dev_attr_resource1_resize.attr,
	&dev_attr_resource2_resize.attr,
	&dev_attr_resource3_resize.attr,
	&dev_attr_resource4_resize.attr,
	&dev_attr_resource5_resize.attr,
	NULL,
};
1650 
1651 static umode_t resource_resize_is_visible(struct kobject *kobj,
1652 					  struct attribute *a, int n)
1653 {
1654 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1655 
1656 	return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
1657 }
1658 
/* Group for the per-BAR resize attributes, filtered per device. */
static const struct attribute_group pci_dev_resource_resize_group = {
	.attrs = resource_resize_attrs,
	.is_visible = resource_resize_is_visible,
};
1663 
1664 int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
1665 {
1666 	if (!sysfs_initialized)
1667 		return -EACCES;
1668 
1669 	return pci_create_resource_files(pdev);
1670 }
1671 
1672 /**
1673  * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
1674  * @pdev: device whose entries we should free
1675  *
1676  * Cleanup when @pdev is removed from sysfs.
1677  */
1678 void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
1679 {
1680 	if (!sysfs_initialized)
1681 		return;
1682 
1683 	pci_remove_resource_files(pdev);
1684 }
1685 
1686 static int __init pci_sysfs_init(void)
1687 {
1688 	struct pci_dev *pdev = NULL;
1689 	struct pci_bus *pbus = NULL;
1690 	int retval;
1691 
1692 	sysfs_initialized = 1;
1693 	for_each_pci_dev(pdev) {
1694 		retval = pci_create_sysfs_dev_files(pdev);
1695 		if (retval) {
1696 			pci_dev_put(pdev);
1697 			return retval;
1698 		}
1699 	}
1700 
1701 	while ((pbus = pci_find_next_bus(pbus)))
1702 		pci_create_legacy_files(pbus);
1703 
1704 	return 0;
1705 }
1706 late_initcall(pci_sysfs_init);
1707 
/* Conditionally-visible device attributes; see pci_dev_attrs_are_visible(). */
static struct attribute *pci_dev_dev_attrs[] = {
	&dev_attr_boot_vga.attr,
	&dev_attr_serial_number.attr,
	NULL,
};
1713 
1714 static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1715 					 struct attribute *a, int n)
1716 {
1717 	struct device *dev = kobj_to_dev(kobj);
1718 	struct pci_dev *pdev = to_pci_dev(dev);
1719 
1720 	if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
1721 		return a->mode;
1722 
1723 	if (a == &dev_attr_serial_number.attr && pci_get_dsn(pdev))
1724 		return a->mode;
1725 
1726 	return 0;
1727 }
1728 
/* Hotplug controls, hidden for SR-IOV VFs (see is_visible below). */
static struct attribute *pci_dev_hp_attrs[] = {
	&dev_attr_remove.attr,
	&dev_attr_dev_rescan.attr,
	NULL,
};
1734 
1735 static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
1736 					    struct attribute *a, int n)
1737 {
1738 	struct device *dev = kobj_to_dev(kobj);
1739 	struct pci_dev *pdev = to_pci_dev(dev);
1740 
1741 	if (pdev->is_virtfn)
1742 		return 0;
1743 
1744 	return a->mode;
1745 }
1746 
1747 static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
1748 					    struct attribute *a, int n)
1749 {
1750 	struct device *dev = kobj_to_dev(kobj);
1751 	struct pci_dev *pdev = to_pci_dev(dev);
1752 
1753 	if (pci_is_bridge(pdev))
1754 		return a->mode;
1755 
1756 	return 0;
1757 }
1758 
1759 static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
1760 					  struct attribute *a, int n)
1761 {
1762 	struct device *dev = kobj_to_dev(kobj);
1763 	struct pci_dev *pdev = to_pci_dev(dev);
1764 
1765 	if (pci_is_pcie(pdev))
1766 		return a->mode;
1767 
1768 	return 0;
1769 }
1770 
/* Unconditional core attributes (pci_dev_attrs is defined earlier). */
static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

/*
 * Groups registered for every PCI device; config-dependent entries are
 * compiled in via #ifdef, and ARCH_PCI_DEV_GROUPS lets architectures
 * append their own (it expands to nothing by default).
 */
const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	&pci_dev_config_attr_group,
	&pci_dev_rom_attr_group,
	&pci_dev_reset_attr_group,
	&pci_dev_reset_method_attr_group,
	&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
	&pci_dev_smbios_attr_group,
#endif
#ifdef CONFIG_ACPI
	&pci_dev_acpi_attr_group,
#endif
	&pci_dev_resource_resize_group,
	ARCH_PCI_DEV_GROUPS
	NULL,
};
1792 
/* Each group below filters its files per-device via its is_visible hook. */
static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};
1812 
/*
 * Additional attribute groups for PCI devices; config-dependent entries
 * are compiled in via #ifdef, and each group's is_visible callback
 * decides per device whether its files appear.
 */
const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_pf_dev_attr_group,
	&sriov_vf_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
	&aer_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
	&aspm_ctrl_attr_group,
#endif
#ifdef CONFIG_PCI_DOE
	&pci_doe_sysfs_group,
#endif
#ifdef CONFIG_PCI_TSM
	&pci_tsm_auth_attr_group,
	&pci_tsm_attr_group,
#endif
	NULL,
};
1838