1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
4  * (C) Copyright 2002-2004 IBM Corp.
5  * (C) Copyright 2003 Matthew Wilcox
6  * (C) Copyright 2003 Hewlett-Packard
7  * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
8  * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
9  *
10  * File attributes for PCI devices
11  *
12  * Modeled after usb's driverfs.c
13  */
14 
15 #include <linux/bitfield.h>
16 #include <linux/cleanup.h>
17 #include <linux/kernel.h>
18 #include <linux/sched.h>
19 #include <linux/pci.h>
20 #include <linux/stat.h>
21 #include <linux/export.h>
22 #include <linux/topology.h>
23 #include <linux/mm.h>
24 #include <linux/fs.h>
25 #include <linux/capability.h>
26 #include <linux/security.h>
27 #include <linux/slab.h>
28 #include <linux/vgaarb.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/msi.h>
31 #include <linux/of.h>
32 #include <linux/aperture.h>
33 #include <linux/unaligned.h>
34 #include "pci.h"
35 
36 #ifndef ARCH_PCI_DEV_GROUPS
37 #define ARCH_PCI_DEV_GROUPS
38 #endif
39 
40 static int sysfs_initialized;	/* = 0 */
41 
42 /* show configuration fields */
43 #define pci_config_attr(field, format_string)				\
44 static ssize_t								\
45 field##_show(struct device *dev, struct device_attribute *attr, char *buf)				\
46 {									\
47 	struct pci_dev *pdev;						\
48 									\
49 	pdev = to_pci_dev(dev);						\
50 	return sysfs_emit(buf, format_string, pdev->field);		\
51 }									\
52 static DEVICE_ATTR_RO(field)
53 
54 pci_config_attr(vendor, "0x%04x\n");
55 pci_config_attr(device, "0x%04x\n");
56 pci_config_attr(subsystem_vendor, "0x%04x\n");
57 pci_config_attr(subsystem_device, "0x%04x\n");
58 pci_config_attr(revision, "0x%02x\n");
59 pci_config_attr(class, "0x%06x\n");
60 
61 static ssize_t irq_show(struct device *dev,
62 			struct device_attribute *attr,
63 			char *buf)
64 {
65 	struct pci_dev *pdev = to_pci_dev(dev);
66 
67 #ifdef CONFIG_PCI_MSI
68 	/*
69 	 * For MSI, show the first MSI IRQ; for all other cases including
70 	 * MSI-X, show the legacy INTx IRQ.
71 	 */
72 	if (pdev->msi_enabled)
73 		return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
74 #endif
75 
76 	return sysfs_emit(buf, "%u\n", pdev->irq);
77 }
78 static DEVICE_ATTR_RO(irq);
79 
80 static ssize_t broken_parity_status_show(struct device *dev,
81 					 struct device_attribute *attr,
82 					 char *buf)
83 {
84 	struct pci_dev *pdev = to_pci_dev(dev);
85 	return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
86 }
87 
88 static ssize_t broken_parity_status_store(struct device *dev,
89 					  struct device_attribute *attr,
90 					  const char *buf, size_t count)
91 {
92 	struct pci_dev *pdev = to_pci_dev(dev);
93 	unsigned long val;
94 
95 	if (kstrtoul(buf, 0, &val) < 0)
96 		return -EINVAL;
97 
98 	pdev->broken_parity_status = !!val;
99 
100 	return count;
101 }
102 static DEVICE_ATTR_RW(broken_parity_status);
103 
104 static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
105 				      struct device_attribute *attr, char *buf)
106 {
107 	const struct cpumask *mask;
108 
109 #ifdef CONFIG_NUMA
110 	if (dev_to_node(dev) == NUMA_NO_NODE)
111 		mask = cpu_online_mask;
112 	else
113 		mask = cpumask_of_node(dev_to_node(dev));
114 #else
115 	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
116 #endif
117 	return cpumap_print_to_pagebuf(list, buf, mask);
118 }
119 
120 static ssize_t local_cpus_show(struct device *dev,
121 			       struct device_attribute *attr, char *buf)
122 {
123 	return pci_dev_show_local_cpu(dev, false, attr, buf);
124 }
125 static DEVICE_ATTR_RO(local_cpus);
126 
127 static ssize_t local_cpulist_show(struct device *dev,
128 				  struct device_attribute *attr, char *buf)
129 {
130 	return pci_dev_show_local_cpu(dev, true, attr, buf);
131 }
132 static DEVICE_ATTR_RO(local_cpulist);
133 
134 /*
135  * PCI Bus Class Devices
136  */
137 static ssize_t cpuaffinity_show(struct device *dev,
138 				struct device_attribute *attr, char *buf)
139 {
140 	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
141 
142 	return cpumap_print_to_pagebuf(false, buf, cpumask);
143 }
144 static DEVICE_ATTR_RO(cpuaffinity);
145 
146 static ssize_t cpulistaffinity_show(struct device *dev,
147 				    struct device_attribute *attr, char *buf)
148 {
149 	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
150 
151 	return cpumap_print_to_pagebuf(true, buf, cpumask);
152 }
153 static DEVICE_ATTR_RO(cpulistaffinity);
154 
155 static ssize_t power_state_show(struct device *dev,
156 				struct device_attribute *attr, char *buf)
157 {
158 	struct pci_dev *pdev = to_pci_dev(dev);
159 
160 	return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
161 }
162 static DEVICE_ATTR_RO(power_state);
163 
164 /* show resources */
165 static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
166 			     char *buf)
167 {
168 	struct pci_dev *pci_dev = to_pci_dev(dev);
169 	int i;
170 	int max;
171 	resource_size_t start, end;
172 	size_t len = 0;
173 
174 	if (pci_dev->subordinate)
175 		max = DEVICE_COUNT_RESOURCE;
176 	else
177 		max = PCI_BRIDGE_RESOURCES;
178 
179 	for (i = 0; i < max; i++) {
180 		struct resource *res =  &pci_dev->resource[i];
181 		struct resource zerores = {};
182 
183 		/* For backwards compatibility */
184 		if (i >= PCI_BRIDGE_RESOURCES && i <= PCI_BRIDGE_RESOURCE_END &&
185 		    res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
186 			res = &zerores;
187 
188 		pci_resource_to_user(pci_dev, i, res, &start, &end);
189 		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
190 				     (unsigned long long)start,
191 				     (unsigned long long)end,
192 				     (unsigned long long)res->flags);
193 	}
194 	return len;
195 }
196 static DEVICE_ATTR_RO(resource);
197 
198 static ssize_t max_link_speed_show(struct device *dev,
199 				   struct device_attribute *attr, char *buf)
200 {
201 	struct pci_dev *pdev = to_pci_dev(dev);
202 
203 	return sysfs_emit(buf, "%s\n",
204 			  pci_speed_string(pcie_get_speed_cap(pdev)));
205 }
206 static DEVICE_ATTR_RO(max_link_speed);
207 
208 static ssize_t max_link_width_show(struct device *dev,
209 				   struct device_attribute *attr, char *buf)
210 {
211 	struct pci_dev *pdev = to_pci_dev(dev);
212 	ssize_t ret;
213 
214 	/* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
215 	pci_config_pm_runtime_get(pdev);
216 	ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
217 	pci_config_pm_runtime_put(pdev);
218 
219 	return ret;
220 }
221 static DEVICE_ATTR_RO(max_link_width);
222 
223 static ssize_t current_link_speed_show(struct device *dev,
224 				       struct device_attribute *attr, char *buf)
225 {
226 	struct pci_dev *pci_dev = to_pci_dev(dev);
227 	u16 linkstat;
228 	int err;
229 	enum pci_bus_speed speed;
230 
231 	pci_config_pm_runtime_get(pci_dev);
232 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
233 	pci_config_pm_runtime_put(pci_dev);
234 
235 	if (err)
236 		return -EINVAL;
237 
238 	speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
239 
240 	return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
241 }
242 static DEVICE_ATTR_RO(current_link_speed);
243 
244 static ssize_t current_link_width_show(struct device *dev,
245 				       struct device_attribute *attr, char *buf)
246 {
247 	struct pci_dev *pci_dev = to_pci_dev(dev);
248 	u16 linkstat;
249 	int err;
250 
251 	pci_config_pm_runtime_get(pci_dev);
252 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
253 	pci_config_pm_runtime_put(pci_dev);
254 
255 	if (err)
256 		return -EINVAL;
257 
258 	return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
259 }
260 static DEVICE_ATTR_RO(current_link_width);
261 
262 static ssize_t secondary_bus_number_show(struct device *dev,
263 					 struct device_attribute *attr,
264 					 char *buf)
265 {
266 	struct pci_dev *pci_dev = to_pci_dev(dev);
267 	u8 sec_bus;
268 	int err;
269 
270 	pci_config_pm_runtime_get(pci_dev);
271 	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
272 	pci_config_pm_runtime_put(pci_dev);
273 
274 	if (err)
275 		return -EINVAL;
276 
277 	return sysfs_emit(buf, "%u\n", sec_bus);
278 }
279 static DEVICE_ATTR_RO(secondary_bus_number);
280 
281 static ssize_t subordinate_bus_number_show(struct device *dev,
282 					   struct device_attribute *attr,
283 					   char *buf)
284 {
285 	struct pci_dev *pci_dev = to_pci_dev(dev);
286 	u8 sub_bus;
287 	int err;
288 
289 	pci_config_pm_runtime_get(pci_dev);
290 	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
291 	pci_config_pm_runtime_put(pci_dev);
292 
293 	if (err)
294 		return -EINVAL;
295 
296 	return sysfs_emit(buf, "%u\n", sub_bus);
297 }
298 static DEVICE_ATTR_RO(subordinate_bus_number);
299 
300 static ssize_t ari_enabled_show(struct device *dev,
301 				struct device_attribute *attr,
302 				char *buf)
303 {
304 	struct pci_dev *pci_dev = to_pci_dev(dev);
305 
306 	return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
307 }
308 static DEVICE_ATTR_RO(ari_enabled);
309 
310 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
311 			     char *buf)
312 {
313 	struct pci_dev *pci_dev = to_pci_dev(dev);
314 
315 	return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
316 			  pci_dev->vendor, pci_dev->device,
317 			  pci_dev->subsystem_vendor, pci_dev->subsystem_device,
318 			  (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
319 			  (u8)(pci_dev->class));
320 }
321 static DEVICE_ATTR_RO(modalias);
322 
323 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
324 			     const char *buf, size_t count)
325 {
326 	struct pci_dev *pdev = to_pci_dev(dev);
327 	unsigned long val;
328 	ssize_t result = 0;
329 
330 	/* this can crash the machine when done on the "wrong" device */
331 	if (!capable(CAP_SYS_ADMIN))
332 		return -EPERM;
333 
334 	if (kstrtoul(buf, 0, &val) < 0)
335 		return -EINVAL;
336 
337 	device_lock(dev);
338 	if (dev->driver)
339 		result = -EBUSY;
340 	else if (val)
341 		result = pci_enable_device(pdev);
342 	else if (pci_is_enabled(pdev))
343 		pci_disable_device(pdev);
344 	else
345 		result = -EIO;
346 	device_unlock(dev);
347 
348 	return result < 0 ? result : count;
349 }
350 
351 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
352 			    char *buf)
353 {
354 	struct pci_dev *pdev;
355 
356 	pdev = to_pci_dev(dev);
357 	return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
358 }
359 static DEVICE_ATTR_RW(enable);
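/*
 * Illustrative note: the pair above appears to userspace as
 * /sys/bus/pci/devices/<domain:bus:dev.fn>/enable.  Reading returns the
 * current enable count; a CAP_SYS_ADMIN writer may enable/disable the
 * device with "1"/"0" as long as no driver is bound.
 */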
360 
361 #ifdef CONFIG_NUMA
362 static ssize_t numa_node_store(struct device *dev,
363 			       struct device_attribute *attr, const char *buf,
364 			       size_t count)
365 {
366 	struct pci_dev *pdev = to_pci_dev(dev);
367 	int node;
368 
369 	if (!capable(CAP_SYS_ADMIN))
370 		return -EPERM;
371 
372 	if (kstrtoint(buf, 0, &node) < 0)
373 		return -EINVAL;
374 
375 	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
376 		return -EINVAL;
377 
378 	if (node != NUMA_NO_NODE && !node_online(node))
379 		return -EINVAL;
380 
381 	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
382 	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d.  Contact your vendor for updates.",
383 		  node);
384 
385 	dev->numa_node = node;
386 	return count;
387 }
388 
389 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
390 			      char *buf)
391 {
392 	return sysfs_emit(buf, "%d\n", dev->numa_node);
393 }
394 static DEVICE_ATTR_RW(numa_node);
395 #endif
396 
397 static ssize_t dma_mask_bits_show(struct device *dev,
398 				  struct device_attribute *attr, char *buf)
399 {
400 	struct pci_dev *pdev = to_pci_dev(dev);
401 
402 	return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
403 }
404 static DEVICE_ATTR_RO(dma_mask_bits);
405 
406 static ssize_t consistent_dma_mask_bits_show(struct device *dev,
407 					     struct device_attribute *attr,
408 					     char *buf)
409 {
410 	return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
411 }
412 static DEVICE_ATTR_RO(consistent_dma_mask_bits);
413 
414 static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
415 			    char *buf)
416 {
417 	struct pci_dev *pdev = to_pci_dev(dev);
418 	struct pci_bus *subordinate = pdev->subordinate;
419 
420 	return sysfs_emit(buf, "%u\n", subordinate ?
421 			  !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
422 			    : !pdev->no_msi);
423 }
424 
425 static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
426 			     const char *buf, size_t count)
427 {
428 	struct pci_dev *pdev = to_pci_dev(dev);
429 	struct pci_bus *subordinate = pdev->subordinate;
430 	unsigned long val;
431 
432 	if (!capable(CAP_SYS_ADMIN))
433 		return -EPERM;
434 
435 	if (kstrtoul(buf, 0, &val) < 0)
436 		return -EINVAL;
437 
438 	/*
439 	 * "no_msi" and "bus_flags" only affect what happens when a driver
440 	 * requests MSI or MSI-X.  They don't affect any drivers that have
441 	 * already requested MSI or MSI-X.
442 	 */
443 	if (!subordinate) {
444 		pdev->no_msi = !val;
445 		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
446 			 val ? "allowed" : "disallowed");
447 		return count;
448 	}
449 
450 	if (val)
451 		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
452 	else
453 		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
454 
455 	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
456 		 val ? "allowed" : "disallowed");
457 	return count;
458 }
459 static DEVICE_ATTR_RW(msi_bus);
460 
461 static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count)
462 {
463 	unsigned long val;
464 	struct pci_bus *b = NULL;
465 
466 	if (kstrtoul(buf, 0, &val) < 0)
467 		return -EINVAL;
468 
469 	if (val) {
470 		pci_lock_rescan_remove();
471 		while ((b = pci_find_next_bus(b)) != NULL)
472 			pci_rescan_bus(b);
473 		pci_unlock_rescan_remove();
474 	}
475 	return count;
476 }
477 static BUS_ATTR_WO(rescan);
478 
479 static struct attribute *pci_bus_attrs[] = {
480 	&bus_attr_rescan.attr,
481 	NULL,
482 };
483 
484 static const struct attribute_group pci_bus_group = {
485 	.attrs = pci_bus_attrs,
486 };
487 
488 const struct attribute_group *pci_bus_groups[] = {
489 	&pci_bus_group,
490 	NULL,
491 };
492 
493 static ssize_t dev_rescan_store(struct device *dev,
494 				struct device_attribute *attr, const char *buf,
495 				size_t count)
496 {
497 	unsigned long val;
498 	struct pci_dev *pdev = to_pci_dev(dev);
499 
500 	if (kstrtoul(buf, 0, &val) < 0)
501 		return -EINVAL;
502 
503 	if (val) {
504 		pci_lock_rescan_remove();
505 		pci_rescan_bus(pdev->bus);
506 		pci_unlock_rescan_remove();
507 	}
508 	return count;
509 }
510 static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
511 							    dev_rescan_store);
512 
513 static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
514 			    const char *buf, size_t count)
515 {
516 	unsigned long val;
517 
518 	if (kstrtoul(buf, 0, &val) < 0)
519 		return -EINVAL;
520 
521 	if (val && device_remove_file_self(dev, attr))
522 		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
523 	return count;
524 }
525 static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
526 				  remove_store);
527 
528 static ssize_t bus_rescan_store(struct device *dev,
529 				struct device_attribute *attr,
530 				const char *buf, size_t count)
531 {
532 	unsigned long val;
533 	struct pci_bus *bus = to_pci_bus(dev);
534 
535 	if (kstrtoul(buf, 0, &val) < 0)
536 		return -EINVAL;
537 
538 	if (val) {
539 		pci_lock_rescan_remove();
540 		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
541 			pci_rescan_bus_bridge_resize(bus->self);
542 		else
543 			pci_rescan_bus(bus);
544 		pci_unlock_rescan_remove();
545 	}
546 	return count;
547 }
548 static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
549 							    bus_rescan_store);
550 
551 static ssize_t reset_subordinate_store(struct device *dev,
552 				struct device_attribute *attr,
553 				const char *buf, size_t count)
554 {
555 	struct pci_dev *pdev = to_pci_dev(dev);
556 	struct pci_bus *bus = pdev->subordinate;
557 	unsigned long val;
558 
559 	if (!capable(CAP_SYS_ADMIN))
560 		return -EPERM;
561 
562 	if (kstrtoul(buf, 0, &val) < 0)
563 		return -EINVAL;
564 
565 	if (val) {
566 		int ret = __pci_reset_bus(bus);
567 
568 		if (ret)
569 			return ret;
570 	}
571 
572 	return count;
573 }
574 static DEVICE_ATTR_WO(reset_subordinate);
575 
576 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
577 static ssize_t d3cold_allowed_store(struct device *dev,
578 				    struct device_attribute *attr,
579 				    const char *buf, size_t count)
580 {
581 	struct pci_dev *pdev = to_pci_dev(dev);
582 	unsigned long val;
583 
584 	if (kstrtoul(buf, 0, &val) < 0)
585 		return -EINVAL;
586 
587 	pdev->d3cold_allowed = !!val;
588 	pci_bridge_d3_update(pdev);
589 
590 	pm_runtime_resume(dev);
591 
592 	return count;
593 }
594 
595 static ssize_t d3cold_allowed_show(struct device *dev,
596 				   struct device_attribute *attr, char *buf)
597 {
598 	struct pci_dev *pdev = to_pci_dev(dev);
599 	return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
600 }
601 static DEVICE_ATTR_RW(d3cold_allowed);
602 #endif
603 
604 #ifdef CONFIG_OF
605 static ssize_t devspec_show(struct device *dev,
606 			    struct device_attribute *attr, char *buf)
607 {
608 	struct pci_dev *pdev = to_pci_dev(dev);
609 	struct device_node *np = pci_device_to_OF_node(pdev);
610 
611 	if (np == NULL)
612 		return 0;
613 	return sysfs_emit(buf, "%pOF\n", np);
614 }
615 static DEVICE_ATTR_RO(devspec);
616 #endif
617 
618 static ssize_t driver_override_store(struct device *dev,
619 				     struct device_attribute *attr,
620 				     const char *buf, size_t count)
621 {
622 	struct pci_dev *pdev = to_pci_dev(dev);
623 	int ret;
624 
625 	ret = driver_set_override(dev, &pdev->driver_override, buf, count);
626 	if (ret)
627 		return ret;
628 
629 	return count;
630 }
631 
632 static ssize_t driver_override_show(struct device *dev,
633 				    struct device_attribute *attr, char *buf)
634 {
635 	struct pci_dev *pdev = to_pci_dev(dev);
636 	ssize_t len;
637 
638 	device_lock(dev);
639 	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
640 	device_unlock(dev);
641 	return len;
642 }
643 static DEVICE_ATTR_RW(driver_override);
644 
645 static struct attribute *pci_dev_attrs[] = {
646 	&dev_attr_power_state.attr,
647 	&dev_attr_resource.attr,
648 	&dev_attr_vendor.attr,
649 	&dev_attr_device.attr,
650 	&dev_attr_subsystem_vendor.attr,
651 	&dev_attr_subsystem_device.attr,
652 	&dev_attr_revision.attr,
653 	&dev_attr_class.attr,
654 	&dev_attr_irq.attr,
655 	&dev_attr_local_cpus.attr,
656 	&dev_attr_local_cpulist.attr,
657 	&dev_attr_modalias.attr,
658 #ifdef CONFIG_NUMA
659 	&dev_attr_numa_node.attr,
660 #endif
661 	&dev_attr_dma_mask_bits.attr,
662 	&dev_attr_consistent_dma_mask_bits.attr,
663 	&dev_attr_enable.attr,
664 	&dev_attr_broken_parity_status.attr,
665 	&dev_attr_msi_bus.attr,
666 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
667 	&dev_attr_d3cold_allowed.attr,
668 #endif
669 #ifdef CONFIG_OF
670 	&dev_attr_devspec.attr,
671 #endif
672 	&dev_attr_driver_override.attr,
673 	&dev_attr_ari_enabled.attr,
674 	NULL,
675 };
676 
677 static struct attribute *pci_bridge_attrs[] = {
678 	&dev_attr_subordinate_bus_number.attr,
679 	&dev_attr_secondary_bus_number.attr,
680 	&dev_attr_reset_subordinate.attr,
681 	NULL,
682 };
683 
684 static struct attribute *pcie_dev_attrs[] = {
685 	&dev_attr_current_link_speed.attr,
686 	&dev_attr_current_link_width.attr,
687 	&dev_attr_max_link_width.attr,
688 	&dev_attr_max_link_speed.attr,
689 	NULL,
690 };
691 
692 static struct attribute *pcibus_attrs[] = {
693 	&dev_attr_bus_rescan.attr,
694 	&dev_attr_cpuaffinity.attr,
695 	&dev_attr_cpulistaffinity.attr,
696 	NULL,
697 };
698 
699 static const struct attribute_group pcibus_group = {
700 	.attrs = pcibus_attrs,
701 };
702 
703 const struct attribute_group *pcibus_groups[] = {
704 	&pcibus_group,
705 	NULL,
706 };
707 
708 static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
709 			     char *buf)
710 {
711 	struct pci_dev *pdev = to_pci_dev(dev);
712 	struct pci_dev *vga_dev = vga_default_device();
713 
714 	if (vga_dev)
715 		return sysfs_emit(buf, "%u\n", (pdev == vga_dev));
716 
717 	return sysfs_emit(buf, "%u\n",
718 			  !!(pdev->resource[PCI_ROM_RESOURCE].flags &
719 			     IORESOURCE_ROM_SHADOW));
720 }
721 static DEVICE_ATTR_RO(boot_vga);
722 
723 static ssize_t serial_number_show(struct device *dev,
724 				  struct device_attribute *attr, char *buf)
725 {
726 	struct pci_dev *pci_dev = to_pci_dev(dev);
727 	u64 dsn;
728 	u8 bytes[8];
729 
730 	dsn = pci_get_dsn(pci_dev);
731 	if (!dsn)
732 		return -EIO;
733 
734 	put_unaligned_be64(dsn, bytes);
735 	return sysfs_emit(buf, "%8phD\n", bytes);
736 }
737 static DEVICE_ATTR_ADMIN_RO(serial_number);
738 
739 static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
740 			       const struct bin_attribute *bin_attr, char *buf,
741 			       loff_t off, size_t count)
742 {
743 	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
744 	unsigned int size = 64;
745 	loff_t init_off = off;
746 	u8 *data = (u8 *) buf;
747 
748 	/* Several chips lock up trying to read undefined config space */
749 	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
750 		size = dev->cfg_size;
751 	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
752 		size = 128;
753 
754 	if (off > size)
755 		return 0;
756 	if (off + count > size) {
757 		size -= off;
758 		count = size;
759 	} else {
760 		size = count;
761 	}
762 
763 	pci_config_pm_runtime_get(dev);
764 
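	/*
	 * Copy using naturally aligned accesses: an odd leading byte and/or
	 * word to reach dword alignment, dwords for the bulk of the range,
	 * then a trailing word/byte for whatever is left.  pci_write_config()
	 * below mirrors the same pattern for writes.
	 */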
765 	if ((off & 1) && size) {
766 		u8 val;
767 		pci_user_read_config_byte(dev, off, &val);
768 		data[off - init_off] = val;
769 		off++;
770 		size--;
771 	}
772 
773 	if ((off & 3) && size > 2) {
774 		u16 val;
775 		pci_user_read_config_word(dev, off, &val);
776 		data[off - init_off] = val & 0xff;
777 		data[off - init_off + 1] = (val >> 8) & 0xff;
778 		off += 2;
779 		size -= 2;
780 	}
781 
782 	while (size > 3) {
783 		u32 val;
784 		pci_user_read_config_dword(dev, off, &val);
785 		data[off - init_off] = val & 0xff;
786 		data[off - init_off + 1] = (val >> 8) & 0xff;
787 		data[off - init_off + 2] = (val >> 16) & 0xff;
788 		data[off - init_off + 3] = (val >> 24) & 0xff;
789 		off += 4;
790 		size -= 4;
791 		cond_resched();
792 	}
793 
794 	if (size >= 2) {
795 		u16 val;
796 		pci_user_read_config_word(dev, off, &val);
797 		data[off - init_off] = val & 0xff;
798 		data[off - init_off + 1] = (val >> 8) & 0xff;
799 		off += 2;
800 		size -= 2;
801 	}
802 
803 	if (size > 0) {
804 		u8 val;
805 		pci_user_read_config_byte(dev, off, &val);
806 		data[off - init_off] = val;
807 	}
808 
809 	pci_config_pm_runtime_put(dev);
810 
811 	return count;
812 }
813 
814 static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
815 				const struct bin_attribute *bin_attr, char *buf,
816 				loff_t off, size_t count)
817 {
818 	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
819 	unsigned int size = count;
820 	loff_t init_off = off;
821 	u8 *data = (u8 *) buf;
822 	int ret;
823 
824 	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
825 	if (ret)
826 		return ret;
827 
828 	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
829 				  count)) {
830 		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
831 			      current->comm, off);
832 		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
833 	}
834 
835 	if (off > dev->cfg_size)
836 		return 0;
837 	if (off + count > dev->cfg_size) {
838 		size = dev->cfg_size - off;
839 		count = size;
840 	}
841 
842 	pci_config_pm_runtime_get(dev);
843 
844 	if ((off & 1) && size) {
845 		pci_user_write_config_byte(dev, off, data[off - init_off]);
846 		off++;
847 		size--;
848 	}
849 
850 	if ((off & 3) && size > 2) {
851 		u16 val = data[off - init_off];
852 		val |= (u16) data[off - init_off + 1] << 8;
853 		pci_user_write_config_word(dev, off, val);
854 		off += 2;
855 		size -= 2;
856 	}
857 
858 	while (size > 3) {
859 		u32 val = data[off - init_off];
860 		val |= (u32) data[off - init_off + 1] << 8;
861 		val |= (u32) data[off - init_off + 2] << 16;
862 		val |= (u32) data[off - init_off + 3] << 24;
863 		pci_user_write_config_dword(dev, off, val);
864 		off += 4;
865 		size -= 4;
866 	}
867 
868 	if (size >= 2) {
869 		u16 val = data[off - init_off];
870 		val |= (u16) data[off - init_off + 1] << 8;
871 		pci_user_write_config_word(dev, off, val);
872 		off += 2;
873 		size -= 2;
874 	}
875 
876 	if (size)
877 		pci_user_write_config_byte(dev, off, data[off - init_off]);
878 
879 	pci_config_pm_runtime_put(dev);
880 
881 	return count;
882 }
883 static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
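/*
 * Note: "config" is declared with size 0 here; the attribute group's
 * .bin_size callback below reports the real per-device size (256 bytes of
 * conventional config space or 4 KiB when extended config space exists).
 */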
884 
885 static const struct bin_attribute *const pci_dev_config_attrs[] = {
886 	&bin_attr_config,
887 	NULL,
888 };
889 
890 static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
891 					   const struct bin_attribute *a,
892 					   int n)
893 {
894 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
895 
896 	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
897 		return PCI_CFG_SPACE_EXP_SIZE;
898 	return PCI_CFG_SPACE_SIZE;
899 }
900 
901 static const struct attribute_group pci_dev_config_attr_group = {
902 	.bin_attrs = pci_dev_config_attrs,
903 	.bin_size = pci_dev_config_attr_bin_size,
904 };
905 
906 /*
907  * llseek operation for mmappable PCI resources.
908  * May be left unused if the arch doesn't provide them.
909  */
910 static __maybe_unused loff_t
911 pci_llseek_resource(struct file *filep,
912 		    struct kobject *kobj __always_unused,
913 		    const struct bin_attribute *attr,
914 		    loff_t offset, int whence)
915 {
916 	return fixed_size_llseek(filep, offset, whence, attr->size);
917 }
918 
919 #ifdef HAVE_PCI_LEGACY
920 /**
921  * pci_read_legacy_io - read byte(s) from legacy I/O port space
922  * @filp: open sysfs file
923  * @kobj: kobject corresponding to file to read from
924  * @bin_attr: struct bin_attribute for this file
925  * @buf: buffer to store results
926  * @off: offset into legacy I/O port space
927  * @count: number of bytes to read
928  *
929  * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
930  * callback routine (pci_legacy_read).
931  */
932 static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
933 				  const struct bin_attribute *bin_attr,
934 				  char *buf, loff_t off, size_t count)
935 {
936 	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
937 
938 	/* Only support 1, 2 or 4 byte accesses */
939 	if (count != 1 && count != 2 && count != 4)
940 		return -EINVAL;
941 
942 	return pci_legacy_read(bus, off, (u32 *)buf, count);
943 }
944 
945 /**
946  * pci_write_legacy_io - write byte(s) to legacy I/O port space
947  * @filp: open sysfs file
948  * @kobj: kobject corresponding to file to write to
949  * @bin_attr: struct bin_attribute for this file
950  * @buf: buffer containing value to be written
951  * @off: offset into legacy I/O port space
952  * @count: number of bytes to write
953  *
954  * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
955  * callback routine (pci_legacy_write).
956  */
957 static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
958 				   const struct bin_attribute *bin_attr,
959 				   char *buf, loff_t off, size_t count)
960 {
961 	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
962 
963 	/* Only support 1, 2 or 4 byte accesses */
964 	if (count != 1 && count != 2 && count != 4)
965 		return -EINVAL;
966 
967 	return pci_legacy_write(bus, off, *(u32 *)buf, count);
968 }
969 
970 /**
971  * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
972  * @filp: open sysfs file
973  * @kobj: kobject corresponding to device to be mapped
974  * @attr: struct bin_attribute for this file
975  * @vma: struct vm_area_struct passed to mmap
976  *
977  * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
978  * legacy memory space (first meg of bus space) into application virtual
979  * memory space.
980  */
981 static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
982 			       const struct bin_attribute *attr,
983 			       struct vm_area_struct *vma)
984 {
985 	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
986 
987 	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
988 }
989 
990 /**
991  * pci_mmap_legacy_io - map legacy PCI IO into user memory space
992  * @filp: open sysfs file
993  * @kobj: kobject corresponding to device to be mapped
994  * @attr: struct bin_attribute for this file
995  * @vma: struct vm_area_struct passed to mmap
996  *
997  * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
998  * legacy IO space (first meg of bus space) into application virtual
999  * memory space. Returns -ENOSYS if the operation isn't supported
1000  */
1001 static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
1002 			      const struct bin_attribute *attr,
1003 			      struct vm_area_struct *vma)
1004 {
1005 	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
1006 
1007 	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
1008 }
1009 
1010 /**
1011  * pci_adjust_legacy_attr - adjustment of legacy file attributes
1012  * @b: bus to create files under
1013  * @mmap_type: I/O port or memory
1014  *
1015  * Stub implementation. Can be overridden by arch if necessary.
1016  */
1017 void __weak pci_adjust_legacy_attr(struct pci_bus *b,
1018 				   enum pci_mmap_state mmap_type)
1019 {
1020 }
1021 
1022 /**
1023  * pci_create_legacy_files - create legacy I/O port and memory files
1024  * @b: bus to create files under
1025  *
1026  * Some platforms allow access to legacy I/O port and ISA memory space on
1027  * a per-bus basis.  This routine creates the files and ties them into
1028  * their associated read, write and mmap files from pci-sysfs.c
1029  *
1030  * On error unwind, but don't propagate the error to the caller
1031  * as it is ok to set up the PCI bus without these files.
1032  */
1033 void pci_create_legacy_files(struct pci_bus *b)
1034 {
1035 	int error;
1036 
1037 	if (!sysfs_initialized)
1038 		return;
1039 
1040 	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
1041 			       GFP_ATOMIC);
1042 	if (!b->legacy_io)
1043 		goto kzalloc_err;
1044 
1045 	sysfs_bin_attr_init(b->legacy_io);
1046 	b->legacy_io->attr.name = "legacy_io";
1047 	b->legacy_io->size = 0xffff;
1048 	b->legacy_io->attr.mode = 0600;
1049 	b->legacy_io->read = pci_read_legacy_io;
1050 	b->legacy_io->write = pci_write_legacy_io;
1051 	/* See pci_create_attr() for motivation */
1052 	b->legacy_io->llseek = pci_llseek_resource;
1053 	b->legacy_io->mmap = pci_mmap_legacy_io;
1054 	b->legacy_io->f_mapping = iomem_get_mapping;
1055 	pci_adjust_legacy_attr(b, pci_mmap_io);
1056 	error = device_create_bin_file(&b->dev, b->legacy_io);
1057 	if (error)
1058 		goto legacy_io_err;
1059 
1060 	/* Allocated above after the legacy_io struct */
1061 	b->legacy_mem = b->legacy_io + 1;
1062 	sysfs_bin_attr_init(b->legacy_mem);
1063 	b->legacy_mem->attr.name = "legacy_mem";
1064 	b->legacy_mem->size = 1024*1024;
1065 	b->legacy_mem->attr.mode = 0600;
1066 	b->legacy_mem->mmap = pci_mmap_legacy_mem;
1067 	/* See pci_create_attr() for motivation */
1068 	b->legacy_mem->llseek = pci_llseek_resource;
1069 	b->legacy_mem->f_mapping = iomem_get_mapping;
1070 	pci_adjust_legacy_attr(b, pci_mmap_mem);
1071 	error = device_create_bin_file(&b->dev, b->legacy_mem);
1072 	if (error)
1073 		goto legacy_mem_err;
1074 
1075 	return;
1076 
1077 legacy_mem_err:
1078 	device_remove_bin_file(&b->dev, b->legacy_io);
1079 legacy_io_err:
1080 	kfree(b->legacy_io);
1081 	b->legacy_io = NULL;
1082 kzalloc_err:
1083 	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
1084 }
1085 
1086 void pci_remove_legacy_files(struct pci_bus *b)
1087 {
1088 	if (b->legacy_io) {
1089 		device_remove_bin_file(&b->dev, b->legacy_io);
1090 		device_remove_bin_file(&b->dev, b->legacy_mem);
1091 		kfree(b->legacy_io); /* both are allocated here */
1092 	}
1093 }
1094 #endif /* HAVE_PCI_LEGACY */
1095 
1096 #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
1097 /**
1098  * pci_mmap_resource - map a PCI resource into user memory space
1099  * @kobj: kobject for mapping
1100  * @attr: struct bin_attribute for the file being mapped
1101  * @vma: struct vm_area_struct passed into the mmap
1102  * @write_combine: 1 for write_combine mapping
1103  *
1104  * Use the regular PCI mapping routines to map a PCI resource into userspace.
1105  */
1106 static int pci_mmap_resource(struct kobject *kobj, const struct bin_attribute *attr,
1107 			     struct vm_area_struct *vma, int write_combine)
1108 {
1109 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1110 	int bar = (unsigned long)attr->private;
1111 	enum pci_mmap_state mmap_type;
1112 	struct resource *res = &pdev->resource[bar];
1113 	int ret;
1114 
1115 	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1116 	if (ret)
1117 		return ret;
1118 
1119 	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
1120 		return -EINVAL;
1121 
1122 	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
1123 		return -EINVAL;
1124 
1125 	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
1126 
1127 	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
1128 }
1129 
1130 static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
1131 				const struct bin_attribute *attr,
1132 				struct vm_area_struct *vma)
1133 {
1134 	return pci_mmap_resource(kobj, attr, vma, 0);
1135 }
1136 
1137 static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
1138 				const struct bin_attribute *attr,
1139 				struct vm_area_struct *vma)
1140 {
1141 	return pci_mmap_resource(kobj, attr, vma, 1);
1142 }
1143 
1144 static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
1145 			       const struct bin_attribute *attr, char *buf,
1146 			       loff_t off, size_t count, bool write)
1147 {
1148 #ifdef CONFIG_HAS_IOPORT
1149 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1150 	int bar = (unsigned long)attr->private;
1151 	unsigned long port = off;
1152 
1153 	port += pci_resource_start(pdev, bar);
1154 
1155 	if (port > pci_resource_end(pdev, bar))
1156 		return 0;
1157 
1158 	if (port + count - 1 > pci_resource_end(pdev, bar))
1159 		return -EINVAL;
1160 
1161 	switch (count) {
1162 	case 1:
1163 		if (write)
1164 			outb(*(u8 *)buf, port);
1165 		else
1166 			*(u8 *)buf = inb(port);
1167 		return 1;
1168 	case 2:
1169 		if (write)
1170 			outw(*(u16 *)buf, port);
1171 		else
1172 			*(u16 *)buf = inw(port);
1173 		return 2;
1174 	case 4:
1175 		if (write)
1176 			outl(*(u32 *)buf, port);
1177 		else
1178 			*(u32 *)buf = inl(port);
1179 		return 4;
1180 	}
1181 	return -EINVAL;
1182 #else
1183 	return -ENXIO;
1184 #endif
1185 }
1186 
1187 static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
1188 				    const struct bin_attribute *attr, char *buf,
1189 				    loff_t off, size_t count)
1190 {
1191 	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
1192 }
1193 
1194 static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
1195 				     const struct bin_attribute *attr, char *buf,
1196 				     loff_t off, size_t count)
1197 {
1198 	int ret;
1199 
1200 	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1201 	if (ret)
1202 		return ret;
1203 
1204 	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
1205 }
1206 
1207 /**
1208  * pci_remove_resource_files - cleanup resource files
1209  * @pdev: dev to cleanup
1210  *
1211  * If we created resource files for @pdev, remove them from sysfs and
1212  * free their resources.
1213  */
1214 static void pci_remove_resource_files(struct pci_dev *pdev)
1215 {
1216 	int i;
1217 
1218 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1219 		struct bin_attribute *res_attr;
1220 
1221 		res_attr = pdev->res_attr[i];
1222 		if (res_attr) {
1223 			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1224 			kfree(res_attr);
1225 		}
1226 
1227 		res_attr = pdev->res_attr_wc[i];
1228 		if (res_attr) {
1229 			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1230 			kfree(res_attr);
1231 		}
1232 	}
1233 }
1234 
1235 static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
1236 {
1237 	/* allocate attribute structure, piggyback attribute name */
1238 	int name_len = write_combine ? 13 : 10;	/* "resourceN_wc" or "resourceN" plus NUL */
1239 	struct bin_attribute *res_attr;
1240 	char *res_attr_name;
1241 	int retval;
1242 
1243 	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
1244 	if (!res_attr)
1245 		return -ENOMEM;
1246 
1247 	res_attr_name = (char *)(res_attr + 1);
1248 
1249 	sysfs_bin_attr_init(res_attr);
1250 	if (write_combine) {
1251 		sprintf(res_attr_name, "resource%d_wc", num);
1252 		res_attr->mmap = pci_mmap_resource_wc;
1253 	} else {
1254 		sprintf(res_attr_name, "resource%d", num);
1255 		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
1256 			res_attr->read = pci_read_resource_io;
1257 			res_attr->write = pci_write_resource_io;
1258 			if (arch_can_pci_mmap_io())
1259 				res_attr->mmap = pci_mmap_resource_uc;
1260 		} else {
1261 			res_attr->mmap = pci_mmap_resource_uc;
1262 		}
1263 	}
1264 	if (res_attr->mmap) {
1265 		res_attr->f_mapping = iomem_get_mapping;
1266 		/*
1267 		 * generic_file_llseek() consults f_mapping->host to determine
1268 		 * the file size. As iomem_inode knows nothing about the
1269 		 * attribute, it's not going to work, so override it as well.
1270 		 */
1271 		res_attr->llseek = pci_llseek_resource;
1272 	}
1273 	res_attr->attr.name = res_attr_name;
1274 	res_attr->attr.mode = 0600;
1275 	res_attr->size = pci_resource_len(pdev, num);
1276 	res_attr->private = (void *)(unsigned long)num;
1277 	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
1278 	if (retval) {
1279 		kfree(res_attr);
1280 		return retval;
1281 	}
1282 
1283 	if (write_combine)
1284 		pdev->res_attr_wc[num] = res_attr;
1285 	else
1286 		pdev->res_attr[num] = res_attr;
1287 
1288 	return 0;
1289 }
1290 
1291 /**
1292  * pci_create_resource_files - create resource files in sysfs for @dev
1293  * @pdev: dev in question
1294  *
1295  * Walk the resources in @pdev creating files for each resource available.
1296  */
1297 static int pci_create_resource_files(struct pci_dev *pdev)
1298 {
1299 	int i;
1300 	int retval;
1301 
1302 	/* Skip devices with non-mappable BARs */
1303 	if (pdev->non_mappable_bars)
1304 		return 0;
1305 
1306 	/* Expose the PCI resources from this device as files */
1307 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1308 
1309 		/* skip empty resources */
1310 		if (!pci_resource_len(pdev, i))
1311 			continue;
1312 
1313 		retval = pci_create_attr(pdev, i, 0);
1314 		/* for prefetchable resources, create a WC mappable file */
1315 		if (!retval && arch_can_pci_mmap_wc() &&
1316 		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
1317 			retval = pci_create_attr(pdev, i, 1);
1318 		if (retval) {
1319 			pci_remove_resource_files(pdev);
1320 			return retval;
1321 		}
1322 	}
1323 	return 0;
1324 }
1325 #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
1326 int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
1327 void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
1328 #endif
1329 
1330 /**
1331  * pci_write_rom - enable or disable access to the PCI ROM
1332  * @filp: sysfs file
1333  * @kobj: kernel object handle
1334  * @bin_attr: struct bin_attribute for this file
1335  * @buf: user input
1336  * @off: file offset
1337  * @count: number of bytes in input
1338  *
1339  * writing anything except 0 enables it
1340  */
1341 static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
1342 			     const struct bin_attribute *bin_attr, char *buf,
1343 			     loff_t off, size_t count)
1344 {
1345 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1346 
1347 	if ((off ==  0) && (*buf == '0') && (count == 2))
1348 		pdev->rom_attr_enabled = 0;
1349 	else
1350 		pdev->rom_attr_enabled = 1;
1351 
1352 	return count;
1353 }
1354 
1355 /**
1356  * pci_read_rom - read a PCI ROM
1357  * @filp: sysfs file
1358  * @kobj: kernel object handle
1359  * @bin_attr: struct bin_attribute for this file
1360  * @buf: where to put the data we read from the ROM
1361  * @off: file offset
1362  * @count: number of bytes to read
1363  *
1364  * Put @count bytes starting at @off into @buf from the ROM in the PCI
1365  * device corresponding to @kobj.
1366  */
1367 static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
1368 			    const struct bin_attribute *bin_attr, char *buf,
1369 			    loff_t off, size_t count)
1370 {
1371 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1372 	void __iomem *rom;
1373 	size_t size;
1374 
1375 	if (!pdev->rom_attr_enabled)
1376 		return -EINVAL;
1377 
1378 	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
1379 	if (!rom || !size)
1380 		return -EIO;
1381 
1382 	if (off >= size)
1383 		count = 0;
1384 	else {
1385 		if (off + count > size)
1386 			count = size - off;
1387 
1388 		memcpy_fromio(buf, rom + off, count);
1389 	}
1390 	pci_unmap_rom(pdev, rom);
1391 
1392 	return count;
1393 }
1394 static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
1395 
1396 static const struct bin_attribute *const pci_dev_rom_attrs[] = {
1397 	&bin_attr_rom,
1398 	NULL,
1399 };
1400 
1401 static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
1402 					   const struct bin_attribute *a, int n)
1403 {
1404 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1405 
1406 	/* If the device has a ROM, try to expose it in sysfs. */
1407 	if (!pci_resource_end(pdev, PCI_ROM_RESOURCE))
1408 		return 0;
1409 
1410 	return a->attr.mode;
1411 }
1412 
1413 static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
1414 					const struct bin_attribute *a, int n)
1415 {
1416 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1417 
1418 	return pci_resource_len(pdev, PCI_ROM_RESOURCE);
1419 }
1420 
1421 static const struct attribute_group pci_dev_rom_attr_group = {
1422 	.bin_attrs = pci_dev_rom_attrs,
1423 	.is_bin_visible = pci_dev_rom_attr_is_visible,
1424 	.bin_size = pci_dev_rom_attr_bin_size,
1425 };
1426 
1427 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
1428 			   const char *buf, size_t count)
1429 {
1430 	struct pci_dev *pdev = to_pci_dev(dev);
1431 	unsigned long val;
1432 	ssize_t result;
1433 
1434 	if (kstrtoul(buf, 0, &val) < 0)
1435 		return -EINVAL;
1436 
1437 	if (val != 1)
1438 		return -EINVAL;
1439 
1440 	pm_runtime_get_sync(dev);
1441 	result = pci_reset_function(pdev);
1442 	pm_runtime_put(dev);
1443 	if (result < 0)
1444 		return result;
1445 
1446 	return count;
1447 }
1448 static DEVICE_ATTR_WO(reset);
1449 
1450 static struct attribute *pci_dev_reset_attrs[] = {
1451 	&dev_attr_reset.attr,
1452 	NULL,
1453 };
1454 
1455 static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
1456 					     struct attribute *a, int n)
1457 {
1458 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1459 
1460 	if (!pci_reset_supported(pdev))
1461 		return 0;
1462 
1463 	return a->mode;
1464 }
1465 
1466 static const struct attribute_group pci_dev_reset_attr_group = {
1467 	.attrs = pci_dev_reset_attrs,
1468 	.is_visible = pci_dev_reset_attr_is_visible,
1469 };
1470 
1471 static ssize_t reset_method_show(struct device *dev,
1472 				 struct device_attribute *attr, char *buf)
1473 {
1474 	struct pci_dev *pdev = to_pci_dev(dev);
1475 	ssize_t len = 0;
1476 	int i, m;
1477 
1478 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
1479 		m = pdev->reset_methods[i];
1480 		if (!m)
1481 			break;
1482 
1483 		len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
1484 				     pci_reset_fn_methods[m].name);
1485 	}
1486 
1487 	if (len)
1488 		len += sysfs_emit_at(buf, len, "\n");
1489 
1490 	return len;
1491 }
1492 
1493 static int reset_method_lookup(const char *name)
1494 {
1495 	int m;
1496 
1497 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
1498 		if (sysfs_streq(name, pci_reset_fn_methods[m].name))
1499 			return m;
1500 	}
1501 
1502 	return 0;	/* not found */
1503 }
1504 
1505 static ssize_t reset_method_store(struct device *dev,
1506 				  struct device_attribute *attr,
1507 				  const char *buf, size_t count)
1508 {
1509 	struct pci_dev *pdev = to_pci_dev(dev);
1510 	char *tmp_options, *name;
1511 	int m, n;
1512 	u8 reset_methods[PCI_NUM_RESET_METHODS] = {};
1513 
1514 	if (sysfs_streq(buf, "")) {
1515 		pdev->reset_methods[0] = 0;
1516 		pci_warn(pdev, "All device reset methods disabled by user");
1517 		return count;
1518 	}
1519 
1520 	pm_runtime_get_sync(dev);
1521 	struct device *pmdev __free(pm_runtime_put) = dev;
1522 
1523 	if (sysfs_streq(buf, "default")) {
1524 		pci_init_reset_methods(pdev);
1525 		return count;
1526 	}
1527 
1528 	char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
1529 	if (!options)
1530 		return -ENOMEM;
1531 
1532 	n = 0;
1533 	tmp_options = options;
1534 	while ((name = strsep(&tmp_options, " ")) != NULL) {
1535 		if (sysfs_streq(name, ""))
1536 			continue;
1537 
1538 		name = strim(name);
1539 
1540 		/* Leave previous methods unchanged if input is invalid */
1541 		m = reset_method_lookup(name);
1542 		if (!m) {
1543 			pci_err(pdev, "Invalid reset method '%s'", name);
1544 			return -EINVAL;
1545 		}
1546 
1547 		if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
1548 			pci_err(pdev, "Unsupported reset method '%s'", name);
1549 			return -EINVAL;
1550 		}
1551 
1552 		if (n == PCI_NUM_RESET_METHODS - 1) {
1553 			pci_err(pdev, "Too many reset methods\n");
1554 			return -EINVAL;
1555 		}
1556 
1557 		reset_methods[n++] = m;
1558 	}
1559 
1560 	reset_methods[n] = 0;
1561 
1562 	/* Warn if dev-specific supported but not highest priority */
1563 	if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
1564 	    reset_methods[0] != 1)
1565 		pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
1566 	memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
1567 	return count;
1568 }
1569 static DEVICE_ATTR_RW(reset_method);
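/*
 * Illustrative usage, as implemented by reset_method_store() above: the
 * attribute accepts a space-separated, ordered list of method names
 * (e.g. "flr bus"), the keyword "default" to restore the probed defaults,
 * or an empty write to disable all reset methods for the device.
 */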
1570 
1571 static struct attribute *pci_dev_reset_method_attrs[] = {
1572 	&dev_attr_reset_method.attr,
1573 	NULL,
1574 };
1575 
1576 static const struct attribute_group pci_dev_reset_method_attr_group = {
1577 	.attrs = pci_dev_reset_method_attrs,
1578 	.is_visible = pci_dev_reset_attr_is_visible,
1579 };
1580 
1581 static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
1582 {
1583 	struct pci_dev *pdev = to_pci_dev(dev);
1584 	ssize_t ret;
1585 
1586 	pci_config_pm_runtime_get(pdev);
1587 
1588 	ret = sysfs_emit(buf, "%016llx\n",
1589 			 (u64)pci_rebar_get_possible_sizes(pdev, n));
1590 
1591 	pci_config_pm_runtime_put(pdev);
1592 
1593 	return ret;
1594 }
1595 
1596 static ssize_t __resource_resize_store(struct device *dev, int n,
1597 				       const char *buf, size_t count)
1598 {
1599 	struct pci_dev *pdev = to_pci_dev(dev);
1600 	struct pci_bus *bus = pdev->bus;
1601 	struct resource *b_win, *res;
1602 	unsigned long size;
1603 	int ret, i;
1604 	u16 cmd;
1605 
1606 	if (kstrtoul(buf, 0, &size) < 0)
1607 		return -EINVAL;
1608 
1609 	b_win = pbus_select_window(bus, pci_resource_n(pdev, n));
1610 	if (!b_win)
1611 		return -EINVAL;
1612 
1613 	device_lock(dev);
1614 	if (dev->driver || pci_num_vf(pdev)) {
1615 		ret = -EBUSY;
1616 		goto unlock;
1617 	}
1618 
1619 	pci_config_pm_runtime_get(pdev);
1620 
1621 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
1622 		ret = aperture_remove_conflicting_pci_devices(pdev,
1623 						"resourceN_resize");
1624 		if (ret)
1625 			goto pm_put;
1626 	}
1627 
1628 	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1629 	pci_write_config_word(pdev, PCI_COMMAND,
1630 			      cmd & ~PCI_COMMAND_MEMORY);
1631 
1632 	pci_remove_resource_files(pdev);
1633 
1634 	pci_dev_for_each_resource(pdev, res, i) {
1635 		if (i >= PCI_BRIDGE_RESOURCES)
1636 			break;
1637 
1638 		if (b_win == pbus_select_window(bus, res))
1639 			pci_release_resource(pdev, i);
1640 	}
1641 
1642 	ret = pci_resize_resource(pdev, n, size);
1643 
1644 	pci_assign_unassigned_bus_resources(bus);
1645 
1646 	if (pci_create_resource_files(pdev))
1647 		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");
1648 
1649 	pci_write_config_word(pdev, PCI_COMMAND, cmd);
1650 pm_put:
1651 	pci_config_pm_runtime_put(pdev);
1652 unlock:
1653 	device_unlock(dev);
1654 
1655 	return ret ? ret : count;
1656 }
1657 
1658 #define pci_dev_resource_resize_attr(n)					\
1659 static ssize_t resource##n##_resize_show(struct device *dev,		\
1660 					 struct device_attribute *attr,	\
1661 					 char *buf)			\
1662 {									\
1663 	return __resource_resize_show(dev, n, buf);			\
1664 }									\
1665 static ssize_t resource##n##_resize_store(struct device *dev,		\
1666 					  struct device_attribute *attr,\
1667 					  const char *buf, size_t count)\
1668 {									\
1669 	return __resource_resize_store(dev, n, buf, count);		\
1670 }									\
1671 static DEVICE_ATTR_RW(resource##n##_resize)
1672 
1673 pci_dev_resource_resize_attr(0);
1674 pci_dev_resource_resize_attr(1);
1675 pci_dev_resource_resize_attr(2);
1676 pci_dev_resource_resize_attr(3);
1677 pci_dev_resource_resize_attr(4);
1678 pci_dev_resource_resize_attr(5);
1679 
1680 static struct attribute *resource_resize_attrs[] = {
1681 	&dev_attr_resource0_resize.attr,
1682 	&dev_attr_resource1_resize.attr,
1683 	&dev_attr_resource2_resize.attr,
1684 	&dev_attr_resource3_resize.attr,
1685 	&dev_attr_resource4_resize.attr,
1686 	&dev_attr_resource5_resize.attr,
1687 	NULL,
1688 };
1689 
1690 static umode_t resource_resize_is_visible(struct kobject *kobj,
1691 					  struct attribute *a, int n)
1692 {
1693 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1694 
1695 	return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
1696 }
1697 
1698 static const struct attribute_group pci_dev_resource_resize_group = {
1699 	.attrs = resource_resize_attrs,
1700 	.is_visible = resource_resize_is_visible,
1701 };
1702 
1703 int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
1704 {
1705 	if (!sysfs_initialized)
1706 		return -EACCES;
1707 
1708 	return pci_create_resource_files(pdev);
1709 }
1710 
1711 /**
1712  * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
1713  * @pdev: device whose entries we should free
1714  *
1715  * Cleanup when @pdev is removed from sysfs.
1716  */
1717 void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
1718 {
1719 	if (!sysfs_initialized)
1720 		return;
1721 
1722 	pci_remove_resource_files(pdev);
1723 }
1724 
1725 static int __init pci_sysfs_init(void)
1726 {
1727 	struct pci_dev *pdev = NULL;
1728 	struct pci_bus *pbus = NULL;
1729 	int retval;
1730 
1731 	sysfs_initialized = 1;
1732 	for_each_pci_dev(pdev) {
1733 		retval = pci_create_sysfs_dev_files(pdev);
1734 		if (retval) {
1735 			pci_dev_put(pdev);
1736 			return retval;
1737 		}
1738 	}
1739 
1740 	while ((pbus = pci_find_next_bus(pbus)))
1741 		pci_create_legacy_files(pbus);
1742 
1743 	return 0;
1744 }
1745 late_initcall(pci_sysfs_init);
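/*
 * Likely intent (not stated in this file): running as a late initcall lets
 * pci_sysfs_init() catch up on every device and bus enumerated before
 * sysfs_initialized was set; anything added afterwards gets its files when
 * the PCI core calls pci_create_sysfs_dev_files() and
 * pci_create_legacy_files() directly, both of which bail out early until
 * sysfs_initialized is set.
 */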
1746 
1747 static struct attribute *pci_dev_dev_attrs[] = {
1748 	&dev_attr_boot_vga.attr,
1749 	&dev_attr_serial_number.attr,
1750 	NULL,
1751 };
1752 
1753 static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1754 					 struct attribute *a, int n)
1755 {
1756 	struct device *dev = kobj_to_dev(kobj);
1757 	struct pci_dev *pdev = to_pci_dev(dev);
1758 
1759 	if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
1760 		return a->mode;
1761 
1762 	if (a == &dev_attr_serial_number.attr && pci_get_dsn(pdev))
1763 		return a->mode;
1764 
1765 	return 0;
1766 }
1767 
1768 static struct attribute *pci_dev_hp_attrs[] = {
1769 	&dev_attr_remove.attr,
1770 	&dev_attr_dev_rescan.attr,
1771 	NULL,
1772 };
1773 
1774 static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
1775 					    struct attribute *a, int n)
1776 {
1777 	struct device *dev = kobj_to_dev(kobj);
1778 	struct pci_dev *pdev = to_pci_dev(dev);
1779 
1780 	if (pdev->is_virtfn)
1781 		return 0;
1782 
1783 	return a->mode;
1784 }
1785 
1786 static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
1787 					    struct attribute *a, int n)
1788 {
1789 	struct device *dev = kobj_to_dev(kobj);
1790 	struct pci_dev *pdev = to_pci_dev(dev);
1791 
1792 	if (pci_is_bridge(pdev))
1793 		return a->mode;
1794 
1795 	return 0;
1796 }
1797 
1798 static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
1799 					  struct attribute *a, int n)
1800 {
1801 	struct device *dev = kobj_to_dev(kobj);
1802 	struct pci_dev *pdev = to_pci_dev(dev);
1803 
1804 	if (pci_is_pcie(pdev))
1805 		return a->mode;
1806 
1807 	return 0;
1808 }
1809 
1810 static const struct attribute_group pci_dev_group = {
1811 	.attrs = pci_dev_attrs,
1812 };
1813 
1814 const struct attribute_group *pci_dev_groups[] = {
1815 	&pci_dev_group,
1816 	&pci_dev_config_attr_group,
1817 	&pci_dev_rom_attr_group,
1818 	&pci_dev_reset_attr_group,
1819 	&pci_dev_reset_method_attr_group,
1820 	&pci_dev_vpd_attr_group,
1821 #ifdef CONFIG_DMI
1822 	&pci_dev_smbios_attr_group,
1823 #endif
1824 #ifdef CONFIG_ACPI
1825 	&pci_dev_acpi_attr_group,
1826 #endif
1827 	&pci_dev_resource_resize_group,
1828 	ARCH_PCI_DEV_GROUPS
1829 	NULL,
1830 };
1831 
1832 static const struct attribute_group pci_dev_hp_attr_group = {
1833 	.attrs = pci_dev_hp_attrs,
1834 	.is_visible = pci_dev_hp_attrs_are_visible,
1835 };
1836 
1837 static const struct attribute_group pci_dev_attr_group = {
1838 	.attrs = pci_dev_dev_attrs,
1839 	.is_visible = pci_dev_attrs_are_visible,
1840 };
1841 
1842 static const struct attribute_group pci_bridge_attr_group = {
1843 	.attrs = pci_bridge_attrs,
1844 	.is_visible = pci_bridge_attrs_are_visible,
1845 };
1846 
1847 static const struct attribute_group pcie_dev_attr_group = {
1848 	.attrs = pcie_dev_attrs,
1849 	.is_visible = pcie_dev_attrs_are_visible,
1850 };
1851 
1852 const struct attribute_group *pci_dev_attr_groups[] = {
1853 	&pci_dev_attr_group,
1854 	&pci_dev_hp_attr_group,
1855 #ifdef CONFIG_PCI_IOV
1856 	&sriov_pf_dev_attr_group,
1857 	&sriov_vf_dev_attr_group,
1858 #endif
1859 	&pci_bridge_attr_group,
1860 	&pcie_dev_attr_group,
1861 #ifdef CONFIG_PCIEAER
1862 	&aer_stats_attr_group,
1863 	&aer_attr_group,
1864 #endif
1865 #ifdef CONFIG_PCIEASPM
1866 	&aspm_ctrl_attr_group,
1867 #endif
1868 #ifdef CONFIG_PCI_DOE
1869 	&pci_doe_sysfs_group,
1870 #endif
1871 	NULL,
1872 };
1873