xref: /linux/drivers/pci/pci.c (revision 26bff9eb49201aeb4e1b32d698c191831a39f5d4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCI Bus Services, see include/linux/pci.h for further explanation.
4  *
5  * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
6  * David Mosberger-Tang
7  *
8  * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
9  */
10 
11 #include <linux/acpi.h>
12 #include <linux/kernel.h>
13 #include <linux/delay.h>
14 #include <linux/dmi.h>
15 #include <linux/init.h>
16 #include <linux/msi.h>
17 #include <linux/of.h>
18 #include <linux/of_pci.h>
19 #include <linux/pci.h>
20 #include <linux/pm.h>
21 #include <linux/slab.h>
22 #include <linux/module.h>
23 #include <linux/spinlock.h>
24 #include <linux/string.h>
25 #include <linux/log2.h>
26 #include <linux/logic_pio.h>
27 #include <linux/pm_wakeup.h>
28 #include <linux/interrupt.h>
29 #include <linux/device.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/pci_hotplug.h>
32 #include <linux/vmalloc.h>
33 #include <linux/pci-ats.h>
34 #include <asm/setup.h>
35 #include <asm/dma.h>
36 #include <linux/aer.h>
37 #include "pci.h"
38 
39 DEFINE_MUTEX(pci_slot_mutex);
40 
41 const char *pci_power_names[] = {
42 	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
43 };
44 EXPORT_SYMBOL_GPL(pci_power_names);
45 
46 int isa_dma_bridge_buggy;
47 EXPORT_SYMBOL(isa_dma_bridge_buggy);
48 
49 int pci_pci_problems;
50 EXPORT_SYMBOL(pci_pci_problems);
51 
52 unsigned int pci_pm_d3_delay;
53 
54 static void pci_pme_list_scan(struct work_struct *work);
55 
56 static LIST_HEAD(pci_pme_list);
57 static DEFINE_MUTEX(pci_pme_list_mutex);
58 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
59 
60 struct pci_pme_device {
61 	struct list_head list;
62 	struct pci_dev *dev;
63 };
64 
65 #define PME_TIMEOUT 1000 /* Interval between PME checks, in ms */
66 
67 static void pci_dev_d3_sleep(struct pci_dev *dev)
68 {
69 	unsigned int delay = dev->d3_delay;
70 
71 	if (delay < pci_pm_d3_delay)
72 		delay = pci_pm_d3_delay;
73 
74 	if (delay)
75 		msleep(delay);
76 }
77 
78 #ifdef CONFIG_PCI_DOMAINS
79 int pci_domains_supported = 1;
80 #endif
81 
82 #define DEFAULT_CARDBUS_IO_SIZE		(256)
83 #define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
84 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
85 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
86 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
87 
88 #define DEFAULT_HOTPLUG_IO_SIZE		(256)
89 #define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
90 #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
91 /* pci=hpiosize=nn can override this */
92 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
93 /*
94  * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
95  * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
96  * pci=hpmemsize=nnM overrides both
97  */
98 unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
99 unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
100 
101 #define DEFAULT_HOTPLUG_BUS_SIZE	1
102 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
103 
104 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
105 
106 /*
107  * The default CLS is used if the arch didn't set CLS explicitly and not
108  * all PCI devices agree on the same value.  The arch can override either
109  * the default or the actual value as it sees fit.  Don't forget this is
110  * measured in 32-bit words, not bytes.
111  */
112 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
113 u8 pci_cache_line_size;
114 
115 /*
116  * If we set up a device for bus mastering, we need to check the latency
117  * timer as certain BIOSes forget to set it properly.
118  */
119 unsigned int pcibios_max_latency = 255;
120 
121 /* If set, the PCIe ARI capability will not be used. */
122 static bool pcie_ari_disabled;
123 
124 /* If set, the PCIe ATS capability will not be used. */
125 static bool pcie_ats_disabled;
126 
127 /* If set, the PCI config space of each device is printed during boot. */
128 bool pci_early_dump;
129 
130 bool pci_ats_disabled(void)
131 {
132 	return pcie_ats_disabled;
133 }
134 EXPORT_SYMBOL_GPL(pci_ats_disabled);
135 
136 /* Disable bridge_d3 for all PCIe ports */
137 static bool pci_bridge_d3_disable;
138 /* Force bridge_d3 for all PCIe ports */
139 static bool pci_bridge_d3_force;
140 
141 static int __init pcie_port_pm_setup(char *str)
142 {
143 	if (!strcmp(str, "off"))
144 		pci_bridge_d3_disable = true;
145 	else if (!strcmp(str, "force"))
146 		pci_bridge_d3_force = true;
147 	return 1;
148 }
149 __setup("pcie_port_pm=", pcie_port_pm_setup);
150 
151 /* Time to wait after a reset for device to become responsive */
152 #define PCIE_RESET_READY_POLL_MS 60000
153 
154 /**
155  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
156  * @bus: pointer to PCI bus structure to search
157  *
158  * Given a PCI bus, returns the highest PCI bus number present in the set
159  * including the given PCI bus and its list of child PCI buses.
160  */
161 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
162 {
163 	struct pci_bus *tmp;
164 	unsigned char max, n;
165 
166 	max = bus->busn_res.end;
167 	list_for_each_entry(tmp, &bus->children, node) {
168 		n = pci_bus_max_busnr(tmp);
169 		if (n > max)
170 			max = n;
171 	}
172 	return max;
173 }
174 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
175 
176 /**
177  * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
178  * @pdev: the PCI device
179  *
180  * Returns error bits set in PCI_STATUS and clears them.
181  */
182 int pci_status_get_and_clear_errors(struct pci_dev *pdev)
183 {
184 	u16 status;
185 	int ret;
186 
187 	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
188 	if (ret != PCIBIOS_SUCCESSFUL)
189 		return -EIO;
190 
191 	status &= PCI_STATUS_ERROR_BITS;
192 	if (status)
193 		pci_write_config_word(pdev, PCI_STATUS, status);
194 
195 	return status;
196 }
197 EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
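
/*
 * Editorial usage sketch (not part of the original source): a driver
 * error handler could consume the latched PCI_STATUS bits like this;
 * "foo" is a hypothetical name:
 *
 *	static void foo_check_errors(struct pci_dev *pdev)
 *	{
 *		int status = pci_status_get_and_clear_errors(pdev);
 *
 *		if (status < 0)
 *			return;
 *		if (status & PCI_STATUS_DETECTED_PARITY)
 *			pci_warn(pdev, "parity error detected\n");
 *	}
 */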
198 
199 #ifdef CONFIG_HAS_IOMEM
200 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
201 {
202 	struct resource *res = &pdev->resource[bar];
203 
204 	/*
205 	 * Make sure the BAR is actually a memory resource, not an IO resource
206 	 */
207 	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
208 		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
209 		return NULL;
210 	}
211 	return ioremap(res->start, resource_size(res));
212 }
213 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
214 
215 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
216 {
217 	/*
218 	 * Make sure the BAR is actually a memory resource, not an IO resource
219 	 */
220 	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
221 		WARN_ON(1);
222 		return NULL;
223 	}
224 	return ioremap_wc(pci_resource_start(pdev, bar),
225 			  pci_resource_len(pdev, bar));
226 }
227 EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
228 #endif
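
/*
 * Editorial usage sketch (not part of the original source): mapping
 * BAR 0 in a hypothetical probe path:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 */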
229 
230 /**
231  * pci_dev_str_match_path - test if a path string matches a device
232  * @dev: the PCI device to test
233  * @path: string to match the device against
234  * @endptr: pointer to the string after the match
235  *
236  * Test if a string (typically from a kernel parameter) formatted as a
237  * path of device/function addresses matches a PCI device. The string must
238  * be of the form:
239  *
240  *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
241  *
242  * A path for a device can be obtained using 'lspci -t'.  Using a path
243  * is more robust against bus renumbering than using only a single bus,
244  * device and function address.
245  *
246  * Returns 1 if the string matches the device, 0 if it does not and
247  * a negative error code if it fails to parse the string.
248  */
249 static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
250 				  const char **endptr)
251 {
252 	int ret;
253 	int seg, bus, slot, func;
254 	char *wpath, *p;
255 	char end;
256 
257 	*endptr = strchrnul(path, ';');
258 
259 	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
260 	if (!wpath)
261 		return -ENOMEM;
262 
263 	while (1) {
264 		p = strrchr(wpath, '/');
265 		if (!p)
266 			break;
267 		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
268 		if (ret != 2) {
269 			ret = -EINVAL;
270 			goto free_and_exit;
271 		}
272 
273 		if (dev->devfn != PCI_DEVFN(slot, func)) {
274 			ret = 0;
275 			goto free_and_exit;
276 		}
277 
278 		/*
279 		 * Note: we don't need to get a reference to the upstream
280 		 * bridge because we hold a reference to the top level
281 		 * device which should hold a reference to the bridge,
282 		 * and so on.
283 		 */
284 		dev = pci_upstream_bridge(dev);
285 		if (!dev) {
286 			ret = 0;
287 			goto free_and_exit;
288 		}
289 
290 		*p = 0;
291 	}
292 
293 	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
294 		     &func, &end);
295 	if (ret != 4) {
296 		seg = 0;
297 		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
298 		if (ret != 3) {
299 			ret = -EINVAL;
300 			goto free_and_exit;
301 		}
302 	}
303 
304 	ret = (seg == pci_domain_nr(dev->bus) &&
305 	       bus == dev->bus->number &&
306 	       dev->devfn == PCI_DEVFN(slot, func));
307 
308 free_and_exit:
309 	kfree(wpath);
310 	return ret;
311 }
312 
313 /**
314  * pci_dev_str_match - test if a string matches a device
315  * @dev: the PCI device to test
316  * @p: string to match the device against
317  * @endptr: pointer to the string after the match
318  *
319  * Test if a string (typically from a kernel parameter) matches a specified
320  * PCI device. The string may be of one of the following formats:
321  *
322  *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
323  *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
324  *
325  * The first format specifies a PCI bus/device/function address which
326  * may change if new hardware is inserted, if motherboard firmware changes,
327  * or due to changes in kernel parameters. If the domain is
328  * left unspecified, it is taken to be 0.  In order to be robust against
329  * bus renumbering issues, a path of PCI device/function numbers may be used
330  * to address the specific device.  The path for a device can be determined
331  * through the use of 'lspci -t'.
332  *
333  * The second format matches devices using IDs in the configuration
334  * space which may match multiple devices in the system. A value of 0
335  * for any field will match all devices. (Note: this differs from
336  * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
337  * legacy reasons and convenience so users don't have to specify
338  * FFFFFFFFs on the command line.)
339  *
340  * Returns 1 if the string matches the device, 0 if it does not and
341  * a negative error code if the string cannot be parsed.
342  */
343 static int pci_dev_str_match(struct pci_dev *dev, const char *p,
344 			     const char **endptr)
345 {
346 	int ret;
347 	int count;
348 	unsigned short vendor, device, subsystem_vendor, subsystem_device;
349 
350 	if (strncmp(p, "pci:", 4) == 0) {
351 		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
352 		p += 4;
353 		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
354 			     &subsystem_vendor, &subsystem_device, &count);
355 		if (ret != 4) {
356 			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
357 			if (ret != 2)
358 				return -EINVAL;
359 
360 			subsystem_vendor = 0;
361 			subsystem_device = 0;
362 		}
363 
364 		p += count;
365 
366 		if ((!vendor || vendor == dev->vendor) &&
367 		    (!device || device == dev->device) &&
368 		    (!subsystem_vendor ||
369 			    subsystem_vendor == dev->subsystem_vendor) &&
370 		    (!subsystem_device ||
371 			    subsystem_device == dev->subsystem_device))
372 			goto found;
373 	} else {
374 		/*
375 		 * PCI Bus, Device, Function IDs are specified
376 		 * (optionally, may include a path of devfns following it)
377 		 */
378 		ret = pci_dev_str_match_path(dev, p, &p);
379 		if (ret < 0)
380 			return ret;
381 		else if (ret)
382 			goto found;
383 	}
384 
385 	*endptr = p;
386 	return 0;
387 
388 found:
389 	*endptr = p;
390 	return 1;
391 }
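
/*
 * Editorial examples (not part of the original source) of strings
 * accepted by pci_dev_str_match(); all addresses and IDs below are
 * made up:
 *
 *	0000:03:00.0			domain:bus:device.function
 *	03:00.0				domain defaults to 0
 *	0000:00:1c.0/00.0		path form, robust to renumbering
 *	pci:8086:1533			match vendor/device ID
 *	pci:8086:1533:8086:0001		also match subsystem IDs
 */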
392 
393 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
394 				   u8 pos, int cap, int *ttl)
395 {
396 	u8 id;
397 	u16 ent;
398 
399 	pci_bus_read_config_byte(bus, devfn, pos, &pos);
400 
401 	while ((*ttl)--) {
402 		if (pos < 0x40)
403 			break;
404 		pos &= ~3;
405 		pci_bus_read_config_word(bus, devfn, pos, &ent);
406 
407 		id = ent & 0xff;
408 		if (id == 0xff)
409 			break;
410 		if (id == cap)
411 			return pos;
412 		pos = (ent >> 8);
413 	}
414 	return 0;
415 }
416 
417 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
418 			       u8 pos, int cap)
419 {
420 	int ttl = PCI_FIND_CAP_TTL;
421 
422 	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
423 }
424 
425 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
426 {
427 	return __pci_find_next_cap(dev->bus, dev->devfn,
428 				   pos + PCI_CAP_LIST_NEXT, cap);
429 }
430 EXPORT_SYMBOL_GPL(pci_find_next_capability);
431 
432 static int __pci_bus_find_cap_start(struct pci_bus *bus,
433 				    unsigned int devfn, u8 hdr_type)
434 {
435 	u16 status;
436 
437 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
438 	if (!(status & PCI_STATUS_CAP_LIST))
439 		return 0;
440 
441 	switch (hdr_type) {
442 	case PCI_HEADER_TYPE_NORMAL:
443 	case PCI_HEADER_TYPE_BRIDGE:
444 		return PCI_CAPABILITY_LIST;
445 	case PCI_HEADER_TYPE_CARDBUS:
446 		return PCI_CB_CAPABILITY_LIST;
447 	}
448 
449 	return 0;
450 }
451 
452 /**
453  * pci_find_capability - query for devices' capabilities
454  * @dev: PCI device to query
455  * @cap: capability code
456  *
457  * Tell if a device supports a given PCI capability.
458  * Returns the address of the requested capability structure within the
459  * device's PCI configuration space or 0 in case the device does not
460  * support it.  Possible values for @cap include:
461  *
462  *  %PCI_CAP_ID_PM           Power Management
463  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
464  *  %PCI_CAP_ID_VPD          Vital Product Data
465  *  %PCI_CAP_ID_SLOTID       Slot Identification
466  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
467  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
468  *  %PCI_CAP_ID_PCIX         PCI-X
469  *  %PCI_CAP_ID_EXP          PCI Express
470  */
471 int pci_find_capability(struct pci_dev *dev, int cap)
472 {
473 	int pos;
474 
475 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
476 	if (pos)
477 		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
478 
479 	return pos;
480 }
481 EXPORT_SYMBOL(pci_find_capability);
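
/*
 * Editorial usage sketch (not part of the original source): locating
 * the Power Management capability before reading its registers:
 *
 *	u16 pmc;
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 */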
482 
483 /**
484  * pci_bus_find_capability - query for devices' capabilities
485  * @bus: the PCI bus to query
486  * @devfn: PCI device to query
487  * @cap: capability code
488  *
489  * Like pci_find_capability() but works for PCI devices that do not have a
490  * pci_dev structure set up yet.
491  *
492  * Returns the address of the requested capability structure within the
493  * device's PCI configuration space or 0 in case the device does not
494  * support it.
495  */
496 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
497 {
498 	int pos;
499 	u8 hdr_type;
500 
501 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
502 
503 	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
504 	if (pos)
505 		pos = __pci_find_next_cap(bus, devfn, pos, cap);
506 
507 	return pos;
508 }
509 EXPORT_SYMBOL(pci_bus_find_capability);
510 
511 /**
512  * pci_find_next_ext_capability - Find an extended capability
513  * @dev: PCI device to query
514  * @start: address at which to start looking (0 to start at beginning of list)
515  * @cap: capability code
516  *
517  * Returns the address of the next matching extended capability structure
518  * within the device's PCI configuration space or 0 if the device does
519  * not support it.  Some capabilities can occur several times, e.g., the
520  * vendor-specific capability, and this provides a way to find them all.
521  */
522 int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
523 {
524 	u32 header;
525 	int ttl;
526 	int pos = PCI_CFG_SPACE_SIZE;
527 
528 	/* minimum 8 bytes per capability */
529 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
530 
531 	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
532 		return 0;
533 
534 	if (start)
535 		pos = start;
536 
537 	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
538 		return 0;
539 
540 	/*
541 	 * If we have no capabilities, this is indicated by cap ID,
542 	 * cap version and next pointer all being 0.
543 	 */
544 	if (header == 0)
545 		return 0;
546 
547 	while (ttl-- > 0) {
548 		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
549 			return pos;
550 
551 		pos = PCI_EXT_CAP_NEXT(header);
552 		if (pos < PCI_CFG_SPACE_SIZE)
553 			break;
554 
555 		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
556 			break;
557 	}
558 
559 	return 0;
560 }
561 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
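
/*
 * Editorial usage sketch (not part of the original source): walking
 * every instance of the Vendor-Specific Extended Capability:
 *
 *	int pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(pdev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		pci_info(pdev, "VSEC at %#x\n", pos);
 */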
562 
563 /**
564  * pci_find_ext_capability - Find an extended capability
565  * @dev: PCI device to query
566  * @cap: capability code
567  *
568  * Returns the address of the requested extended capability structure
569  * within the device's PCI configuration space or 0 if the device does
570  * not support it.  Possible values for @cap include:
571  *
572  *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
573  *  %PCI_EXT_CAP_ID_VC		Virtual Channel
574  *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
575  *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
576  */
577 int pci_find_ext_capability(struct pci_dev *dev, int cap)
578 {
579 	return pci_find_next_ext_capability(dev, 0, cap);
580 }
581 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
582 
583 /**
584  * pci_get_dsn - Read and return the 8-byte Device Serial Number
585  * @dev: PCI device to query
586  *
587  * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
588  * Number.
589  *
590  * Returns the DSN, or zero if the capability does not exist.
591  */
592 u64 pci_get_dsn(struct pci_dev *dev)
593 {
594 	u32 dword;
595 	u64 dsn;
596 	int pos;
597 
598 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
599 	if (!pos)
600 		return 0;
601 
602 	/*
603 	 * The Device Serial Number is two dwords offset 4 bytes from the
604 	 * capability position. The specification says that the first dword is
605 	 * the lower half, and the second dword is the upper half.
606 	 */
607 	pos += 4;
608 	pci_read_config_dword(dev, pos, &dword);
609 	dsn = (u64)dword;
610 	pci_read_config_dword(dev, pos + 4, &dword);
611 	dsn |= ((u64)dword) << 32;
612 
613 	return dsn;
614 }
615 EXPORT_SYMBOL_GPL(pci_get_dsn);
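
/*
 * Editorial usage sketch (not part of the original source): the DSN is
 * handy as a stable device identifier, e.g. for logging:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */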
616 
617 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
618 {
619 	int rc, ttl = PCI_FIND_CAP_TTL;
620 	u8 cap, mask;
621 
622 	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
623 		mask = HT_3BIT_CAP_MASK;
624 	else
625 		mask = HT_5BIT_CAP_MASK;
626 
627 	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
628 				      PCI_CAP_ID_HT, &ttl);
629 	while (pos) {
630 		rc = pci_read_config_byte(dev, pos + 3, &cap);
631 		if (rc != PCIBIOS_SUCCESSFUL)
632 			return 0;
633 
634 		if ((cap & mask) == ht_cap)
635 			return pos;
636 
637 		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
638 					      pos + PCI_CAP_LIST_NEXT,
639 					      PCI_CAP_ID_HT, &ttl);
640 	}
641 
642 	return 0;
643 }
644 /**
645  * pci_find_next_ht_capability - query a device's Hypertransport capabilities
646  * @dev: PCI device to query
647  * @pos: Position from which to continue searching
648  * @ht_cap: Hypertransport capability code
649  *
650  * To be used in conjunction with pci_find_ht_capability() to search for
651  * all capabilities matching @ht_cap. @pos should always be a value returned
652  * from pci_find_ht_capability().
653  *
654  * NB. To be 100% safe against broken PCI devices, the caller should take
655  * steps to avoid an infinite loop.
656  */
657 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
658 {
659 	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
660 }
661 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
662 
663 /**
664  * pci_find_ht_capability - query a device's Hypertransport capabilities
665  * @dev: PCI device to query
666  * @ht_cap: Hypertransport capability code
667  *
668  * Tell if a device supports a given Hypertransport capability.
669  * Returns an address within the device's PCI configuration space
670  * or 0 in case the device does not support the requested capability.
671  * The address points to the PCI capability, of type PCI_CAP_ID_HT,
672  * which has a Hypertransport capability matching @ht_cap.
673  */
674 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
675 {
676 	int pos;
677 
678 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
679 	if (pos)
680 		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
681 
682 	return pos;
683 }
684 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
685 
686 /**
687  * pci_find_parent_resource - return resource region of parent bus of given
688  *			      region
689  * @dev: PCI device structure contains resources to be searched
690  * @res: child resource record for which parent is sought
691  *
692  * For given resource region of given device, return the resource region of
693  * parent bus the given region is contained in.
694  */
695 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
696 					  struct resource *res)
697 {
698 	const struct pci_bus *bus = dev->bus;
699 	struct resource *r;
700 	int i;
701 
702 	pci_bus_for_each_resource(bus, r, i) {
703 		if (!r)
704 			continue;
705 		if (resource_contains(r, res)) {
706 
707 			/*
708 			 * If the window is prefetchable but the BAR is
709 			 * not, the allocator made a mistake.
710 			 */
711 			if (r->flags & IORESOURCE_PREFETCH &&
712 			    !(res->flags & IORESOURCE_PREFETCH))
713 				return NULL;
714 
715 			/*
716 			 * If we're below a transparent bridge, there may
717 			 * be both a positively-decoded aperture and a
718 			 * subtractively-decoded region that contain the BAR.
719 			 * We want the positively-decoded one, so this depends
720 			 * on pci_bus_for_each_resource() giving us those
721 			 * first.
722 			 */
723 			return r;
724 		}
725 	}
726 	return NULL;
727 }
728 EXPORT_SYMBOL(pci_find_parent_resource);
729 
730 /**
731  * pci_find_resource - Return matching PCI device resource
732  * @dev: PCI device to query
733  * @res: Resource to look for
734  *
735  * Goes over standard PCI resources (BARs) and checks if the given resource
736  * is fully contained in any of them. In that case the
737  * matching resource is returned, %NULL otherwise.
738  */
739 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
740 {
741 	int i;
742 
743 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
744 		struct resource *r = &dev->resource[i];
745 
746 		if (r->start && resource_contains(r, res))
747 			return r;
748 	}
749 
750 	return NULL;
751 }
752 EXPORT_SYMBOL(pci_find_resource);
753 
754 /**
755  * pci_find_pcie_root_port - return PCIe Root Port
756  * @dev: PCI device to query
757  *
758  * Traverse up the parent chain and return the PCIe Root Port PCI Device
759  * for a given PCI Device.
760  */
761 struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
762 {
763 	struct pci_dev *bridge, *highest_pcie_bridge = dev;
764 
765 	bridge = pci_upstream_bridge(dev);
766 	while (bridge && pci_is_pcie(bridge)) {
767 		highest_pcie_bridge = bridge;
768 		bridge = pci_upstream_bridge(bridge);
769 	}
770 
771 	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
772 		return NULL;
773 
774 	return highest_pcie_bridge;
775 }
776 EXPORT_SYMBOL(pci_find_pcie_root_port);
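
/*
 * Editorial usage sketch (not part of the original source): finding
 * the Root Port above a device, e.g. to inspect its link status:
 *
 *	u16 lnksta;
 *	struct pci_dev *rp = pci_find_pcie_root_port(pdev);
 *
 *	if (rp)
 *		pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
 */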
777 
778 /**
779  * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
780  * @dev: the PCI device to operate on
781  * @pos: config space offset of status word
782  * @mask: mask of bit(s) to care about in status word
783  *
784  * Return 1 when mask bit(s) in status word clear, 0 otherwise.
785  */
786 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
787 {
788 	int i;
789 
790 	/* Wait for Transaction Pending bit clear */
791 	for (i = 0; i < 4; i++) {
792 		u16 status;
793 		if (i)
794 			msleep((1 << (i - 1)) * 100);
795 
796 		pci_read_config_word(dev, pos, &status);
797 		if (!(status & mask))
798 			return 1;
799 	}
800 
801 	return 0;
802 }
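
/*
 * Editorial usage sketch (not part of the original source): this
 * helper backs flows such as Advanced Features FLR, which polls the
 * Transaction Pending bit before resetting; pos is assumed here to be
 * the offset of the AF capability:
 *
 *	if (!pci_wait_for_pending(dev, pos + PCI_AF_STATUS,
 *				  PCI_AF_STATUS_TP))
 *		pci_err(dev, "transactions still pending\n");
 */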
803 
804 /**
805  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
806  * @dev: PCI device to have its BARs restored
807  *
808  * Restore the BAR values for a given device, so as to make it
809  * accessible by its driver.
810  */
811 static void pci_restore_bars(struct pci_dev *dev)
812 {
813 	int i;
814 
815 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
816 		pci_update_resource(dev, i);
817 }
818 
819 static const struct pci_platform_pm_ops *pci_platform_pm;
820 
821 int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
822 {
823 	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
824 	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
825 		return -EINVAL;
826 	pci_platform_pm = ops;
827 	return 0;
828 }
829 
830 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
831 {
832 	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
833 }
834 
835 static inline int platform_pci_set_power_state(struct pci_dev *dev,
836 					       pci_power_t t)
837 {
838 	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
839 }
840 
841 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
842 {
843 	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
844 }
845 
846 static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
847 {
848 	if (pci_platform_pm && pci_platform_pm->refresh_state)
849 		pci_platform_pm->refresh_state(dev);
850 }
851 
852 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
853 {
854 	return pci_platform_pm ?
855 			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
856 }
857 
858 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
859 {
860 	return pci_platform_pm ?
861 			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
862 }
863 
864 static inline bool platform_pci_need_resume(struct pci_dev *dev)
865 {
866 	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
867 }
868 
869 static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
870 {
871 	return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
872 }
873 
874 /**
875  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
876  *			     given PCI device
877  * @dev: PCI device to handle.
878  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
879  *
880  * RETURN VALUE:
881  * -EINVAL if the requested state is invalid.
882  * -EIO if device does not support PCI PM or its PM capabilities register has a
883  * wrong version, or device doesn't support the requested state.
884  * 0 if device already is in the requested state.
885  * 0 if device's power state has been successfully changed.
886  */
887 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
888 {
889 	u16 pmcsr;
890 	bool need_restore = false;
891 
892 	/* Check if we're already there */
893 	if (dev->current_state == state)
894 		return 0;
895 
896 	if (!dev->pm_cap)
897 		return -EIO;
898 
899 	if (state < PCI_D0 || state > PCI_D3hot)
900 		return -EINVAL;
901 
902 	/*
903 	 * Validate transition: We can enter D0 from any state, but if
904 	 * we're already in a low-power state, we can only go deeper.  E.g.,
905 	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
906 	 * we'd have to go from D3 to D0, then to D1.
907 	 */
908 	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
909 	    && dev->current_state > state) {
910 		pci_err(dev, "invalid power transition (from %s to %s)\n",
911 			pci_power_name(dev->current_state),
912 			pci_power_name(state));
913 		return -EINVAL;
914 	}
915 
916 	/* Check if this device supports the desired state */
917 	if ((state == PCI_D1 && !dev->d1_support)
918 	   || (state == PCI_D2 && !dev->d2_support))
919 		return -EIO;
920 
921 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
922 	if (pmcsr == (u16) ~0) {
923 		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
924 			pci_power_name(dev->current_state),
925 			pci_power_name(state));
926 		return -EIO;
927 	}
928 
929 	/*
930 	 * If we're (effectively) in D3, force entire word to 0.
931 	 * This doesn't affect PME_Status, disables PME_En, and
932 	 * sets PowerState to 0.
933 	 */
934 	switch (dev->current_state) {
935 	case PCI_D0:
936 	case PCI_D1:
937 	case PCI_D2:
938 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
939 		pmcsr |= state;
940 		break;
941 	case PCI_D3hot:
942 	case PCI_D3cold:
943 	case PCI_UNKNOWN: /* Boot-up */
944 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
945 		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
946 			need_restore = true;
947 		/* Fall-through - force to D0 */
948 	default:
949 		pmcsr = 0;
950 		break;
951 	}
952 
953 	/* Enter specified state */
954 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
955 
956 	/*
957 	 * Mandatory power management transition delays; see PCI PM 1.1
958 	 * 5.6.1 table 18
959 	 */
960 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
961 		pci_dev_d3_sleep(dev);
962 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
963 		msleep(PCI_PM_D2_DELAY);
964 
965 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
966 	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
967 	if (dev->current_state != state)
968 		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
969 			 pci_power_name(dev->current_state),
970 			 pci_power_name(state));
971 
972 	/*
973 	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
974 	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
975 	 * from D3hot to D0 _may_ perform an internal reset, thereby
976 	 * going to "D0 Uninitialized" rather than "D0 Initialized".
977 	 * For example, at least some versions of the 3c905B and the
978 	 * 3c556B exhibit this behaviour.
979 	 *
980 	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
981 	 * devices in a D3hot state at boot.  Consequently, we need to
982 	 * restore at least the BARs so that the device will be
983 	 * accessible to its driver.
984 	 */
985 	if (need_restore)
986 		pci_restore_bars(dev);
987 
988 	if (dev->bus->self)
989 		pcie_aspm_pm_state_change(dev->bus->self);
990 
991 	return 0;
992 }
993 
994 /**
995  * pci_update_current_state - Read power state of given device and cache it
996  * @dev: PCI device to handle.
997  * @state: State to cache in case the device doesn't have the PM capability
998  *
999  * The power state is read from the PMCSR register, which, however, is
1000  * inaccessible in D3cold.  The platform firmware is therefore queried first
1001  * to detect accessibility of the register.  In case the platform firmware
1002  * reports an incorrect state or the device isn't power manageable by the
1003  * platform at all, we try to detect D3cold by testing accessibility of the
1004  * vendor ID in config space.
1005  */
1006 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1007 {
1008 	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
1009 	    !pci_device_is_present(dev)) {
1010 		dev->current_state = PCI_D3cold;
1011 	} else if (dev->pm_cap) {
1012 		u16 pmcsr;
1013 
1014 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1015 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1016 	} else {
1017 		dev->current_state = state;
1018 	}
1019 }
1020 
1021 /**
1022  * pci_refresh_power_state - Refresh the given device's power state data
1023  * @dev: Target PCI device.
1024  *
1025  * Ask the platform to refresh the device's power state information and invoke
1026  * pci_update_current_state() to update its current PCI power state.
1027  */
1028 void pci_refresh_power_state(struct pci_dev *dev)
1029 {
1030 	if (platform_pci_power_manageable(dev))
1031 		platform_pci_refresh_power_state(dev);
1032 
1033 	pci_update_current_state(dev, dev->current_state);
1034 }
1035 
1036 /**
1037  * pci_platform_power_transition - Use platform to change device power state
1038  * @dev: PCI device to handle.
1039  * @state: State to put the device into.
1040  */
1041 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1042 {
1043 	int error;
1044 
1045 	if (platform_pci_power_manageable(dev)) {
1046 		error = platform_pci_set_power_state(dev, state);
1047 		if (!error)
1048 			pci_update_current_state(dev, state);
1049 	} else
1050 		error = -ENODEV;
1051 
1052 	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
1053 		dev->current_state = PCI_D0;
1054 
1055 	return error;
1056 }
1057 EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1058 
1059 /**
1060  * pci_wakeup - Wake up a PCI device
1061  * @pci_dev: Device to handle.
1062  * @ign: ignored parameter
1063  */
1064 static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
1065 {
1066 	pci_wakeup_event(pci_dev);
1067 	pm_request_resume(&pci_dev->dev);
1068 	return 0;
1069 }
1070 
1071 /**
1072  * pci_wakeup_bus - Walk given bus and wake up devices on it
1073  * @bus: Top bus of the subtree to walk.
1074  */
1075 void pci_wakeup_bus(struct pci_bus *bus)
1076 {
1077 	if (bus)
1078 		pci_walk_bus(bus, pci_wakeup, NULL);
1079 }
1080 
1081 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1082 {
1083 	int delay = 1;
1084 	u32 id;
1085 
1086 	/*
1087 	 * After reset, the device should not silently discard config
1088 	 * requests, but it may still indicate that it needs more time by
1089 	 * responding to them with CRS completions.  The Root Port will
1090 	 * generally synthesize ~0 data to complete the read (except when
1091 	 * CRS SV is enabled and the read was for the Vendor ID; in that
1092 	 * case it synthesizes 0x0001 data).
1093 	 *
1094 	 * Wait for the device to return a non-CRS completion.  Read the
1095 	 * Command register instead of Vendor ID so we don't have to
1096 	 * contend with the CRS SV value.
1097 	 */
1098 	pci_read_config_dword(dev, PCI_COMMAND, &id);
1099 	while (id == ~0) {
1100 		if (delay > timeout) {
1101 			pci_warn(dev, "not ready %dms after %s; giving up\n",
1102 				 delay - 1, reset_type);
1103 			return -ENOTTY;
1104 		}
1105 
1106 		if (delay > 1000)
1107 			pci_info(dev, "not ready %dms after %s; waiting\n",
1108 				 delay - 1, reset_type);
1109 
1110 		msleep(delay);
1111 		delay *= 2;
1112 		pci_read_config_dword(dev, PCI_COMMAND, &id);
1113 	}
1114 
1115 	if (delay > 1000)
1116 		pci_info(dev, "ready %dms after %s\n", delay - 1,
1117 			 reset_type);
1118 
1119 	return 0;
1120 }
1121 
1122 /**
1123  * pci_power_up - Put the given device into D0
1124  * @dev: PCI device to power up
1125  */
1126 int pci_power_up(struct pci_dev *dev)
1127 {
1128 	pci_platform_power_transition(dev, PCI_D0);
1129 
1130 	/*
1131 	 * Mandatory power management transition delays are handled in
1132 	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
1133 	 * corresponding bridge.
1134 	 */
1135 	if (dev->runtime_d3cold) {
1136 		/*
1137 		 * When powering on a bridge from D3cold, the whole hierarchy
1138 		 * may be powered on into the D0uninitialized state; resume the
1139 		 * devices below to give them a chance to suspend again.
1140 		 */
1141 		pci_wakeup_bus(dev->subordinate);
1142 	}
1143 
1144 	return pci_raw_set_power_state(dev, PCI_D0);
1145 }
1146 
1147 /**
1148  * __pci_dev_set_current_state - Set current state of a PCI device
1149  * @dev: Device to handle
1150  * @data: pointer to state to be set
1151  */
1152 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1153 {
1154 	pci_power_t state = *(pci_power_t *)data;
1155 
1156 	dev->current_state = state;
1157 	return 0;
1158 }
1159 
1160 /**
1161  * pci_bus_set_current_state - Walk given bus and set current state of devices
1162  * @bus: Top bus of the subtree to walk.
1163  * @state: state to be set
1164  */
1165 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1166 {
1167 	if (bus)
1168 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1169 }
1170 
1171 /**
1172  * pci_set_power_state - Set the power state of a PCI device
1173  * @dev: PCI device to handle.
1174  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1175  *
1176  * Transition a device to a new power state, using the platform firmware and/or
1177  * the device's PCI PM registers.
1178  *
1179  * RETURN VALUE:
1180  * -EINVAL if the requested state is invalid.
1181  * -EIO if device does not support PCI PM or its PM capabilities register has a
1182  * wrong version, or device doesn't support the requested state.
1183  * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1184  * 0 if device already is in the requested state.
1185  * 0 if the transition is to D3 but D3 is not supported.
1186  * 0 if device's power state has been successfully changed.
1187  */
1188 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1189 {
1190 	int error;
1191 
1192 	/* Bound the state we're entering */
1193 	if (state > PCI_D3cold)
1194 		state = PCI_D3cold;
1195 	else if (state < PCI_D0)
1196 		state = PCI_D0;
1197 	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1198 
1199 		/*
1200 		 * If the device or the parent bridge do not support PCI
1201 		 * PM, ignore the request if we're doing anything other
1202 		 * than putting it into D0 (which would only happen on
1203 		 * boot).
1204 		 */
1205 		return 0;
1206 
1207 	/* Check if we're already there */
1208 	if (dev->current_state == state)
1209 		return 0;
1210 
1211 	if (state == PCI_D0)
1212 		return pci_power_up(dev);
1213 
1214 	/*
1215 	 * This device is quirked not to be put into D3, so don't put it in
1216 	 * D3
1217 	 */
1218 	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1219 		return 0;
1220 
1221 	/*
1222 	 * To put device in D3cold, we put device into D3hot in native
1223 	 * way, then put device into D3cold with platform ops
1224 	 */
1225 	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
1226 					PCI_D3hot : state);
1227 
1228 	if (pci_platform_power_transition(dev, state))
1229 		return error;
1230 
1231 	/* Powering off a bridge may power off the whole hierarchy */
1232 	if (state == PCI_D3cold)
1233 		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
1234 
1235 	return 0;
1236 }
1237 EXPORT_SYMBOL(pci_set_power_state);
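
/*
 * Editorial usage sketch (not part of the original source): the resume
 * side of a legacy PM flow returns the device to D0 and then restores
 * its saved config space:
 *
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */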
1238 
1239 /**
1240  * pci_choose_state - Choose the power state of a PCI device
1241  * @dev: PCI device to be suspended
1242  * @state: target sleep state for the whole system. This is the value
1243  *	   that is passed to suspend() function.
1244  *
1245  * Returns PCI power state suitable for given device and given system
1246  * message.
1247  */
1248 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
1249 {
1250 	pci_power_t ret;
1251 
1252 	if (!dev->pm_cap)
1253 		return PCI_D0;
1254 
1255 	ret = platform_pci_choose_state(dev);
1256 	if (ret != PCI_POWER_ERROR)
1257 		return ret;
1258 
1259 	switch (state.event) {
1260 	case PM_EVENT_ON:
1261 		return PCI_D0;
1262 	case PM_EVENT_FREEZE:
1263 	case PM_EVENT_PRETHAW:
1264 		/* REVISIT both freeze and pre-thaw "should" use D0 */
1265 	case PM_EVENT_SUSPEND:
1266 	case PM_EVENT_HIBERNATE:
1267 		return PCI_D3hot;
1268 	default:
1269 		pci_info(dev, "unrecognized suspend event %d\n",
1270 			 state.event);
1271 		BUG();
1272 	}
1273 	return PCI_D0;
1274 }
1275 EXPORT_SYMBOL(pci_choose_state);
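
/*
 * Editorial usage sketch (not part of the original source): a legacy
 * .suspend hook can let the platform pick the target state; "foo" is
 * a hypothetical name:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t msg)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, msg));
 *		return 0;
 *	}
 */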
1276 
1277 #define PCI_EXP_SAVE_REGS	7
1278 
1279 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1280 						       u16 cap, bool extended)
1281 {
1282 	struct pci_cap_saved_state *tmp;
1283 
1284 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1285 		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1286 			return tmp;
1287 	}
1288 	return NULL;
1289 }
1290 
1291 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1292 {
1293 	return _pci_find_saved_cap(dev, cap, false);
1294 }
1295 
1296 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1297 {
1298 	return _pci_find_saved_cap(dev, cap, true);
1299 }
1300 
1301 static int pci_save_pcie_state(struct pci_dev *dev)
1302 {
1303 	int i = 0;
1304 	struct pci_cap_saved_state *save_state;
1305 	u16 *cap;
1306 
1307 	if (!pci_is_pcie(dev))
1308 		return 0;
1309 
1310 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1311 	if (!save_state) {
1312 		pci_err(dev, "buffer not found in %s\n", __func__);
1313 		return -ENOMEM;
1314 	}
1315 
1316 	cap = (u16 *)&save_state->cap.data[0];
1317 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1318 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1319 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1320 	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1321 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1322 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1323 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1324 
1325 	return 0;
1326 }
1327 
1328 static void pci_restore_pcie_state(struct pci_dev *dev)
1329 {
1330 	int i = 0;
1331 	struct pci_cap_saved_state *save_state;
1332 	u16 *cap;
1333 
1334 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1335 	if (!save_state)
1336 		return;
1337 
1338 	cap = (u16 *)&save_state->cap.data[0];
1339 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1340 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1341 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1342 	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1343 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1344 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1345 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1346 }
1347 
1348 static int pci_save_pcix_state(struct pci_dev *dev)
1349 {
1350 	int pos;
1351 	struct pci_cap_saved_state *save_state;
1352 
1353 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1354 	if (!pos)
1355 		return 0;
1356 
1357 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1358 	if (!save_state) {
1359 		pci_err(dev, "buffer not found in %s\n", __func__);
1360 		return -ENOMEM;
1361 	}
1362 
1363 	pci_read_config_word(dev, pos + PCI_X_CMD,
1364 			     (u16 *)save_state->cap.data);
1365 
1366 	return 0;
1367 }
1368 
1369 static void pci_restore_pcix_state(struct pci_dev *dev)
1370 {
1371 	int i = 0, pos;
1372 	struct pci_cap_saved_state *save_state;
1373 	u16 *cap;
1374 
1375 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1376 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1377 	if (!save_state || !pos)
1378 		return;
1379 	cap = (u16 *)&save_state->cap.data[0];
1380 
1381 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1382 }
1383 
1384 static void pci_save_ltr_state(struct pci_dev *dev)
1385 {
1386 	int ltr;
1387 	struct pci_cap_saved_state *save_state;
1388 	u16 *cap;
1389 
1390 	if (!pci_is_pcie(dev))
1391 		return;
1392 
1393 	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1394 	if (!ltr)
1395 		return;
1396 
1397 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1398 	if (!save_state) {
1399 		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1400 		return;
1401 	}
1402 
1403 	cap = (u16 *)&save_state->cap.data[0];
1404 	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
1405 	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
1406 }
1407 
1408 static void pci_restore_ltr_state(struct pci_dev *dev)
1409 {
1410 	struct pci_cap_saved_state *save_state;
1411 	int ltr;
1412 	u16 *cap;
1413 
1414 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1415 	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1416 	if (!save_state || !ltr)
1417 		return;
1418 
1419 	cap = (u16 *)&save_state->cap.data[0];
1420 	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
1421 	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
1422 }
1423 
1424 /**
1425  * pci_save_state - save the PCI configuration space of a device before
1426  *		    suspending
1427  * @dev: PCI device that we're dealing with
1428  */
1429 int pci_save_state(struct pci_dev *dev)
1430 {
1431 	int i;
1432 	/* XXX: 100% dword access ok here? */
1433 	for (i = 0; i < 16; i++) {
1434 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1435 		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
1436 			i * 4, dev->saved_config_space[i]);
1437 	}
1438 	dev->state_saved = true;
1439 
1440 	i = pci_save_pcie_state(dev);
1441 	if (i != 0)
1442 		return i;
1443 
1444 	i = pci_save_pcix_state(dev);
1445 	if (i != 0)
1446 		return i;
1447 
1448 	pci_save_ltr_state(dev);
1449 	pci_save_dpc_state(dev);
1450 	pci_save_aer_state(dev);
1451 	return pci_save_vc_state(dev);
1452 }
1453 EXPORT_SYMBOL(pci_save_state);
1454 
1455 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1456 				     u32 saved_val, int retry, bool force)
1457 {
1458 	u32 val;
1459 
1460 	pci_read_config_dword(pdev, offset, &val);
1461 	if (!force && val == saved_val)
1462 		return;
1463 
1464 	for (;;) {
1465 		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1466 			offset, val, saved_val);
1467 		pci_write_config_dword(pdev, offset, saved_val);
1468 		if (retry-- <= 0)
1469 			return;
1470 
1471 		pci_read_config_dword(pdev, offset, &val);
1472 		if (val == saved_val)
1473 			return;
1474 
1475 		mdelay(1);
1476 	}
1477 }
1478 
1479 static void pci_restore_config_space_range(struct pci_dev *pdev,
1480 					   int start, int end, int retry,
1481 					   bool force)
1482 {
1483 	int index;
1484 
1485 	for (index = end; index >= start; index--)
1486 		pci_restore_config_dword(pdev, 4 * index,
1487 					 pdev->saved_config_space[index],
1488 					 retry, force);
1489 }
1490 
1491 static void pci_restore_config_space(struct pci_dev *pdev)
1492 {
1493 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1494 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1495 		/* Restore BARs before the command register. */
1496 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1497 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1498 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1499 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1500 
1501 		/*
1502 		 * Force rewriting of prefetch registers to avoid S3 resume
1503 		 * issues on Intel PCI bridges that occur when these
1504 		 * registers are not explicitly written.
1505 		 */
1506 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1507 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1508 	} else {
1509 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1510 	}
1511 }
1512 
1513 static void pci_restore_rebar_state(struct pci_dev *pdev)
1514 {
1515 	unsigned int pos, nbars, i;
1516 	u32 ctrl;
1517 
1518 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1519 	if (!pos)
1520 		return;
1521 
1522 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1523 	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1524 		    PCI_REBAR_CTRL_NBAR_SHIFT;
1525 
1526 	for (i = 0; i < nbars; i++, pos += 8) {
1527 		struct resource *res;
1528 		int bar_idx, size;
1529 
1530 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1531 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1532 		res = pdev->resource + bar_idx;
1533 		size = ilog2(resource_size(res)) - 20;
1534 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1535 		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1536 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1537 	}
1538 }
1539 
1540 /**
1541  * pci_restore_state - Restore the saved state of a PCI device
1542  * @dev: PCI device that we're dealing with
1543  */
1544 void pci_restore_state(struct pci_dev *dev)
1545 {
1546 	if (!dev->state_saved)
1547 		return;
1548 
1549 	/*
1550 	 * Restore max latencies (in the LTR capability) before enabling
1551 	 * LTR itself (in the PCIe capability).
1552 	 */
1553 	pci_restore_ltr_state(dev);
1554 
1555 	pci_restore_pcie_state(dev);
1556 	pci_restore_pasid_state(dev);
1557 	pci_restore_pri_state(dev);
1558 	pci_restore_ats_state(dev);
1559 	pci_restore_vc_state(dev);
1560 	pci_restore_rebar_state(dev);
1561 	pci_restore_dpc_state(dev);
1562 
1563 	pci_aer_clear_status(dev);
1564 	pci_restore_aer_state(dev);
1565 
1566 	pci_restore_config_space(dev);
1567 
1568 	pci_restore_pcix_state(dev);
1569 	pci_restore_msi_state(dev);
1570 
1571 	/* Restore ACS and IOV configuration state */
1572 	pci_enable_acs(dev);
1573 	pci_restore_iov_state(dev);
1574 
1575 	dev->state_saved = false;
1576 }
1577 EXPORT_SYMBOL(pci_restore_state);
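
/*
 * Editorial usage sketch (not part of the original source): save and
 * restore can bracket an operation that clobbers config space, such as
 * a driver-initiated reset ("foo_reset_hw" is a hypothetical helper):
 *
 *	pci_save_state(pdev);
 *	foo_reset_hw(pdev);
 *	pci_restore_state(pdev);
 */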
1578 
1579 struct pci_saved_state {
1580 	u32 config_space[16];
1581 	struct pci_cap_saved_data cap[0];
1582 };
1583 
1584 /**
1585  * pci_store_saved_state - Allocate and return an opaque struct containing
1586  *			   the device saved state.
1587  * @dev: PCI device that we're dealing with
1588  *
1589  * Returns NULL if there is no saved state or on error.
1590  */
1591 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1592 {
1593 	struct pci_saved_state *state;
1594 	struct pci_cap_saved_state *tmp;
1595 	struct pci_cap_saved_data *cap;
1596 	size_t size;
1597 
1598 	if (!dev->state_saved)
1599 		return NULL;
1600 
1601 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1602 
1603 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1604 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1605 
1606 	state = kzalloc(size, GFP_KERNEL);
1607 	if (!state)
1608 		return NULL;
1609 
1610 	memcpy(state->config_space, dev->saved_config_space,
1611 	       sizeof(state->config_space));
1612 
1613 	cap = state->cap;
1614 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1615 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1616 		memcpy(cap, &tmp->cap, len);
1617 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1618 	}
1619 	/* Empty cap_save terminates list */
1620 
1621 	return state;
1622 }
1623 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1624 
1625 /**
1626  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1627  * @dev: PCI device that we're dealing with
1628  * @state: Saved state returned from pci_store_saved_state()
1629  */
1630 int pci_load_saved_state(struct pci_dev *dev,
1631 			 struct pci_saved_state *state)
1632 {
1633 	struct pci_cap_saved_data *cap;
1634 
1635 	dev->state_saved = false;
1636 
1637 	if (!state)
1638 		return 0;
1639 
1640 	memcpy(dev->saved_config_space, state->config_space,
1641 	       sizeof(state->config_space));
1642 
1643 	cap = state->cap;
1644 	while (cap->size) {
1645 		struct pci_cap_saved_state *tmp;
1646 
1647 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1648 		if (!tmp || tmp->cap.size != cap->size)
1649 			return -EINVAL;
1650 
1651 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1652 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1653 		       sizeof(struct pci_cap_saved_data) + cap->size);
1654 	}
1655 
1656 	dev->state_saved = true;
1657 	return 0;
1658 }
1659 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1660 
1661 /**
1662  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1663  *				   and free the memory allocated for it.
1664  * @dev: PCI device that we're dealing with
1665  * @state: Pointer to saved state returned from pci_store_saved_state()
1666  */
1667 int pci_load_and_free_saved_state(struct pci_dev *dev,
1668 				  struct pci_saved_state **state)
1669 {
1670 	int ret = pci_load_saved_state(dev, *state);
1671 	kfree(*state);
1672 	*state = NULL;
1673 	return ret;
1674 }
1675 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
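
/*
 * Editorial usage sketch (not part of the original source): capture
 * the state once, then re-arm dev->state_saved from the stored copy
 * before a later restore (a pattern used by passthrough drivers):
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	(reset the device here)
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */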
1676 
1677 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1678 {
1679 	return pci_enable_resources(dev, bars);
1680 }
1681 
1682 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1683 {
1684 	int err;
1685 	struct pci_dev *bridge;
1686 	u16 cmd;
1687 	u8 pin;
1688 
1689 	err = pci_set_power_state(dev, PCI_D0);
1690 	if (err < 0 && err != -EIO)
1691 		return err;
1692 
1693 	bridge = pci_upstream_bridge(dev);
1694 	if (bridge)
1695 		pcie_aspm_powersave_config_link(bridge);
1696 
1697 	err = pcibios_enable_device(dev, bars);
1698 	if (err < 0)
1699 		return err;
1700 	pci_fixup_device(pci_fixup_enable, dev);
1701 
1702 	if (dev->msi_enabled || dev->msix_enabled)
1703 		return 0;
1704 
1705 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1706 	if (pin) {
1707 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1708 		if (cmd & PCI_COMMAND_INTX_DISABLE)
1709 			pci_write_config_word(dev, PCI_COMMAND,
1710 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1711 	}
1712 
1713 	return 0;
1714 }
1715 
1716 /**
1717  * pci_reenable_device - Resume abandoned device
1718  * @dev: PCI device to be resumed
1719  *
1720  * NOTE: This function is a backend of pci_default_resume() and is not supposed
1721  * to be called by normal code; write a proper resume handler and use it instead.
1722  */
1723 int pci_reenable_device(struct pci_dev *dev)
1724 {
1725 	if (pci_is_enabled(dev))
1726 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1727 	return 0;
1728 }
1729 EXPORT_SYMBOL(pci_reenable_device);
1730 
1731 static void pci_enable_bridge(struct pci_dev *dev)
1732 {
1733 	struct pci_dev *bridge;
1734 	int retval;
1735 
1736 	bridge = pci_upstream_bridge(dev);
1737 	if (bridge)
1738 		pci_enable_bridge(bridge);
1739 
1740 	if (pci_is_enabled(dev)) {
1741 		if (!dev->is_busmaster)
1742 			pci_set_master(dev);
1743 		return;
1744 	}
1745 
1746 	retval = pci_enable_device(dev);
1747 	if (retval)
1748 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
1749 			retval);
1750 	pci_set_master(dev);
1751 }
1752 
1753 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1754 {
1755 	struct pci_dev *bridge;
1756 	int err;
1757 	int i, bars = 0;
1758 
1759 	/*
1760 	 * Power state could be unknown at this point, either due to a fresh
1761 	 * boot or a device removal call.  So get the current power state
1762 	 * so that things like MSI message writing will behave as expected
1763 	 * (e.g. if the device really is in D0 at enable time).
1764 	 */
1765 	if (dev->pm_cap) {
1766 		u16 pmcsr;
1767 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1768 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1769 	}
1770 
1771 	if (atomic_inc_return(&dev->enable_cnt) > 1)
1772 		return 0;		/* already enabled */
1773 
1774 	bridge = pci_upstream_bridge(dev);
1775 	if (bridge)
1776 		pci_enable_bridge(bridge);
1777 
1778 	/* Walk all resources; only the SR-IOV (VF) BARs are skipped */
1779 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1780 		if (dev->resource[i].flags & flags)
1781 			bars |= (1 << i);
1782 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1783 		if (dev->resource[i].flags & flags)
1784 			bars |= (1 << i);
1785 
1786 	err = do_pci_enable_device(dev, bars);
1787 	if (err < 0)
1788 		atomic_dec(&dev->enable_cnt);
1789 	return err;
1790 }
1791 
1792 /**
1793  * pci_enable_device_io - Initialize a device for use with IO space
1794  * @dev: PCI device to be initialized
1795  *
1796  * Initialize device before it's used by a driver. Ask low-level code
1797  * to enable I/O resources. Wake up the device if it was suspended.
1798  * Beware, this function can fail.
1799  */
1800 int pci_enable_device_io(struct pci_dev *dev)
1801 {
1802 	return pci_enable_device_flags(dev, IORESOURCE_IO);
1803 }
1804 EXPORT_SYMBOL(pci_enable_device_io);
1805 
1806 /**
1807  * pci_enable_device_mem - Initialize a device for use with Memory space
1808  * @dev: PCI device to be initialized
1809  *
1810  * Initialize device before it's used by a driver. Ask low-level code
1811  * to enable Memory resources. Wake up the device if it was suspended.
1812  * Beware, this function can fail.
1813  */
1814 int pci_enable_device_mem(struct pci_dev *dev)
1815 {
1816 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
1817 }
1818 EXPORT_SYMBOL(pci_enable_device_mem);
1819 
1820 /**
1821  * pci_enable_device - Initialize device before it's used by a driver.
1822  * @dev: PCI device to be initialized
1823  *
1824  * Initialize device before it's used by a driver. Ask low-level code
1825  * to enable I/O and memory. Wake up the device if it was suspended.
1826  * Beware, this function can fail.
1827  *
1828  * Note we don't actually enable the device many times if we call
1829  * this function repeatedly (we just increment the count).
1830  */
1831 int pci_enable_device(struct pci_dev *dev)
1832 {
1833 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1834 }
1835 EXPORT_SYMBOL(pci_enable_device);
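
/*
 * Illustrative sketch (not from an in-tree driver; the foo_* names are
 * hypothetical): an unmanaged driver pairs pci_enable_device() in its
 * probe() with pci_disable_device() in remove(), relying on the
 * reference counting described above:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_clear_master(pdev);
 *		pci_disable_device(pdev);
 *	}
 */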
1836 
1837 /*
1838  * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
1839  * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
1840  * there's no need to track it separately.  pci_devres is initialized
1841  * when a device is enabled using managed PCI device enable interface.
1842  */
1843 struct pci_devres {
1844 	unsigned int enabled:1;
1845 	unsigned int pinned:1;
1846 	unsigned int orig_intx:1;
1847 	unsigned int restore_intx:1;
1848 	unsigned int mwi:1;
1849 	u32 region_mask;
1850 };
1851 
1852 static void pcim_release(struct device *gendev, void *res)
1853 {
1854 	struct pci_dev *dev = to_pci_dev(gendev);
1855 	struct pci_devres *this = res;
1856 	int i;
1857 
1858 	if (dev->msi_enabled)
1859 		pci_disable_msi(dev);
1860 	if (dev->msix_enabled)
1861 		pci_disable_msix(dev);
1862 
1863 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1864 		if (this->region_mask & (1 << i))
1865 			pci_release_region(dev, i);
1866 
1867 	if (this->mwi)
1868 		pci_clear_mwi(dev);
1869 
1870 	if (this->restore_intx)
1871 		pci_intx(dev, this->orig_intx);
1872 
1873 	if (this->enabled && !this->pinned)
1874 		pci_disable_device(dev);
1875 }
1876 
1877 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1878 {
1879 	struct pci_devres *dr, *new_dr;
1880 
1881 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1882 	if (dr)
1883 		return dr;
1884 
1885 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1886 	if (!new_dr)
1887 		return NULL;
1888 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1889 }
1890 
1891 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1892 {
1893 	if (pci_is_managed(pdev))
1894 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1895 	return NULL;
1896 }
1897 
1898 /**
1899  * pcim_enable_device - Managed pci_enable_device()
1900  * @pdev: PCI device to be initialized
1901  *
1902  * Managed pci_enable_device().  Device will be disabled on driver detach.
1903  */
1904 int pcim_enable_device(struct pci_dev *pdev)
1905 {
1906 	struct pci_devres *dr;
1907 	int rc;
1908 
1909 	dr = get_pci_dr(pdev);
1910 	if (unlikely(!dr))
1911 		return -ENOMEM;
1912 	if (dr->enabled)
1913 		return 0;
1914 
1915 	rc = pci_enable_device(pdev);
1916 	if (!rc) {
1917 		pdev->is_managed = 1;
1918 		dr->enabled = 1;
1919 	}
1920 	return rc;
1921 }
1922 EXPORT_SYMBOL(pcim_enable_device);
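
/*
 * Illustrative sketch (hypothetical foo_* names): with the managed
 * variant, error paths and remove() need no explicit disable because
 * pcim_release() above undoes everything on driver detach:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		return pcim_iomap_regions(pdev, BIT(0), "foo");
 *	}
 *
 * On any failure after pcim_enable_device() succeeds, devres disables
 * the device automatically; no explicit cleanup is required.
 */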
1923 
1924 /**
1925  * pcim_pin_device - Pin managed PCI device
1926  * @pdev: PCI device to pin
1927  *
1928  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1929  * driver detach.  @pdev must have been enabled with
1930  * pcim_enable_device().
1931  */
1932 void pcim_pin_device(struct pci_dev *pdev)
1933 {
1934 	struct pci_devres *dr;
1935 
1936 	dr = find_pci_dr(pdev);
1937 	WARN_ON(!dr || !dr->enabled);
1938 	if (dr)
1939 		dr->pinned = 1;
1940 }
1941 EXPORT_SYMBOL(pcim_pin_device);
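
/*
 * Illustrative sketch: a driver that must leave the device operational
 * after unbind (for example because firmware keeps using it; the
 * condition name below is hypothetical) can pin it so that
 * pcim_release() skips the final disable:
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *	if (firmware_keeps_device_active)
 *		pcim_pin_device(pdev);
 */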
1942 
1943 /**
1944  * pcibios_add_device - provide arch specific hooks when adding device dev
1945  * @dev: the PCI device being added
1946  *
1947  * Permits the platform to provide architecture specific functionality when
1948  * devices are added. This is the default implementation. Architecture
1949  * implementations can override this.
1950  */
1951 int __weak pcibios_add_device(struct pci_dev *dev)
1952 {
1953 	return 0;
1954 }
1955 
1956 /**
1957  * pcibios_release_device - provide arch specific hooks when releasing
1958  *			    device dev
1959  * @dev: the PCI device being released
1960  *
1961  * Permits the platform to provide architecture specific functionality when
1962  * devices are released. This is the default implementation. Architecture
1963  * implementations can override this.
1964  */
1965 void __weak pcibios_release_device(struct pci_dev *dev) {}
1966 
1967 /**
1968  * pcibios_disable_device - disable arch specific PCI resources for device dev
1969  * @dev: the PCI device to disable
1970  *
1971  * Disables architecture specific PCI resources for the device. This
1972  * is the default implementation. Architecture implementations can
1973  * override this.
1974  */
1975 void __weak pcibios_disable_device(struct pci_dev *dev) {}
1976 
1977 /**
1978  * pcibios_penalize_isa_irq - penalize an ISA IRQ
1979  * @irq: ISA IRQ to penalize
1980  * @active: IRQ active or not
1981  *
1982  * Permits the platform to provide architecture-specific functionality when
1983  * penalizing ISA IRQs. This is the default implementation. Architecture
1984  * implementations can override this.
1985  */
1986 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1987 
1988 static void do_pci_disable_device(struct pci_dev *dev)
1989 {
1990 	u16 pci_command;
1991 
1992 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1993 	if (pci_command & PCI_COMMAND_MASTER) {
1994 		pci_command &= ~PCI_COMMAND_MASTER;
1995 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1996 	}
1997 
1998 	pcibios_disable_device(dev);
1999 }
2000 
2001 /**
2002  * pci_disable_enabled_device - Disable device without updating enable_cnt
2003  * @dev: PCI device to disable
2004  *
2005  * NOTE: This function is a backend of PCI power management routines and is
2006  * not supposed to be called by drivers.
2007  */
2008 void pci_disable_enabled_device(struct pci_dev *dev)
2009 {
2010 	if (pci_is_enabled(dev))
2011 		do_pci_disable_device(dev);
2012 }
2013 
2014 /**
2015  * pci_disable_device - Disable PCI device after use
2016  * @dev: PCI device to be disabled
2017  *
2018  * Signal to the system that the PCI device is not in use by the system
2019  * anymore.  This only involves disabling PCI bus-mastering, if active.
2020  *
2021  * Note we don't actually disable the device until all callers of
2022  * pci_enable_device() have called pci_disable_device().
2023  */
2024 void pci_disable_device(struct pci_dev *dev)
2025 {
2026 	struct pci_devres *dr;
2027 
2028 	dr = find_pci_dr(dev);
2029 	if (dr)
2030 		dr->enabled = 0;
2031 
2032 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2033 		      "disabling already-disabled device");
2034 
2035 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2036 		return;
2037 
2038 	do_pci_disable_device(dev);
2039 
2040 	dev->is_busmaster = 0;
2041 }
2042 EXPORT_SYMBOL(pci_disable_device);
2043 
2044 /**
2045  * pcibios_set_pcie_reset_state - set reset state for device dev
2046  * @dev: the PCIe device to reset
2047  * @state: Reset state to enter into
2048  *
2049  * Set the PCIe reset state for the device. This is the default
2050  * implementation. Architecture implementations can override this.
2051  */
2052 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2053 					enum pcie_reset_state state)
2054 {
2055 	return -EINVAL;
2056 }
2057 
2058 /**
2059  * pci_set_pcie_reset_state - set reset state for device dev
2060  * @dev: the PCIe device to reset
2061  * @state: Reset state to enter into
2062  *
2063  * Sets the PCI reset state for the device.
2064  */
2065 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2066 {
2067 	return pcibios_set_pcie_reset_state(dev, state);
2068 }
2069 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2070 
2071 /**
2072  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2073  * @dev: PCIe root port or event collector.
2074  */
2075 void pcie_clear_root_pme_status(struct pci_dev *dev)
2076 {
2077 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2078 }
2079 
2080 /**
2081  * pci_check_pme_status - Check if given device has generated PME.
2082  * @dev: Device to check.
2083  *
2084  * Check the PME status of the device and if set, clear it and clear PME enable
2085  * (if set).  Return 'true' if PME status and PME enable were both set or
2086  * 'false' otherwise.
2087  */
2088 bool pci_check_pme_status(struct pci_dev *dev)
2089 {
2090 	int pmcsr_pos;
2091 	u16 pmcsr;
2092 	bool ret = false;
2093 
2094 	if (!dev->pm_cap)
2095 		return false;
2096 
2097 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2098 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2099 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2100 		return false;
2101 
2102 	/* Clear PME status. */
2103 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2104 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2105 		/* Disable PME to avoid interrupt flood. */
2106 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2107 		ret = true;
2108 	}
2109 
2110 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2111 
2112 	return ret;
2113 }
2114 
2115 /**
2116  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2117  * @dev: Device to handle.
2118  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2119  *
2120  * Check if @dev has generated PME and queue a resume request for it in that
2121  * case.
2122  */
2123 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2124 {
2125 	if (pme_poll_reset && dev->pme_poll)
2126 		dev->pme_poll = false;
2127 
2128 	if (pci_check_pme_status(dev)) {
2129 		pci_wakeup_event(dev);
2130 		pm_request_resume(&dev->dev);
2131 	}
2132 	return 0;
2133 }
2134 
2135 /**
2136  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2137  * @bus: Top bus of the subtree to walk.
2138  */
2139 void pci_pme_wakeup_bus(struct pci_bus *bus)
2140 {
2141 	if (bus)
2142 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2143 }
2144 
2145 
2146 /**
2147  * pci_pme_capable - check the capability of a PCI device to generate PME#
2148  * @dev: PCI device to handle.
2149  * @state: PCI state from which device will issue PME#.
2150  */
2151 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2152 {
2153 	if (!dev->pm_cap)
2154 		return false;
2155 
2156 	return !!(dev->pme_support & (1 << state));
2157 }
2158 EXPORT_SYMBOL(pci_pme_capable);
2159 
2160 static void pci_pme_list_scan(struct work_struct *work)
2161 {
2162 	struct pci_pme_device *pme_dev, *n;
2163 
2164 	mutex_lock(&pci_pme_list_mutex);
2165 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2166 		if (pme_dev->dev->pme_poll) {
2167 			struct pci_dev *bridge;
2168 
2169 			bridge = pme_dev->dev->bus->self;
2170 			/*
2171 			 * If bridge is in low power state, the
2172 			 * configuration space of subordinate devices
2173 			 * may not be accessible
2174 			 */
2175 			if (bridge && bridge->current_state != PCI_D0)
2176 				continue;
2177 			/*
2178 			 * If the device is in D3cold it should not be
2179 			 * polled either.
2180 			 */
2181 			if (pme_dev->dev->current_state == PCI_D3cold)
2182 				continue;
2183 
2184 			pci_pme_wakeup(pme_dev->dev, NULL);
2185 		} else {
2186 			list_del(&pme_dev->list);
2187 			kfree(pme_dev);
2188 		}
2189 	}
2190 	if (!list_empty(&pci_pme_list))
2191 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2192 				   msecs_to_jiffies(PME_TIMEOUT));
2193 	mutex_unlock(&pci_pme_list_mutex);
2194 }
2195 
2196 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2197 {
2198 	u16 pmcsr;
2199 
2200 	if (!dev->pme_support)
2201 		return;
2202 
2203 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2204 	/* Clear PME_Status by writing 1 to it and enable PME# */
2205 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2206 	if (!enable)
2207 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2208 
2209 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2210 }
2211 
2212 /**
2213  * pci_pme_restore - Restore PME configuration after config space restore.
2214  * @dev: PCI device to update.
2215  */
2216 void pci_pme_restore(struct pci_dev *dev)
2217 {
2218 	u16 pmcsr;
2219 
2220 	if (!dev->pme_support)
2221 		return;
2222 
2223 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2224 	if (dev->wakeup_prepared) {
2225 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2226 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2227 	} else {
2228 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2229 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2230 	}
2231 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2232 }
2233 
2234 /**
2235  * pci_pme_active - enable or disable PCI device's PME# function
2236  * @dev: PCI device to handle.
2237  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2238  *
2239  * The caller must verify that the device is capable of generating PME# before
2240  * calling this function with @enable equal to 'true'.
2241  */
2242 void pci_pme_active(struct pci_dev *dev, bool enable)
2243 {
2244 	__pci_pme_active(dev, enable);
2245 
2246 	/*
2247 	 * PCI (as opposed to PCIe) PME requires that the device have
2248 	 * its PME# line hooked up correctly. Not all hardware vendors
2249 	 * do this, so the PME never gets delivered and the device
2250 	 * remains asleep. The easiest way around this is to
2251 	 * periodically walk the list of suspended devices and check
2252 	 * whether any have their PME flag set. The assumption is that
2253 	 * we'll wake up often enough anyway that this won't be a huge
2254 	 * hit, and the power savings from the devices will still be a
2255 	 * win.
2256 	 *
2257 	 * Although PCIe uses an in-band PME message instead of the PME# line
2258 	 * to report PME, PME does not work for some PCIe devices in
2259 	 * reality.  For example, there are devices that set their PME
2260 	 * status bits, but don't really bother to send a PME message;
2261 	 * there are PCI Express Root Ports that don't bother to
2262 	 * trigger interrupts when they receive PME messages from the
2263 	 * devices below.  So PME poll is used for PCIe devices too.
2264 	 */
2265 
2266 	if (dev->pme_poll) {
2267 		struct pci_pme_device *pme_dev;
2268 		if (enable) {
2269 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2270 					  GFP_KERNEL);
2271 			if (!pme_dev) {
2272 				pci_warn(dev, "can't enable PME#\n");
2273 				return;
2274 			}
2275 			pme_dev->dev = dev;
2276 			mutex_lock(&pci_pme_list_mutex);
2277 			list_add(&pme_dev->list, &pci_pme_list);
2278 			if (list_is_singular(&pci_pme_list))
2279 				queue_delayed_work(system_freezable_wq,
2280 						   &pci_pme_work,
2281 						   msecs_to_jiffies(PME_TIMEOUT));
2282 			mutex_unlock(&pci_pme_list_mutex);
2283 		} else {
2284 			mutex_lock(&pci_pme_list_mutex);
2285 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2286 				if (pme_dev->dev == dev) {
2287 					list_del(&pme_dev->list);
2288 					kfree(pme_dev);
2289 					break;
2290 				}
2291 			}
2292 			mutex_unlock(&pci_pme_list_mutex);
2293 		}
2294 	}
2295 
2296 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2297 }
2298 EXPORT_SYMBOL(pci_pme_active);
2299 
2300 /**
2301  * __pci_enable_wake - enable PCI device as wakeup event source
2302  * @dev: PCI device affected
2303  * @state: PCI state from which device will issue wakeup events
2304  * @enable: True to enable event generation; false to disable
2305  *
2306  * This enables the device as a wakeup event source, or disables it.
2307  * When such events involve platform-specific hooks, those hooks are
2308  * called automatically by this routine.
2309  *
2310  * Devices with legacy power management (no standard PCI PM capabilities)
2311  * always require such platform hooks.
2312  *
2313  * RETURN VALUE:
2314  * 0 is returned on success
2315  * -EINVAL is returned if the device is not supposed to wake up the system
2316  * A platform-dependent error code is returned if both the platform and the
2317  * native mechanism fail to enable the generation of wake-up events
2318  */
2319 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2320 {
2321 	int ret = 0;
2322 
2323 	/*
2324 	 * Bridges that are not power-manageable directly only signal
2325 	 * wakeup on behalf of subordinate devices, which is set up
2326 	 * elsewhere, so skip them. However, bridges that are
2327 	 * power-manageable may signal wakeup for themselves (for example,
2328 	 * on a hotplug event) and they need to be covered here.
2329 	 */
2330 	if (!pci_power_manageable(dev))
2331 		return 0;
2332 
2333 	/* Don't do the same thing twice in a row for one device. */
2334 	if (!!enable == !!dev->wakeup_prepared)
2335 		return 0;
2336 
2337 	/*
2338 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2339 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2340 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2341 	 */
2342 
2343 	if (enable) {
2344 		int error;
2345 
2346 		if (pci_pme_capable(dev, state))
2347 			pci_pme_active(dev, true);
2348 		else
2349 			ret = 1;
2350 		error = platform_pci_set_wakeup(dev, true);
2351 		if (ret)
2352 			ret = error;
2353 		if (!ret)
2354 			dev->wakeup_prepared = true;
2355 	} else {
2356 		platform_pci_set_wakeup(dev, false);
2357 		pci_pme_active(dev, false);
2358 		dev->wakeup_prepared = false;
2359 	}
2360 
2361 	return ret;
2362 }
2363 
2364 /**
2365  * pci_enable_wake - change wakeup settings for a PCI device
2366  * @pci_dev: Target device
2367  * @state: PCI state from which device will issue wakeup events
2368  * @enable: Whether or not to enable event generation
2369  *
2370  * If @enable is set, check device_may_wakeup() for the device before calling
2371  * __pci_enable_wake() for it.
2372  */
2373 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2374 {
2375 	if (enable && !device_may_wakeup(&pci_dev->dev))
2376 		return -EINVAL;
2377 
2378 	return __pci_enable_wake(pci_dev, state, enable);
2379 }
2380 EXPORT_SYMBOL(pci_enable_wake);
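
/*
 * Illustrative sketch (hypothetical foo_suspend(); most drivers rely on
 * the PCI core's PM callbacks instead): a legacy suspend hook can arm
 * wakeup according to the user-visible wakeup policy:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, PCI_D3hot,
 *				device_may_wakeup(&pdev->dev));
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */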
2381 
2382 /**
2383  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2384  * @dev: PCI device to prepare
2385  * @enable: True to enable wake-up event generation; false to disable
2386  *
2387  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2388  * and this function allows them to set that up cleanly - pci_enable_wake()
2389  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2390  * ordering constraints.
2391  *
2392  * This function only returns error code if the device is not allowed to wake
2393  * up the system from sleep or it is not capable of generating PME# from both
2394  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2395  */
2396 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2397 {
2398 	return pci_pme_capable(dev, PCI_D3cold) ?
2399 			pci_enable_wake(dev, PCI_D3cold, enable) :
2400 			pci_enable_wake(dev, PCI_D3hot, enable);
2401 }
2402 EXPORT_SYMBOL(pci_wake_from_d3);
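
/*
 * Illustrative sketch: a network driver enabling Wake-on-LAN would
 * typically call (hypothetical wol_enabled flag):
 *
 *	pci_wake_from_d3(pdev, wol_enabled);
 *
 * which selects D3cold when the device can signal PME# from there and
 * falls back to D3hot otherwise, as implemented above.
 */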
2403 
2404 /**
2405  * pci_target_state - find an appropriate low power state for a given PCI dev
2406  * @dev: PCI device
2407  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2408  *
2409  * Use underlying platform code to find a supported low power state for @dev.
2410  * If the platform can't manage @dev, return the deepest state from which it
2411  * can generate wake events, based on any available PME info.
2412  */
2413 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2414 {
2415 	pci_power_t target_state = PCI_D3hot;
2416 
2417 	if (platform_pci_power_manageable(dev)) {
2418 		/*
2419 		 * Call the platform to find the target state for the device.
2420 		 */
2421 		pci_power_t state = platform_pci_choose_state(dev);
2422 
2423 		switch (state) {
2424 		case PCI_POWER_ERROR:
2425 		case PCI_UNKNOWN:
2426 			break;
2427 		case PCI_D1:
2428 		case PCI_D2:
2429 			if (pci_no_d1d2(dev))
2430 				break;
2431 			/* else, fall through */
2432 		default:
2433 			target_state = state;
2434 		}
2435 
2436 		return target_state;
2437 	}
2438 
2439 	if (!dev->pm_cap)
2440 		target_state = PCI_D0;
2441 
2442 	/*
2443 	 * If the device is in D3cold even though it's not power-manageable by
2444 	 * the platform, it may have been powered down by non-standard means.
2445 	 * Best to let it slumber.
2446 	 */
2447 	if (dev->current_state == PCI_D3cold)
2448 		target_state = PCI_D3cold;
2449 
2450 	if (wakeup) {
2451 		/*
2452 		 * Find the deepest state from which the device can generate
2453 		 * PME#.
2454 		 */
2455 		if (dev->pme_support) {
2456 			while (target_state
2457 			      && !(dev->pme_support & (1 << target_state)))
2458 				target_state--;
2459 		}
2460 	}
2461 
2462 	return target_state;
2463 }
2464 
2465 /**
2466  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2467  *			  into a sleep state
2468  * @dev: Device to handle.
2469  *
2470  * Choose the power state appropriate for the device depending on whether
2471  * it can wake up the system and/or is power manageable by the platform
2472  * (PCI_D3hot is the default) and put the device into that state.
2473  */
2474 int pci_prepare_to_sleep(struct pci_dev *dev)
2475 {
2476 	bool wakeup = device_may_wakeup(&dev->dev);
2477 	pci_power_t target_state = pci_target_state(dev, wakeup);
2478 	int error;
2479 
2480 	if (target_state == PCI_POWER_ERROR)
2481 		return -EIO;
2482 
2483 	pci_enable_wake(dev, target_state, wakeup);
2484 
2485 	error = pci_set_power_state(dev, target_state);
2486 
2487 	if (error)
2488 		pci_enable_wake(dev, target_state, false);
2489 
2490 	return error;
2491 }
2492 EXPORT_SYMBOL(pci_prepare_to_sleep);
2493 
2494 /**
2495  * pci_back_from_sleep - turn PCI device on during system-wide transition
2496  *			 into working state
2497  * @dev: Device to handle.
2498  *
2499  * Disable device's system wake-up capability and put it into D0.
2500  */
2501 int pci_back_from_sleep(struct pci_dev *dev)
2502 {
2503 	pci_enable_wake(dev, PCI_D0, false);
2504 	return pci_set_power_state(dev, PCI_D0);
2505 }
2506 EXPORT_SYMBOL(pci_back_from_sleep);
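
/*
 * Illustrative sketch (hypothetical foo_* legacy hooks): the two
 * helpers above are meant to be used as a pair around a system sleep
 * transition:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		int err = pci_back_from_sleep(pdev);
 *
 *		if (err)
 *			return err;
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */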
2507 
2508 /**
2509  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2510  * @dev: PCI device being suspended.
2511  *
2512  * Prepare @dev to generate wake-up events at run time and put it into a low
2513  * power state.
2514  */
2515 int pci_finish_runtime_suspend(struct pci_dev *dev)
2516 {
2517 	pci_power_t target_state;
2518 	int error;
2519 
2520 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2521 	if (target_state == PCI_POWER_ERROR)
2522 		return -EIO;
2523 
2524 	dev->runtime_d3cold = target_state == PCI_D3cold;
2525 
2526 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2527 
2528 	error = pci_set_power_state(dev, target_state);
2529 
2530 	if (error) {
2531 		pci_enable_wake(dev, target_state, false);
2532 		dev->runtime_d3cold = false;
2533 	}
2534 
2535 	return error;
2536 }
2537 
2538 /**
2539  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2540  * @dev: Device to check.
2541  *
2542  * Return true if the device itself is capable of generating wake-up events
2543  * (through the platform or using the native PCIe PME) or if the device supports
2544  * PME and one of its upstream bridges can generate wake-up events.
2545  */
2546 bool pci_dev_run_wake(struct pci_dev *dev)
2547 {
2548 	struct pci_bus *bus = dev->bus;
2549 
2550 	if (!dev->pme_support)
2551 		return false;
2552 
2553 	/* PME-capable in principle, but not from the target power state */
2554 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2555 		return false;
2556 
2557 	if (device_can_wakeup(&dev->dev))
2558 		return true;
2559 
2560 	while (bus->parent) {
2561 		struct pci_dev *bridge = bus->self;
2562 
2563 		if (device_can_wakeup(&bridge->dev))
2564 			return true;
2565 
2566 		bus = bus->parent;
2567 	}
2568 
2569 	/* We have reached the root bus. */
2570 	if (bus->bridge)
2571 		return device_can_wakeup(bus->bridge);
2572 
2573 	return false;
2574 }
2575 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2576 
2577 /**
2578  * pci_dev_need_resume - Check if it is necessary to resume the device.
2579  * @pci_dev: Device to check.
2580  *
2581  * Return 'true' if the device is not runtime-suspended, if it has to be
2582  * reconfigured due to a difference in wakeup settings between system and
2583  * runtime suspend, or if its current power state is not suitable for the
2584  * upcoming (system-wide) transition.
2585  */
2586 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2587 {
2588 	struct device *dev = &pci_dev->dev;
2589 	pci_power_t target_state;
2590 
2591 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2592 		return true;
2593 
2594 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2595 
2596 	/*
2597 	 * If the earlier platform check has not triggered, D3cold is just power
2598 	 * removal on top of D3hot, so no need to resume the device in that
2599 	 * case.
2600 	 */
2601 	return target_state != pci_dev->current_state &&
2602 		target_state != PCI_D3cold &&
2603 		pci_dev->current_state != PCI_D3hot;
2604 }
2605 
2606 /**
2607  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2608  * @pci_dev: Device to check.
2609  *
2610  * If the device is suspended and it is not configured for system wakeup,
2611  * disable PME for it to prevent it from waking up the system unnecessarily.
2612  *
2613  * Note that if the device's power state is D3cold and the platform check in
2614  * pci_dev_need_resume() has not triggered, the device's configuration need not
2615  * be changed.
2616  */
2617 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2618 {
2619 	struct device *dev = &pci_dev->dev;
2620 
2621 	spin_lock_irq(&dev->power.lock);
2622 
2623 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2624 	    pci_dev->current_state < PCI_D3cold)
2625 		__pci_pme_active(pci_dev, false);
2626 
2627 	spin_unlock_irq(&dev->power.lock);
2628 }
2629 
2630 /**
2631  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2632  * @pci_dev: Device to handle.
2633  *
2634  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2635  * it might have been disabled during the prepare phase of system suspend if
2636  * the device was not configured for system wakeup.
2637  */
2638 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2639 {
2640 	struct device *dev = &pci_dev->dev;
2641 
2642 	if (!pci_dev_run_wake(pci_dev))
2643 		return;
2644 
2645 	spin_lock_irq(&dev->power.lock);
2646 
2647 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2648 		__pci_pme_active(pci_dev, true);
2649 
2650 	spin_unlock_irq(&dev->power.lock);
2651 }
2652 
2653 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2654 {
2655 	struct device *dev = &pdev->dev;
2656 	struct device *parent = dev->parent;
2657 
2658 	if (parent)
2659 		pm_runtime_get_sync(parent);
2660 	pm_runtime_get_noresume(dev);
2661 	/*
2662 	 * pdev->current_state is set to PCI_D3cold during suspending,
2663 	 * so wait until suspending completes
2664 	 */
2665 	pm_runtime_barrier(dev);
2666 	/*
2667 	 * Only need to resume devices in D3cold, because config
2668 	 * registers are still accessible for devices suspended but
2669 	 * not in D3cold.
2670 	 */
2671 	if (pdev->current_state == PCI_D3cold)
2672 		pm_runtime_resume(dev);
2673 }
2674 
2675 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2676 {
2677 	struct device *dev = &pdev->dev;
2678 	struct device *parent = dev->parent;
2679 
2680 	pm_runtime_put(dev);
2681 	if (parent)
2682 		pm_runtime_put_sync(parent);
2683 }
2684 
2685 static const struct dmi_system_id bridge_d3_blacklist[] = {
2686 #ifdef CONFIG_X86
2687 	{
2688 		/*
2689 		 * Gigabyte X299 root port is not marked as hotplug capable
2690 		 * which allows Linux to power manage it.  However, this
2691 		 * confuses the BIOS SMI handler, so don't power manage root
2692 		 * ports on that system.
2693 		 */
2694 		.ident = "X299 DESIGNARE EX-CF",
2695 		.matches = {
2696 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2697 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2698 		},
2699 	},
2700 #endif
2701 	{ }
2702 };
2703 
2704 /**
2705  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2706  * @bridge: Bridge to check
2707  *
2708  * This function checks if it is possible to move the bridge to D3.
2709  * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2710  */
2711 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2712 {
2713 	if (!pci_is_pcie(bridge))
2714 		return false;
2715 
2716 	switch (pci_pcie_type(bridge)) {
2717 	case PCI_EXP_TYPE_ROOT_PORT:
2718 	case PCI_EXP_TYPE_UPSTREAM:
2719 	case PCI_EXP_TYPE_DOWNSTREAM:
2720 		if (pci_bridge_d3_disable)
2721 			return false;
2722 
2723 		/*
2724 		 * Hotplug ports handled by firmware in System Management Mode
2725 		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2726 		 */
2727 		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2728 			return false;
2729 
2730 		if (pci_bridge_d3_force)
2731 			return true;
2732 
2733 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
2734 		if (bridge->is_thunderbolt)
2735 			return true;
2736 
2737 		/* Platform might know better if the bridge supports D3 */
2738 		if (platform_pci_bridge_d3(bridge))
2739 			return true;
2740 
2741 		/*
2742 		 * Hotplug ports handled natively by the OS were not validated
2743 		 * by vendors for runtime D3 at least until 2018 because there
2744 		 * was no OS support.
2745 		 */
2746 		if (bridge->is_hotplug_bridge)
2747 			return false;
2748 
2749 		if (dmi_check_system(bridge_d3_blacklist))
2750 			return false;
2751 
2752 		/*
2753 		 * It should be safe to put PCIe ports from 2015 or newer
2754 		 * to D3.
2755 		 */
2756 		if (dmi_get_bios_year() >= 2015)
2757 			return true;
2758 		break;
2759 	}
2760 
2761 	return false;
2762 }
2763 
2764 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2765 {
2766 	bool *d3cold_ok = data;
2767 
2768 	if (/* The device needs to be allowed to go to D3cold ... */
2769 	    dev->no_d3cold || !dev->d3cold_allowed ||
2770 
2771 	    /* ... and if it is wakeup capable to do so from D3cold. */
2772 	    (device_may_wakeup(&dev->dev) &&
2773 	     !pci_pme_capable(dev, PCI_D3cold)) ||
2774 
2775 	    /* If it is a bridge it must be allowed to go to D3. */
2776 	    !pci_power_manageable(dev))
2777 
2778 		*d3cold_ok = false;
2779 
2780 	return !*d3cold_ok;
2781 }
2782 
2783 /**
2784  * pci_bridge_d3_update - Update bridge D3 capabilities
2785  * @dev: PCI device which is changed
2786  *
2787  * Update upstream bridge PM capabilities accordingly depending on if the
2788  * device PM configuration was changed or the device is being removed.  The
2789  * change is also propagated upstream.
2790  */
2791 void pci_bridge_d3_update(struct pci_dev *dev)
2792 {
2793 	bool remove = !device_is_registered(&dev->dev);
2794 	struct pci_dev *bridge;
2795 	bool d3cold_ok = true;
2796 
2797 	bridge = pci_upstream_bridge(dev);
2798 	if (!bridge || !pci_bridge_d3_possible(bridge))
2799 		return;
2800 
2801 	/*
2802 	 * If D3 is currently allowed for the bridge, removing one of its
2803 	 * children won't change that.
2804 	 */
2805 	if (remove && bridge->bridge_d3)
2806 		return;
2807 
2808 	/*
2809 	 * If D3 is currently allowed for the bridge and a child is added or
2810 	 * changed, disallowance of D3 can only be caused by that child, so
2811 	 * we only need to check that single device, not any of its siblings.
2812 	 *
2813 	 * If D3 is currently not allowed for the bridge, checking the device
2814 	 * first may allow us to skip checking its siblings.
2815 	 */
2816 	if (!remove)
2817 		pci_dev_check_d3cold(dev, &d3cold_ok);
2818 
2819 	/*
2820 	 * If D3 is currently not allowed for the bridge, this may be caused
2821 	 * either by the device being changed/removed or any of its siblings,
2822 	 * so we need to go through all children to find out if one of them
2823 	 * continues to block D3.
2824 	 */
2825 	if (d3cold_ok && !bridge->bridge_d3)
2826 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2827 			     &d3cold_ok);
2828 
2829 	if (bridge->bridge_d3 != d3cold_ok) {
2830 		bridge->bridge_d3 = d3cold_ok;
2831 		/* Propagate change to upstream bridges */
2832 		pci_bridge_d3_update(bridge);
2833 	}
2834 }
2835 
2836 /**
2837  * pci_d3cold_enable - Enable D3cold for device
2838  * @dev: PCI device to handle
2839  *
2840  * This function can be used in drivers to enable D3cold from the device
2841  * they handle.  It also updates upstream PCI bridge PM capabilities
2842  * accordingly.
2843  */
2844 void pci_d3cold_enable(struct pci_dev *dev)
2845 {
2846 	if (dev->no_d3cold) {
2847 		dev->no_d3cold = false;
2848 		pci_bridge_d3_update(dev);
2849 	}
2850 }
2851 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2852 
2853 /**
2854  * pci_d3cold_disable - Disable D3cold for device
2855  * @dev: PCI device to handle
2856  *
2857  * This function can be used in drivers to disable D3cold from the device
2858  * they handle.  It also updates upstream PCI bridge PM capabilities
2859  * accordingly.
2860  */
2861 void pci_d3cold_disable(struct pci_dev *dev)
2862 {
2863 	if (!dev->no_d3cold) {
2864 		dev->no_d3cold = true;
2865 		pci_bridge_d3_update(dev);
2866 	}
2867 }
2868 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
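
/*
 * Illustrative sketch: a driver whose device loses state that cannot be
 * restored across a D3cold power removal might call, in probe():
 *
 *	pci_d3cold_disable(pdev);
 *
 * and later undo it with pci_d3cold_enable(pdev) once the limitation no
 * longer applies; both calls propagate the change to upstream bridges
 * via pci_bridge_d3_update().
 */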
2869 
2870 /**
2871  * pci_pm_init - Initialize PM functions of given PCI device
2872  * @dev: PCI device to handle.
2873  */
2874 void pci_pm_init(struct pci_dev *dev)
2875 {
2876 	int pm;
2877 	u16 status;
2878 	u16 pmc;
2879 
2880 	pm_runtime_forbid(&dev->dev);
2881 	pm_runtime_set_active(&dev->dev);
2882 	pm_runtime_enable(&dev->dev);
2883 	device_enable_async_suspend(&dev->dev);
2884 	dev->wakeup_prepared = false;
2885 
2886 	dev->pm_cap = 0;
2887 	dev->pme_support = 0;
2888 
2889 	/* find PCI PM capability in list */
2890 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2891 	if (!pm)
2892 		return;
2893 	/* Check device's ability to generate PME# */
2894 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2895 
2896 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2897 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
2898 			pmc & PCI_PM_CAP_VER_MASK);
2899 		return;
2900 	}
2901 
2902 	dev->pm_cap = pm;
2903 	dev->d3_delay = PCI_PM_D3_WAIT;
2904 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2905 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
2906 	dev->d3cold_allowed = true;
2907 
2908 	dev->d1_support = false;
2909 	dev->d2_support = false;
2910 	if (!pci_no_d1d2(dev)) {
2911 		if (pmc & PCI_PM_CAP_D1)
2912 			dev->d1_support = true;
2913 		if (pmc & PCI_PM_CAP_D2)
2914 			dev->d2_support = true;
2915 
2916 		if (dev->d1_support || dev->d2_support)
2917 			pci_info(dev, "supports%s%s\n",
2918 				   dev->d1_support ? " D1" : "",
2919 				   dev->d2_support ? " D2" : "");
2920 	}
2921 
2922 	pmc &= PCI_PM_CAP_PME_MASK;
2923 	if (pmc) {
2924 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
2925 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2926 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2927 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2928 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2929 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2930 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2931 		dev->pme_poll = true;
2932 		/*
2933 		 * Make device's PM flags reflect the wake-up capability, but
2934 		 * let user space enable it to wake up the system as needed.
2935 		 */
2936 		device_set_wakeup_capable(&dev->dev, true);
2937 		/* Disable the PME# generation functionality */
2938 		pci_pme_active(dev, false);
2939 	}
2940 
2941 	pci_read_config_word(dev, PCI_STATUS, &status);
2942 	if (status & PCI_STATUS_IMM_READY)
2943 		dev->imm_ready = 1;
2944 }
2945 
2946 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2947 {
2948 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2949 
2950 	switch (prop) {
2951 	case PCI_EA_P_MEM:
2952 	case PCI_EA_P_VF_MEM:
2953 		flags |= IORESOURCE_MEM;
2954 		break;
2955 	case PCI_EA_P_MEM_PREFETCH:
2956 	case PCI_EA_P_VF_MEM_PREFETCH:
2957 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2958 		break;
2959 	case PCI_EA_P_IO:
2960 		flags |= IORESOURCE_IO;
2961 		break;
2962 	default:
2963 		return 0;
2964 	}
2965 
2966 	return flags;
2967 }
2968 
2969 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2970 					    u8 prop)
2971 {
2972 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2973 		return &dev->resource[bei];
2974 #ifdef CONFIG_PCI_IOV
2975 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2976 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2977 		return &dev->resource[PCI_IOV_RESOURCES +
2978 				      bei - PCI_EA_BEI_VF_BAR0];
2979 #endif
2980 	else if (bei == PCI_EA_BEI_ROM)
2981 		return &dev->resource[PCI_ROM_RESOURCE];
2982 	else
2983 		return NULL;
2984 }
2985 
2986 /* Read an Enhanced Allocation (EA) entry */
2987 static int pci_ea_read(struct pci_dev *dev, int offset)
2988 {
2989 	struct resource *res;
2990 	int ent_size, ent_offset = offset;
2991 	resource_size_t start, end;
2992 	unsigned long flags;
2993 	u32 dw0, bei, base, max_offset;
2994 	u8 prop;
2995 	bool support_64 = (sizeof(resource_size_t) >= 8);
2996 
2997 	pci_read_config_dword(dev, ent_offset, &dw0);
2998 	ent_offset += 4;
2999 
3000 	/* Entry size field indicates DWORDs after 1st */
3001 	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3002 
3003 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3004 		goto out;
3005 
3006 	bei = (dw0 & PCI_EA_BEI) >> 4;
3007 	prop = (dw0 & PCI_EA_PP) >> 8;
3008 
3009 	/*
3010 	 * If the Property is in the reserved range, try the Secondary
3011 	 * Property instead.
3012 	 */
3013 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3014 		prop = (dw0 & PCI_EA_SP) >> 16;
3015 	if (prop > PCI_EA_P_BRIDGE_IO)
3016 		goto out;
3017 
3018 	res = pci_ea_get_resource(dev, bei, prop);
3019 	if (!res) {
3020 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3021 		goto out;
3022 	}
3023 
3024 	flags = pci_ea_flags(dev, prop);
3025 	if (!flags) {
3026 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3027 		goto out;
3028 	}
3029 
3030 	/* Read Base */
3031 	pci_read_config_dword(dev, ent_offset, &base);
3032 	start = (base & PCI_EA_FIELD_MASK);
3033 	ent_offset += 4;
3034 
3035 	/* Read MaxOffset */
3036 	pci_read_config_dword(dev, ent_offset, &max_offset);
3037 	ent_offset += 4;
3038 
3039 	/* Read Base MSBs (if 64-bit entry) */
3040 	if (base & PCI_EA_IS_64) {
3041 		u32 base_upper;
3042 
3043 		pci_read_config_dword(dev, ent_offset, &base_upper);
3044 		ent_offset += 4;
3045 
3046 		flags |= IORESOURCE_MEM_64;
3047 
3048 		/* entry starts above 32-bit boundary, can't use */
3049 		if (!support_64 && base_upper)
3050 			goto out;
3051 
3052 		if (support_64)
3053 			start |= ((u64)base_upper << 32);
3054 	}
3055 
3056 	end = start + (max_offset | 0x03);
3057 
3058 	/* Read MaxOffset MSBs (if 64-bit entry) */
3059 	if (max_offset & PCI_EA_IS_64) {
3060 		u32 max_offset_upper;
3061 
3062 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3063 		ent_offset += 4;
3064 
3065 		flags |= IORESOURCE_MEM_64;
3066 
3067 		/* entry too big, can't use */
3068 		if (!support_64 && max_offset_upper)
3069 			goto out;
3070 
3071 		if (support_64)
3072 			end += ((u64)max_offset_upper << 32);
3073 	}
3074 
3075 	if (end < start) {
3076 		pci_err(dev, "EA Entry crosses address boundary\n");
3077 		goto out;
3078 	}
3079 
3080 	if (ent_size != ent_offset - offset) {
3081 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3082 			ent_size, ent_offset - offset);
3083 		goto out;
3084 	}
3085 
3086 	res->name = pci_name(dev);
3087 	res->start = start;
3088 	res->end = end;
3089 	res->flags = flags;
3090 
3091 	if (bei <= PCI_EA_BEI_BAR5)
3092 		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3093 			   bei, res, prop);
3094 	else if (bei == PCI_EA_BEI_ROM)
3095 		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3096 			   res, prop);
3097 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3098 		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3099 			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
3100 	else
3101 		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3102 			   bei, res, prop);
3103 
3104 out:
3105 	return offset + ent_size;
3106 }
3107 
3108 /* Enhanced Allocation Initialization */
3109 void pci_ea_init(struct pci_dev *dev)
3110 {
3111 	int ea;
3112 	u8 num_ent;
3113 	int offset;
3114 	int i;
3115 
3116 	/* find PCI EA capability in list */
3117 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3118 	if (!ea)
3119 		return;
3120 
3121 	/* determine the number of entries */
3122 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3123 					&num_ent);
3124 	num_ent &= PCI_EA_NUM_ENT_MASK;
3125 
3126 	offset = ea + PCI_EA_FIRST_ENT;
3127 
3128 	/* Skip DWORD 2 for type 1 functions */
3129 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3130 		offset += 4;
3131 
3132 	/* parse each EA entry */
3133 	for (i = 0; i < num_ent; ++i)
3134 		offset = pci_ea_read(dev, offset);
3135 }
3136 
3137 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3138 	struct pci_cap_saved_state *new_cap)
3139 {
3140 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3141 }
3142 
3143 /**
3144  * _pci_add_cap_save_buffer - allocate buffer for saving given
3145  *			      capability registers
3146  * @dev: the PCI device
3147  * @cap: the capability to allocate the buffer for
3148  * @extended: Standard or Extended capability ID
3149  * @size: requested size of the buffer
3150  */
3151 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3152 				    bool extended, unsigned int size)
3153 {
3154 	int pos;
3155 	struct pci_cap_saved_state *save_state;
3156 
3157 	if (extended)
3158 		pos = pci_find_ext_capability(dev, cap);
3159 	else
3160 		pos = pci_find_capability(dev, cap);
3161 
3162 	if (!pos)
3163 		return 0;
3164 
3165 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3166 	if (!save_state)
3167 		return -ENOMEM;
3168 
3169 	save_state->cap.cap_nr = cap;
3170 	save_state->cap.cap_extended = extended;
3171 	save_state->cap.size = size;
3172 	pci_add_saved_cap(dev, save_state);
3173 
3174 	return 0;
3175 }
3176 
3177 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3178 {
3179 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3180 }
3181 
3182 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3183 {
3184 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3185 }
3186 
3187 /**
3188  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3189  * @dev: the PCI device
3190  */
3191 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3192 {
3193 	int error;
3194 
3195 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3196 					PCI_EXP_SAVE_REGS * sizeof(u16));
3197 	if (error)
3198 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3199 
3200 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3201 	if (error)
3202 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3203 
3204 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3205 					    2 * sizeof(u16));
3206 	if (error)
3207 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3208 
3209 	pci_allocate_vc_save_buffers(dev);
3210 }
3211 
3212 void pci_free_cap_save_buffers(struct pci_dev *dev)
3213 {
3214 	struct pci_cap_saved_state *tmp;
3215 	struct hlist_node *n;
3216 
3217 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3218 		kfree(tmp);
3219 }
3220 
3221 /**
3222  * pci_configure_ari - enable or disable ARI forwarding
3223  * @dev: the PCI device
3224  *
3225  * If @dev and its upstream bridge both support ARI, enable ARI in the
3226  * bridge.  Otherwise, disable ARI in the bridge.
3227  */
3228 void pci_configure_ari(struct pci_dev *dev)
3229 {
3230 	u32 cap;
3231 	struct pci_dev *bridge;
3232 
3233 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3234 		return;
3235 
3236 	bridge = dev->bus->self;
3237 	if (!bridge)
3238 		return;
3239 
3240 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3241 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3242 		return;
3243 
3244 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3245 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3246 					 PCI_EXP_DEVCTL2_ARI);
3247 		bridge->ari_enabled = 1;
3248 	} else {
3249 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3250 					   PCI_EXP_DEVCTL2_ARI);
3251 		bridge->ari_enabled = 0;
3252 	}
3253 }
3254 
3255 static int pci_acs_enable;
3256 
3257 /**
3258  * pci_request_acs - ask for ACS to be enabled if supported
3259  */
3260 void pci_request_acs(void)
3261 {
3262 	pci_acs_enable = 1;
3263 }
3264 
3265 static const char *disable_acs_redir_param;
3266 
3267 /**
3268  * pci_disable_acs_redir - disable ACS redirect capabilities
3269  * @dev: the PCI device
3270  *
3271  * Only for devices specified in the disable_acs_redir parameter.
3272  */
3273 static void pci_disable_acs_redir(struct pci_dev *dev)
3274 {
3275 	int ret = 0;
3276 	const char *p;
3277 	int pos;
3278 	u16 ctrl;
3279 
3280 	if (!disable_acs_redir_param)
3281 		return;
3282 
3283 	p = disable_acs_redir_param;
3284 	while (*p) {
3285 		ret = pci_dev_str_match(dev, p, &p);
3286 		if (ret < 0) {
3287 			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
3288 				     disable_acs_redir_param);
3289 
3290 			break;
3291 		} else if (ret == 1) {
3292 			/* Found a match */
3293 			break;
3294 		}
3295 
3296 		if (*p != ';' && *p != ',') {
3297 			/* End of param or invalid format */
3298 			break;
3299 		}
3300 		p++;
3301 	}
3302 
3303 	if (ret != 1)
3304 		return;
3305 
3306 	if (!pci_dev_specific_disable_acs_redir(dev))
3307 		return;
3308 
3309 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3310 	if (!pos) {
3311 		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
3312 		return;
3313 	}
3314 
3315 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3316 
3317 	/* P2P Request & Completion Redirect */
3318 	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
3319 
3320 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3321 
3322 	pci_info(dev, "disabled ACS redirect\n");
3323 }
3324 
3325 /**
3326  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
3327  * @dev: the PCI device
3328  */
3329 static void pci_std_enable_acs(struct pci_dev *dev)
3330 {
3331 	int pos;
3332 	u16 cap;
3333 	u16 ctrl;
3334 
3335 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3336 	if (!pos)
3337 		return;
3338 
3339 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
3340 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3341 
3342 	/* Source Validation */
3343 	ctrl |= (cap & PCI_ACS_SV);
3344 
3345 	/* P2P Request Redirect */
3346 	ctrl |= (cap & PCI_ACS_RR);
3347 
3348 	/* P2P Completion Redirect */
3349 	ctrl |= (cap & PCI_ACS_CR);
3350 
3351 	/* Upstream Forwarding */
3352 	ctrl |= (cap & PCI_ACS_UF);
3353 
3354 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3355 }
3356 
3357 /**
3358  * pci_enable_acs - enable ACS if hardware supports it
3359  * @dev: the PCI device
3360  */
3361 void pci_enable_acs(struct pci_dev *dev)
3362 {
3363 	if (!pci_acs_enable)
3364 		goto disable_acs_redir;
3365 
3366 	if (!pci_dev_specific_enable_acs(dev))
3367 		goto disable_acs_redir;
3368 
3369 	pci_std_enable_acs(dev);
3370 
3371 disable_acs_redir:
3372 	/*
3373 	 * Note: pci_disable_acs_redir() must be called even if ACS was not
3374 	 * enabled by the kernel because it may have been enabled by
3375 	 * platform firmware.  So if we are told to disable it, we should
3376 	 * always disable it after setting the kernel's default
3377 	 * preferences.
3378 	 */
3379 	pci_disable_acs_redir(dev);
3380 }
3381 
3382 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3383 {
3384 	int pos;
3385 	u16 cap, ctrl;
3386 
3387 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
3388 	if (!pos)
3389 		return false;
3390 
3391 	/*
3392 	 * Except for egress control, capabilities are either required
3393 	 * or only required if controllable.  Features missing from the
3394 	 * capability field can therefore be assumed as hard-wired enabled.
3395 	 */
3396 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3397 	acs_flags &= (cap | PCI_ACS_EC);
3398 
3399 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3400 	return (ctrl & acs_flags) == acs_flags;
3401 }
3402 
3403 /**
3404  * pci_acs_enabled - test ACS against required flags for a given device
3405  * @pdev: device to test
3406  * @acs_flags: required PCI ACS flags
3407  *
3408  * Return true if the device supports the provided flags.  Automatically
3409  * filters out flags that are not implemented on multifunction devices.
3410  *
3411  * Note that this interface checks the effective ACS capabilities of the
3412  * device rather than the actual capabilities.  For instance, most single
3413  * function endpoints are not required to support ACS because they have no
3414  * opportunity for peer-to-peer access.  We therefore return 'true'
3415  * regardless of whether the device exposes an ACS capability.  This makes
3416  * it much easier for callers of this function to ignore the actual type
3417  * or topology of the device when testing ACS support.
3418  */
3419 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3420 {
3421 	int ret;
3422 
3423 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3424 	if (ret >= 0)
3425 		return ret > 0;
3426 
3427 	/*
3428 	 * Conventional PCI and PCI-X devices never support ACS, either
3429 	 * effectively or actually.  The shared bus topology implies that
3430 	 * any device on the bus can receive or snoop DMA.
3431 	 */
3432 	if (!pci_is_pcie(pdev))
3433 		return false;
3434 
3435 	switch (pci_pcie_type(pdev)) {
3436 	/*
3437 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3438 	 * but since their primary interface is PCI/X, we conservatively
3439 	 * handle them as we would a non-PCIe device.
3440 	 */
3441 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3442 	/*
3443 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3444 	 * applicable... must never implement an ACS Extended Capability...".
3445 	 * This seems arbitrary, but we take a conservative interpretation
3446 	 * of this statement.
3447 	 */
3448 	case PCI_EXP_TYPE_PCI_BRIDGE:
3449 	case PCI_EXP_TYPE_RC_EC:
3450 		return false;
3451 	/*
3452 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3453 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3454 	 * regardless of whether they are single- or multi-function devices.
3455 	 */
3456 	case PCI_EXP_TYPE_DOWNSTREAM:
3457 	case PCI_EXP_TYPE_ROOT_PORT:
3458 		return pci_acs_flags_enabled(pdev, acs_flags);
3459 	/*
3460 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3461 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3462 	 * capabilities, but only when they are part of a multifunction
3463 	 * device.  The footnote for section 6.12 indicates the specific
3464 	 * PCIe types included here.
3465 	 */
3466 	case PCI_EXP_TYPE_ENDPOINT:
3467 	case PCI_EXP_TYPE_UPSTREAM:
3468 	case PCI_EXP_TYPE_LEG_END:
3469 	case PCI_EXP_TYPE_RC_END:
3470 		if (!pdev->multifunction)
3471 			break;
3472 
3473 		return pci_acs_flags_enabled(pdev, acs_flags);
3474 	}
3475 
3476 	/*
3477 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3478 	 * to single function devices with the exception of downstream ports.
3479 	 */
3480 	return true;
3481 }
3482 
3483 /**
3484  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3485  * @start: starting downstream device
3486  * @end: ending upstream device or NULL to search to the root bus
3487  * @acs_flags: required flags
3488  *
3489  * Walk up a device tree from start to end testing PCI ACS support.  If
3490  * any step along the way does not support the required flags, return false.
3491  */
3492 bool pci_acs_path_enabled(struct pci_dev *start,
3493 			  struct pci_dev *end, u16 acs_flags)
3494 {
3495 	struct pci_dev *pdev, *parent = start;
3496 
3497 	do {
3498 		pdev = parent;
3499 
3500 		if (!pci_acs_enabled(pdev, acs_flags))
3501 			return false;
3502 
3503 		if (pci_is_root_bus(pdev->bus))
3504 			return (end == NULL);
3505 
3506 		parent = pdev->bus->self;
3507 	} while (pdev != end);
3508 
3509 	return true;
3510 }
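
/*
 * Illustrative sketch (hypothetical variable names): before allowing
 * peer-to-peer DMA between two devices, a caller could verify that
 * request/completion redirection is enforced all the way to the root:
 *
 *	bool p2p_isolated = pci_acs_path_enabled(dma_src, NULL,
 *						 PCI_ACS_RR | PCI_ACS_CR);
 *
 * A 'false' result means some bridge on the path may route the
 * transactions between the devices directly, bypassing the IOMMU.
 */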
3511 
3512 /**
3513  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3514  * @pdev: PCI device
3515  * @bar: BAR to find
3516  *
3517  * Helper to find the position of the ctrl register for a BAR.
3518  * Returns -ENOTSUPP if resizable BARs are not supported at all.
3519  * Returns -ENOENT if no ctrl register for the BAR could be found.
3520  */
3521 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3522 {
3523 	unsigned int pos, nbars, i;
3524 	u32 ctrl;
3525 
3526 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3527 	if (!pos)
3528 		return -ENOTSUPP;
3529 
3530 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3531 	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3532 		    PCI_REBAR_CTRL_NBAR_SHIFT;
3533 
3534 	for (i = 0; i < nbars; i++, pos += 8) {
3535 		int bar_idx;
3536 
3537 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3538 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3539 		if (bar_idx == bar)
3540 			return pos;
3541 	}
3542 
3543 	return -ENOENT;
3544 }
3545 
3546 /**
3547  * pci_rebar_get_possible_sizes - get possible sizes for BAR
3548  * @pdev: PCI device
3549  * @bar: BAR to query
3550  *
3551  * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3552  * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3553  */
3554 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3555 {
3556 	int pos;
3557 	u32 cap;
3558 
3559 	pos = pci_rebar_find_pos(pdev, bar);
3560 	if (pos < 0)
3561 		return 0;
3562 
3563 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3564 	return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3565 }
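
/*
 * Worked example: a return value of 0x000001c0 has bits 6, 7 and 8 set,
 * meaning the BAR supports 64MB, 128MB and 256MB (size in bytes is
 * 1MB << bit).  A caller could pick the largest supported size like
 * this (sketch):
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *
 *	if (sizes)
 *		pci_rebar_set_size(pdev, bar, __fls(sizes));
 */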
3566 
3567 /**
3568  * pci_rebar_get_current_size - get the current size of a BAR
3569  * @pdev: PCI device
3570  * @bar: BAR to query
3571  *
3572  * Read the size of a BAR from the resizable BAR config.
3573  * Returns size if found or negative error code.
3574  */
3575 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3576 {
3577 	int pos;
3578 	u32 ctrl;
3579 
3580 	pos = pci_rebar_find_pos(pdev, bar);
3581 	if (pos < 0)
3582 		return pos;
3583 
3584 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3585 	return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3586 }
3587 
3588 /**
3589  * pci_rebar_set_size - set a new size for a BAR
3590  * @pdev: PCI device
3591  * @bar: BAR to resize
3592  * @size: new size as defined in the spec (0=1MB, 19=512GB)
3593  *
3594  * Set the new size of a BAR as defined in the spec.
3595  * Returns zero if resizing was successful, error code otherwise.
3596  */
3597 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3598 {
3599 	int pos;
3600 	u32 ctrl;
3601 
3602 	pos = pci_rebar_find_pos(pdev, bar);
3603 	if (pos < 0)
3604 		return pos;
3605 
3606 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3607 	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3608 	ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3609 	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3610 	return 0;
3611 }
3612 
3613 /**
3614  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3615  * @dev: the PCI device
3616  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3617  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3618  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3619  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3620  *
3621  * Return 0 if all upstream bridges support AtomicOp routing, egress
3622  * blocking is disabled on all upstream ports, and the root port supports
3623  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3624  * AtomicOp completion), or negative otherwise.
3625  */
3626 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3627 {
3628 	struct pci_bus *bus = dev->bus;
3629 	struct pci_dev *bridge;
3630 	u32 cap, ctl2;
3631 
3632 	if (!pci_is_pcie(dev))
3633 		return -EINVAL;
3634 
3635 	/*
3636 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3637 	 * AtomicOp requesters.  For now, we only support endpoints as
3638 	 * requesters and root ports as completers.  No endpoints as
3639 	 * completers, and no peer-to-peer.
3640 	 */
3641 
3642 	switch (pci_pcie_type(dev)) {
3643 	case PCI_EXP_TYPE_ENDPOINT:
3644 	case PCI_EXP_TYPE_LEG_END:
3645 	case PCI_EXP_TYPE_RC_END:
3646 		break;
3647 	default:
3648 		return -EINVAL;
3649 	}
3650 
3651 	while (bus->parent) {
3652 		bridge = bus->self;
3653 
3654 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3655 
3656 		switch (pci_pcie_type(bridge)) {
3657 		/* Ensure switch ports support AtomicOp routing */
3658 		case PCI_EXP_TYPE_UPSTREAM:
3659 		case PCI_EXP_TYPE_DOWNSTREAM:
3660 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3661 				return -EINVAL;
3662 			break;
3663 
3664 		/* Ensure root port supports all the sizes we care about */
3665 		case PCI_EXP_TYPE_ROOT_PORT:
3666 			if ((cap & cap_mask) != cap_mask)
3667 				return -EINVAL;
3668 			break;
3669 		}
3670 
3671 		/* Ensure upstream ports don't block AtomicOps on egress */
3672 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3673 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3674 						   &ctl2);
3675 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3676 				return -EINVAL;
3677 		}
3678 
3679 		bus = bus->parent;
3680 	}
3681 
3682 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3683 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3684 	return 0;
3685 }
3686 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3687 
3688 /**
3689  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3690  * @dev: the PCI device
3691  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3692  *
3693  * Perform INTx swizzling for a device behind one level of bridge.  This is
3694  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3695  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3696  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3697  * the PCI Express Base Specification, Revision 2.1).
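 *
 * Worked example: a device in slot 2 asserting INTB (pin 2) maps to
 * (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD on the bridge's primary side.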
3698  */
3699 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3700 {
3701 	int slot;
3702 
3703 	if (pci_ari_enabled(dev->bus))
3704 		slot = 0;
3705 	else
3706 		slot = PCI_SLOT(dev->devfn);
3707 
3708 	return (((pin - 1) + slot) % 4) + 1;
3709 }
3710 
3711 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3712 {
3713 	u8 pin;
3714 
3715 	pin = dev->pin;
3716 	if (!pin)
3717 		return -1;
3718 
3719 	while (!pci_is_root_bus(dev->bus)) {
3720 		pin = pci_swizzle_interrupt_pin(dev, pin);
3721 		dev = dev->bus->self;
3722 	}
3723 	*bridge = dev;
3724 	return pin;
3725 }
3726 
3727 /**
3728  * pci_common_swizzle - swizzle INTx all the way to root bridge
3729  * @dev: the PCI device
3730  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3731  *
3732  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
3733  * bridges all the way up to a PCI root bus.
3734  */
3735 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3736 {
3737 	u8 pin = *pinp;
3738 
3739 	while (!pci_is_root_bus(dev->bus)) {
3740 		pin = pci_swizzle_interrupt_pin(dev, pin);
3741 		dev = dev->bus->self;
3742 	}
3743 	*pinp = pin;
3744 	return PCI_SLOT(dev->devfn);
3745 }
3746 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3747 
3748 /**
3749  * pci_release_region - Release a PCI BAR
3750  * @pdev: PCI device whose resources were previously reserved by
3751  *	  pci_request_region()
3752  * @bar: BAR to release
3753  *
3754  * Releases the PCI I/O and memory resources previously reserved by a
3755  * successful call to pci_request_region().  Call this function only
3756  * after all use of the PCI regions has ceased.
3757  */
3758 void pci_release_region(struct pci_dev *pdev, int bar)
3759 {
3760 	struct pci_devres *dr;
3761 
3762 	if (pci_resource_len(pdev, bar) == 0)
3763 		return;
3764 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3765 		release_region(pci_resource_start(pdev, bar),
3766 				pci_resource_len(pdev, bar));
3767 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3768 		release_mem_region(pci_resource_start(pdev, bar),
3769 				pci_resource_len(pdev, bar));
3770 
3771 	dr = find_pci_dr(pdev);
3772 	if (dr)
3773 		dr->region_mask &= ~(1 << bar);
3774 }
3775 EXPORT_SYMBOL(pci_release_region);
3776 
3777 /**
3778  * __pci_request_region - Reserve PCI I/O and memory resource
3779  * @pdev: PCI device whose resources are to be reserved
3780  * @bar: BAR to be reserved
3781  * @res_name: Name to be associated with resource.
3782  * @exclusive: whether the region access is exclusive or not
3783  *
3784  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3785  * being reserved by owner @res_name.  Do not access any
3786  * address inside the PCI regions unless this call returns
3787  * successfully.
3788  *
3789  * If @exclusive is set, then the region is marked so that userspace
3790  * is explicitly not allowed to map the resource via /dev/mem or
3791  * sysfs MMIO access.
3792  *
3793  * Returns 0 on success, or %EBUSY on error.  A warning
3794  * message is also printed on failure.
3795  */
3796 static int __pci_request_region(struct pci_dev *pdev, int bar,
3797 				const char *res_name, int exclusive)
3798 {
3799 	struct pci_devres *dr;
3800 
3801 	if (pci_resource_len(pdev, bar) == 0)
3802 		return 0;
3803 
3804 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3805 		if (!request_region(pci_resource_start(pdev, bar),
3806 			    pci_resource_len(pdev, bar), res_name))
3807 			goto err_out;
3808 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3809 		if (!__request_mem_region(pci_resource_start(pdev, bar),
3810 					pci_resource_len(pdev, bar), res_name,
3811 					exclusive))
3812 			goto err_out;
3813 	}
3814 
3815 	dr = find_pci_dr(pdev);
3816 	if (dr)
3817 		dr->region_mask |= 1 << bar;
3818 
3819 	return 0;
3820 
3821 err_out:
3822 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3823 		 &pdev->resource[bar]);
3824 	return -EBUSY;
3825 }
3826 
3827 /**
3828  * pci_request_region - Reserve PCI I/O and memory resource
3829  * @pdev: PCI device whose resources are to be reserved
3830  * @bar: BAR to be reserved
3831  * @res_name: Name to be associated with resource
3832  *
3833  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3834  * being reserved by owner @res_name.  Do not access any
3835  * address inside the PCI regions unless this call returns
3836  * successfully.
3837  *
3838  * Returns 0 on success, or %EBUSY on error.  A warning
3839  * message is also printed on failure.
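 *
 * Illustrative sketch of the usual request/map/release pairing
 * (hypothetical driver code; "my_driver" is a made-up name)::
 *
 *	void __iomem *regs;
 *
 *	if (pci_request_region(pdev, 0, "my_driver"))
 *		return -EBUSY;
 *	regs = pci_iomap(pdev, 0, 0);
 *	...
 *	pci_iounmap(pdev, regs);
 *	pci_release_region(pdev, 0);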
3840  */
3841 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3842 {
3843 	return __pci_request_region(pdev, bar, res_name, 0);
3844 }
3845 EXPORT_SYMBOL(pci_request_region);
3846 
3847 /**
3848  * pci_release_selected_regions - Release selected PCI I/O and memory resources
3849  * @pdev: PCI device whose resources were previously reserved
3850  * @bars: Bitmask of BARs to be released
3851  *
3852  * Release selected PCI I/O and memory resources previously reserved.
3853  * Call this function only after all use of the PCI regions has ceased.
3854  */
3855 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3856 {
3857 	int i;
3858 
3859 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3860 		if (bars & (1 << i))
3861 			pci_release_region(pdev, i);
3862 }
3863 EXPORT_SYMBOL(pci_release_selected_regions);
3864 
3865 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3866 					  const char *res_name, int excl)
3867 {
3868 	int i;
3869 
3870 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3871 		if (bars & (1 << i))
3872 			if (__pci_request_region(pdev, i, res_name, excl))
3873 				goto err_out;
3874 	return 0;
3875 
3876 err_out:
3877 	while (--i >= 0)
3878 		if (bars & (1 << i))
3879 			pci_release_region(pdev, i);
3880 
3881 	return -EBUSY;
3882 }
3883 
3885 /**
3886  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3887  * @pdev: PCI device whose resources are to be reserved
3888  * @bars: Bitmask of BARs to be requested
3889  * @res_name: Name to be associated with resource
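 *
 * Illustrative sketch (hypothetical caller): to reserve only BARs 0 and 2,
 * pass a bitmask with those bits set::
 *
 *	err = pci_request_selected_regions(pdev, BIT(0) | BIT(2), "my_drv");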
3890  */
3891 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3892 				 const char *res_name)
3893 {
3894 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
3895 }
3896 EXPORT_SYMBOL(pci_request_selected_regions);
3897 
3898 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3899 					   const char *res_name)
3900 {
3901 	return __pci_request_selected_regions(pdev, bars, res_name,
3902 			IORESOURCE_EXCLUSIVE);
3903 }
3904 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3905 
3906 /**
3907  * pci_release_regions - Release reserved PCI I/O and memory resources
3908  * @pdev: PCI device whose resources were previously reserved by
3909  *	  pci_request_regions()
3910  *
3911  * Releases all PCI I/O and memory resources previously reserved by a
3912  * successful call to pci_request_regions().  Call this function only
3913  * after all use of the PCI regions has ceased.
3914  */
3916 void pci_release_regions(struct pci_dev *pdev)
3917 {
3918 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3919 }
3920 EXPORT_SYMBOL(pci_release_regions);
3921 
3922 /**
3923  * pci_request_regions - Reserve PCI I/O and memory resources
3924  * @pdev: PCI device whose resources are to be reserved
3925  * @res_name: Name to be associated with resource.
3926  *
3927  * Mark all PCI regions associated with PCI device @pdev as
3928  * being reserved by owner @res_name.  Do not access any
3929  * address inside the PCI regions unless this call returns
3930  * successfully.
3931  *
3932  * Returns 0 on success, or %EBUSY on error.  A warning
3933  * message is also printed on failure.
3934  */
3935 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3936 {
3937 	return pci_request_selected_regions(pdev,
3938 			((1 << PCI_STD_NUM_BARS) - 1), res_name);
3939 }
3940 EXPORT_SYMBOL(pci_request_regions);
3941 
3942 /**
3943  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3944  * @pdev: PCI device whose resources are to be reserved
3945  * @res_name: Name to be associated with resource.
3946  *
3947  * Mark all PCI regions associated with PCI device @pdev as being reserved
3948  * by owner @res_name.  Do not access any address inside the PCI regions
3949  * unless this call returns successfully.
3950  *
3951  * pci_request_regions_exclusive() will mark the region so that /dev/mem
3952  * and the sysfs MMIO access will not be allowed.
3953  *
3954  * Returns 0 on success, or %EBUSY on error.  A warning message is also
3955  * printed on failure.
3956  */
3957 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3958 {
3959 	return pci_request_selected_regions_exclusive(pdev,
3960 				((1 << PCI_STD_NUM_BARS) - 1), res_name);
3961 }
3962 EXPORT_SYMBOL(pci_request_regions_exclusive);
3963 
3964 /*
3965  * Record the PCI IO range (expressed as CPU physical address + size).
3966  * Return a negative value if an error has occurred, zero otherwise.
3967  */
3968 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3969 			resource_size_t	size)
3970 {
3971 	int ret = 0;
3972 #ifdef PCI_IOBASE
3973 	struct logic_pio_hwaddr *range;
3974 
3975 	if (!size || addr + size < addr)
3976 		return -EINVAL;
3977 
3978 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
3979 	if (!range)
3980 		return -ENOMEM;
3981 
3982 	range->fwnode = fwnode;
3983 	range->size = size;
3984 	range->hw_start = addr;
3985 	range->flags = LOGIC_PIO_CPU_MMIO;
3986 
3987 	ret = logic_pio_register_range(range);
3988 	if (ret)
3989 		kfree(range);
3990 #endif
3991 
3992 	return ret;
3993 }
3994 
3995 phys_addr_t pci_pio_to_address(unsigned long pio)
3996 {
3997 	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3998 
3999 #ifdef PCI_IOBASE
4000 	if (pio >= MMIO_UPPER_LIMIT)
4001 		return address;
4002 
4003 	address = logic_pio_to_hwaddr(pio);
4004 #endif
4005 
4006 	return address;
4007 }
4008 
4009 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4010 {
4011 #ifdef PCI_IOBASE
4012 	return logic_pio_trans_cpuaddr(address);
4013 #else
4014 	if (address > IO_SPACE_LIMIT)
4015 		return (unsigned long)-1;
4016 
4017 	return (unsigned long) address;
4018 #endif
4019 }
4020 
4021 /**
4022  * pci_remap_iospace - Remap the memory mapped I/O space
4023  * @res: Resource describing the I/O space
4024  * @phys_addr: physical address of range to be mapped
4025  *
4026  * Remap the memory mapped I/O space described by @res and the CPU
4027  * physical address @phys_addr into virtual address space.  Only
4028  * architectures that have memory mapped IO functions defined (and the
4029  * PCI_IOBASE value defined) should call this function.
4030  */
4031 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4032 {
4033 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4034 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4035 
4036 	if (!(res->flags & IORESOURCE_IO))
4037 		return -EINVAL;
4038 
4039 	if (res->end > IO_SPACE_LIMIT)
4040 		return -EINVAL;
4041 
4042 	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4043 				  pgprot_device(PAGE_KERNEL));
4044 #else
4045 	/*
4046 	 * This architecture does not have memory mapped I/O space,
4047 	 * so this function should never be called
4048 	 */
4049 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4050 	return -ENODEV;
4051 #endif
4052 }
4053 EXPORT_SYMBOL(pci_remap_iospace);
4054 
4055 /**
4056  * pci_unmap_iospace - Unmap the memory mapped I/O space
4057  * @res: resource to be unmapped
4058  *
4059  * Unmap the CPU virtual address range described by @res.  Only
4060  * architectures that have memory mapped IO functions defined (and the
4061  * PCI_IOBASE value defined) should call this function.
4062  */
4063 void pci_unmap_iospace(struct resource *res)
4064 {
4065 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4066 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4067 
4068 	unmap_kernel_range(vaddr, resource_size(res));
4069 #endif
4070 }
4071 EXPORT_SYMBOL(pci_unmap_iospace);
4072 
4073 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4074 {
4075 	struct resource **res = ptr;
4076 
4077 	pci_unmap_iospace(*res);
4078 }
4079 
4080 /**
4081  * devm_pci_remap_iospace - Managed pci_remap_iospace()
4082  * @dev: Generic device to remap IO address for
4083  * @res: Resource describing the I/O space
4084  * @phys_addr: physical address of range to be mapped
4085  *
4086  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
4087  * detach.
4088  */
4089 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4090 			   phys_addr_t phys_addr)
4091 {
4092 	const struct resource **ptr;
4093 	int error;
4094 
4095 	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4096 	if (!ptr)
4097 		return -ENOMEM;
4098 
4099 	error = pci_remap_iospace(res, phys_addr);
4100 	if (error) {
4101 		devres_free(ptr);
4102 	} else	{
4103 		*ptr = res;
4104 		devres_add(dev, ptr);
4105 	}
4106 
4107 	return error;
4108 }
4109 EXPORT_SYMBOL(devm_pci_remap_iospace);
4110 
4111 /**
4112  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4113  * @dev: Generic device to remap IO address for
4114  * @offset: Resource address to map
4115  * @size: Size of map
4116  *
4117  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4118  * detach.
4119  */
4120 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4121 				      resource_size_t offset,
4122 				      resource_size_t size)
4123 {
4124 	void __iomem **ptr, *addr;
4125 
4126 	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4127 	if (!ptr)
4128 		return NULL;
4129 
4130 	addr = pci_remap_cfgspace(offset, size);
4131 	if (addr) {
4132 		*ptr = addr;
4133 		devres_add(dev, ptr);
4134 	} else
4135 		devres_free(ptr);
4136 
4137 	return addr;
4138 }
4139 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4140 
4141 /**
4142  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4143  * @dev: generic device to handle the resource for
4144  * @res: configuration space resource to be handled
4145  *
4146  * Checks that a resource is a valid memory region, requests the memory
4147  * region, and ioremaps it with the pci_remap_cfgspace() API, which
4148  * ensures the proper PCI configuration space memory attributes are used.
4149  *
4150  * All operations are managed and will be undone on driver detach.
4151  *
4152  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4153  * on failure. Usage example::
4154  *
4155  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4156  *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4157  *	if (IS_ERR(base))
4158  *		return PTR_ERR(base);
4159  */
4160 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4161 					  struct resource *res)
4162 {
4163 	resource_size_t size;
4164 	const char *name;
4165 	void __iomem *dest_ptr;
4166 
4167 	BUG_ON(!dev);
4168 
4169 	if (!res || resource_type(res) != IORESOURCE_MEM) {
4170 		dev_err(dev, "invalid resource\n");
4171 		return IOMEM_ERR_PTR(-EINVAL);
4172 	}
4173 
4174 	size = resource_size(res);
4175 	name = res->name ?: dev_name(dev);
4176 
4177 	if (!devm_request_mem_region(dev, res->start, size, name)) {
4178 		dev_err(dev, "can't request region for resource %pR\n", res);
4179 		return IOMEM_ERR_PTR(-EBUSY);
4180 	}
4181 
4182 	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4183 	if (!dest_ptr) {
4184 		dev_err(dev, "ioremap failed for resource %pR\n", res);
4185 		devm_release_mem_region(dev, res->start, size);
4186 		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4187 	}
4188 
4189 	return dest_ptr;
4190 }
4191 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4192 
4193 static void __pci_set_master(struct pci_dev *dev, bool enable)
4194 {
4195 	u16 old_cmd, cmd;
4196 
4197 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4198 	if (enable)
4199 		cmd = old_cmd | PCI_COMMAND_MASTER;
4200 	else
4201 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4202 	if (cmd != old_cmd) {
4203 		pci_dbg(dev, "%s bus mastering\n",
4204 			enable ? "enabling" : "disabling");
4205 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4206 	}
4207 	dev->is_busmaster = enable;
4208 }
4209 
4210 /**
4211  * pcibios_setup - process "pci=" kernel boot arguments
4212  * @str: string used to pass in "pci=" kernel boot arguments
4213  *
4214  * Process kernel boot arguments.  This is the default implementation.
4215  * Architecture specific implementations can override this as necessary.
4216  */
4217 char * __weak __init pcibios_setup(char *str)
4218 {
4219 	return str;
4220 }
4221 
4222 /**
4223  * pcibios_set_master - enable PCI bus-mastering for device dev
4224  * @dev: the PCI device to enable
4225  *
4226  * Enables PCI bus-mastering for the device.  This is the default
4227  * implementation.  Architecture specific implementations can override
4228  * this if necessary.
4229  */
4230 void __weak pcibios_set_master(struct pci_dev *dev)
4231 {
4232 	u8 lat;
4233 
4234 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4235 	if (pci_is_pcie(dev))
4236 		return;
4237 
4238 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4239 	if (lat < 16)
4240 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4241 	else if (lat > pcibios_max_latency)
4242 		lat = pcibios_max_latency;
4243 	else
4244 		return;
4245 
4246 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4247 }
4248 
4249 /**
4250  * pci_set_master - enables bus-mastering for device dev
4251  * @dev: the PCI device to enable
4252  *
4253  * Enables bus-mastering on the device and calls pcibios_set_master()
4254  * to do the needed arch specific settings.
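 *
 * A typical probe path enables the device before letting it master the
 * bus (hypothetical sketch, not code from this file)::
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);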
4255  */
4256 void pci_set_master(struct pci_dev *dev)
4257 {
4258 	__pci_set_master(dev, true);
4259 	pcibios_set_master(dev);
4260 }
4261 EXPORT_SYMBOL(pci_set_master);
4262 
4263 /**
4264  * pci_clear_master - disables bus-mastering for device dev
4265  * @dev: the PCI device to disable
4266  */
4267 void pci_clear_master(struct pci_dev *dev)
4268 {
4269 	__pci_set_master(dev, false);
4270 }
4271 EXPORT_SYMBOL(pci_clear_master);
4272 
4273 /**
4274  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4275  * @dev: the PCI device for which MWI is to be enabled
4276  *
4277  * Helper function for pci_set_mwi.
4278  * Originally copied from drivers/net/acenic.c.
4279  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4280  *
4281  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
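 *
 * Worked example: with a 64-byte system cache line, pci_cache_line_size
 * is 16 dwords.  A device already programmed to 16 or 32 passes the
 * validation below; one reading back 0 or 8 is rewritten to 16.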
4282  */
4283 int pci_set_cacheline_size(struct pci_dev *dev)
4284 {
4285 	u8 cacheline_size;
4286 
4287 	if (!pci_cache_line_size)
4288 		return -EINVAL;
4289 
4290 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4291 	   equal to or a multiple of the right value. */
4292 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4293 	if (cacheline_size >= pci_cache_line_size &&
4294 	    (cacheline_size % pci_cache_line_size) == 0)
4295 		return 0;
4296 
4297 	/* Write the correct value. */
4298 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4299 	/* Read it back. */
4300 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4301 	if (cacheline_size == pci_cache_line_size)
4302 		return 0;
4303 
4304 	pci_info(dev, "cache line size of %d is not supported\n",
4305 		   pci_cache_line_size << 2);
4306 
4307 	return -EINVAL;
4308 }
4309 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4310 
4311 /**
4312  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4313  * @dev: the PCI device for which MWI is enabled
4314  *
4315  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4316  *
4317  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4318  */
4319 int pci_set_mwi(struct pci_dev *dev)
4320 {
4321 #ifdef PCI_DISABLE_MWI
4322 	return 0;
4323 #else
4324 	int rc;
4325 	u16 cmd;
4326 
4327 	rc = pci_set_cacheline_size(dev);
4328 	if (rc)
4329 		return rc;
4330 
4331 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4332 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4333 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4334 		cmd |= PCI_COMMAND_INVALIDATE;
4335 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4336 	}
4337 	return 0;
4338 #endif
4339 }
4340 EXPORT_SYMBOL(pci_set_mwi);
4341 
4342 /**
4343  * pcim_set_mwi - a device-managed pci_set_mwi()
4344  * @dev: the PCI device for which MWI is enabled
4345  *
4346  * Managed pci_set_mwi().
4347  *
4348  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4349  */
4350 int pcim_set_mwi(struct pci_dev *dev)
4351 {
4352 	struct pci_devres *dr;
4353 
4354 	dr = find_pci_dr(dev);
4355 	if (!dr)
4356 		return -ENOMEM;
4357 
4358 	dr->mwi = 1;
4359 	return pci_set_mwi(dev);
4360 }
4361 EXPORT_SYMBOL(pcim_set_mwi);
4362 
4363 /**
4364  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4365  * @dev: the PCI device for which MWI is enabled
4366  *
4367  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4368  * Callers are not required to check the return value.
4369  *
4370  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4371  */
4372 int pci_try_set_mwi(struct pci_dev *dev)
4373 {
4374 #ifdef PCI_DISABLE_MWI
4375 	return 0;
4376 #else
4377 	return pci_set_mwi(dev);
4378 #endif
4379 }
4380 EXPORT_SYMBOL(pci_try_set_mwi);
4381 
4382 /**
4383  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4384  * @dev: the PCI device to disable
4385  *
4386  * Disables PCI Memory-Write-Invalidate transaction on the device
4387  */
4388 void pci_clear_mwi(struct pci_dev *dev)
4389 {
4390 #ifndef PCI_DISABLE_MWI
4391 	u16 cmd;
4392 
4393 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4394 	if (cmd & PCI_COMMAND_INVALIDATE) {
4395 		cmd &= ~PCI_COMMAND_INVALIDATE;
4396 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4397 	}
4398 #endif
4399 }
4400 EXPORT_SYMBOL(pci_clear_mwi);
4401 
4402 /**
4403  * pci_intx - enables/disables PCI INTx for device dev
4404  * @pdev: the PCI device to operate on
4405  * @enable: boolean: whether to enable or disable PCI INTx
4406  *
4407  * Enables/disables PCI INTx for device @pdev
4408  */
4409 void pci_intx(struct pci_dev *pdev, int enable)
4410 {
4411 	u16 pci_command, new;
4412 
4413 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4414 
4415 	if (enable)
4416 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4417 	else
4418 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4419 
4420 	if (new != pci_command) {
4421 		struct pci_devres *dr;
4422 
4423 		pci_write_config_word(pdev, PCI_COMMAND, new);
4424 
4425 		dr = find_pci_dr(pdev);
4426 		if (dr && !dr->restore_intx) {
4427 			dr->restore_intx = 1;
4428 			dr->orig_intx = !enable;
4429 		}
4430 	}
4431 }
4432 EXPORT_SYMBOL_GPL(pci_intx);
4433 
4434 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4435 {
4436 	struct pci_bus *bus = dev->bus;
4437 	bool mask_updated = true;
4438 	u32 cmd_status_dword;
4439 	u16 origcmd, newcmd;
4440 	unsigned long flags;
4441 	bool irq_pending;
4442 
4443 	/*
4444 	 * We do a single dword read to retrieve both command and status.
4445 	 * Document assumptions that make this possible.
4446 	 */
4447 	BUILD_BUG_ON(PCI_COMMAND % 4);
4448 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4449 
4450 	raw_spin_lock_irqsave(&pci_lock, flags);
4451 
4452 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4453 
4454 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4455 
4456 	/*
4457 	 * Check interrupt status register to see whether our device
4458 	 * triggered the interrupt (when masking) or the next IRQ is
4459 	 * already pending (when unmasking).
4460 	 */
4461 	if (mask != irq_pending) {
4462 		mask_updated = false;
4463 		goto done;
4464 	}
4465 
4466 	origcmd = cmd_status_dword;
4467 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4468 	if (mask)
4469 		newcmd |= PCI_COMMAND_INTX_DISABLE;
4470 	if (newcmd != origcmd)
4471 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4472 
4473 done:
4474 	raw_spin_unlock_irqrestore(&pci_lock, flags);
4475 
4476 	return mask_updated;
4477 }
4478 
4479 /**
4480  * pci_check_and_mask_intx - mask INTx on pending interrupt
4481  * @dev: the PCI device to operate on
4482  *
4483  * Check if the device dev has its INTx line asserted, mask it and return
4484  * true in that case. False is returned if no interrupt was pending.
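 *
 * Illustrative sketch of a shared INTx handler built on this helper
 * (hypothetical code; the device would later be unmasked with
 * pci_check_and_unmask_intx() once serviced)::
 *
 *	static irqreturn_t my_intx_handler(int irq, void *arg)
 *	{
 *		struct my_dev *md = arg;
 *
 *		if (!pci_check_and_mask_intx(md->pdev))
 *			return IRQ_NONE;
 *
 *		return IRQ_HANDLED;
 *	}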
4485  */
4486 bool pci_check_and_mask_intx(struct pci_dev *dev)
4487 {
4488 	return pci_check_and_set_intx_mask(dev, true);
4489 }
4490 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4491 
4492 /**
4493  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4494  * @dev: the PCI device to operate on
4495  *
4496  * Check if the device dev has its INTx line asserted, unmask it if not and
4497  * return true. False is returned and the mask remains active if there was
4498  * still an interrupt pending.
4499  */
4500 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4501 {
4502 	return pci_check_and_set_intx_mask(dev, false);
4503 }
4504 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4505 
4506 /**
4507  * pci_wait_for_pending_transaction - wait for pending transaction
4508  * @dev: the PCI device to operate on
4509  *
4510  * Return 0 if the transaction is still pending, 1 otherwise.
4511  */
4512 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4513 {
4514 	if (!pci_is_pcie(dev))
4515 		return 1;
4516 
4517 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4518 				    PCI_EXP_DEVSTA_TRPND);
4519 }
4520 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4521 
4522 /**
4523  * pcie_has_flr - check if a device supports function level resets
4524  * @dev: device to check
4525  *
4526  * Returns true if the device advertises support for PCIe function level
4527  * resets.
4528  */
4529 bool pcie_has_flr(struct pci_dev *dev)
4530 {
4531 	u32 cap;
4532 
4533 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4534 		return false;
4535 
4536 	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4537 	return cap & PCI_EXP_DEVCAP_FLR;
4538 }
4539 EXPORT_SYMBOL_GPL(pcie_has_flr);
4540 
4541 /**
4542  * pcie_flr - initiate a PCIe function level reset
4543  * @dev: device to reset
4544  *
4545  * Initiate a function level reset on @dev.  The caller should ensure the
4546  * device supports FLR before calling this function, e.g. by using the
4547  * pcie_has_flr() helper.
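 *
 * Illustrative sketch (hypothetical caller)::
 *
 *	if (pcie_has_flr(dev))
 *		rc = pcie_flr(dev);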
4548  */
4549 int pcie_flr(struct pci_dev *dev)
4550 {
4551 	if (!pci_wait_for_pending_transaction(dev))
4552 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4553 
4554 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4555 
4556 	if (dev->imm_ready)
4557 		return 0;
4558 
4559 	/*
4560 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4561 	 * 100ms, but may silently discard requests while the FLR is in
4562 	 * progress.  Wait 100ms before trying to access the device.
4563 	 */
4564 	msleep(100);
4565 
4566 	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4567 }
4568 EXPORT_SYMBOL_GPL(pcie_flr);
4569 
4570 static int pci_af_flr(struct pci_dev *dev, int probe)
4571 {
4572 	int pos;
4573 	u8 cap;
4574 
4575 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4576 	if (!pos)
4577 		return -ENOTTY;
4578 
4579 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4580 		return -ENOTTY;
4581 
4582 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4583 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4584 		return -ENOTTY;
4585 
4586 	if (probe)
4587 		return 0;
4588 
4589 	/*
4590 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4591 	 * is used, so we use the control offset rather than status and shift
4592 	 * the test bit to match.
4593 	 */
4594 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4595 				 PCI_AF_STATUS_TP << 8))
4596 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4597 
4598 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4599 
4600 	if (dev->imm_ready)
4601 		return 0;
4602 
4603 	/*
4604 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4605 	 * updated 27 July 2006; a device must complete an FLR within
4606 	 * 100ms, but may silently discard requests while the FLR is in
4607 	 * progress.  Wait 100ms before trying to access the device.
4608 	 */
4609 	msleep(100);
4610 
4611 	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4612 }
4613 
4614 /**
4615  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4616  * @dev: Device to reset.
4617  * @probe: If set, only check if the device can be reset this way.
4618  *
4619  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4620  * unset, it will be reinitialized internally when going from PCI_D3hot to
4621  * PCI_D0.  If that's the case and the device is not in a low-power state
4622  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4623  *
4624  * NOTE: This causes the caller to sleep for twice the device power transition
4625  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4626  * by default (i.e. unless the @dev's d3_delay field has a different value).
4627  * Moreover, only devices in D0 can be reset by this function.
4628  */
4629 static int pci_pm_reset(struct pci_dev *dev, int probe)
4630 {
4631 	u16 csr;
4632 
4633 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4634 		return -ENOTTY;
4635 
4636 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4637 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4638 		return -ENOTTY;
4639 
4640 	if (probe)
4641 		return 0;
4642 
4643 	if (dev->current_state != PCI_D0)
4644 		return -EINVAL;
4645 
4646 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4647 	csr |= PCI_D3hot;
4648 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4649 	pci_dev_d3_sleep(dev);
4650 
4651 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4652 	csr |= PCI_D0;
4653 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4654 	pci_dev_d3_sleep(dev);
4655 
4656 	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4657 }
4658 
4659 /**
4660  * pcie_wait_for_link_delay - Wait until link is active or inactive
4661  * @pdev: Bridge device
4662  * @active: waiting for active or inactive?
4663  * @delay: Delay to wait after link has become active (in ms)
4664  *
4665  * Use this to wait until the link becomes active or inactive.
4666  */
4667 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4668 				     int delay)
4669 {
4670 	int timeout = 1000;
4671 	bool ret;
4672 	u16 lnk_status;
4673 
4674 	/*
4675 	 * Some controllers might not implement link active reporting. In this
4676 	 * case, we wait for 1000 + 100 ms.
4677 	 */
4678 	if (!pdev->link_active_reporting) {
4679 		msleep(1100);
4680 		return true;
4681 	}
4682 
4683 	/*
4684 	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4685 	 * 20ms, after which we should expect the link to be active if the reset
4686 	 * was successful. If so, software must wait a minimum of 100ms before
4687 	 * sending configuration requests to devices downstream of this port.
4688 	 *
4689 	 * If the link fails to activate, either the device was physically
4690 	 * removed or the link is permanently failed.
4691 	 */
4692 	if (active)
4693 		msleep(20);
4694 	for (;;) {
4695 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4696 		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4697 		if (ret == active)
4698 			break;
4699 		if (timeout <= 0)
4700 			break;
4701 		msleep(10);
4702 		timeout -= 10;
4703 	}
4704 	if (active && ret)
4705 		msleep(delay);
4706 	else if (ret != active)
4707 		pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4708 			active ? "set" : "cleared");
4709 	return ret == active;
4710 }
4711 
4712 /**
4713  * pcie_wait_for_link - Wait until link is active or inactive
4714  * @pdev: Bridge device
4715  * @active: waiting for active or inactive?
4716  *
4717  * Use this to wait until the link becomes active or inactive.
4718  */
4719 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4720 {
4721 	return pcie_wait_for_link_delay(pdev, active, 100);
4722 }
4723 
4724 /*
4725  * Find maximum D3cold delay required by all the devices on the bus.  The
4726  * spec says 100 ms, but firmware can lower it and we allow drivers to
4727  * increase it as well.
4728  *
4729  * Called with @pci_bus_sem locked for reading.
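 *
 * Worked example: with devices reporting delays of 0, 0 and 120 ms,
 * min_delay ends up 0 and max_delay 120, so 120 is returned.  If every
 * device reports 0 (firmware says no delay is needed), 0 is returned
 * and the caller can skip the wait entirely.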
4730  */
4731 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4732 {
4733 	const struct pci_dev *pdev;
4734 	int min_delay = 100;
4735 	int max_delay = 0;
4736 
4737 	list_for_each_entry(pdev, &bus->devices, bus_list) {
4738 		if (pdev->d3cold_delay < min_delay)
4739 			min_delay = pdev->d3cold_delay;
4740 		if (pdev->d3cold_delay > max_delay)
4741 			max_delay = pdev->d3cold_delay;
4742 	}
4743 
4744 	return max(min_delay, max_delay);
4745 }
4746 
4747 /**
4748  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4749  * @dev: PCI bridge
4750  *
4751  * Handle necessary delays before access to the devices on the secondary
4752  * side of the bridge is permitted after the D3cold to D0 transition.
4753  *
4754  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4755  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4756  * 4.3.2.
4757  */
4758 void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4759 {
4760 	struct pci_dev *child;
4761 	int delay;
4762 
4763 	if (pci_dev_is_disconnected(dev))
4764 		return;
4765 
4766 	if (!pci_is_bridge(dev) || !dev->bridge_d3)
4767 		return;
4768 
4769 	down_read(&pci_bus_sem);
4770 
4771 	/*
4772 	 * We only deal with devices that are currently present on the bus.
4773 	 * For any hot-added devices the access delay is handled in pciehp
4774 	 * board_added(). In the case of ACPI hotplug the firmware is expected
4775 	 * to configure the devices before the OS is notified.
4776 	 */
4777 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4778 		up_read(&pci_bus_sem);
4779 		return;
4780 	}
4781 
4782 	/* Take d3cold_delay requirements into account */
4783 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4784 	if (!delay) {
4785 		up_read(&pci_bus_sem);
4786 		return;
4787 	}
4788 
4789 	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4790 				 bus_list);
4791 	up_read(&pci_bus_sem);
4792 
4793 	/*
4794 	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4795 	 * accessing the device after reset (that is, 1000 ms + 100 ms). In
4796 	 * practice this should not be needed because we don't do power
4797 	 * management for them (see pci_bridge_d3_possible()).
4798 	 */
4799 	if (!pci_is_pcie(dev)) {
4800 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4801 		msleep(1000 + delay);
4802 		return;
4803 	}
4804 
4805 	/*
4806 	 * For PCIe downstream and root ports that do not support speeds
4807 	 * greater than 5 GT/s, waiting the minimum 100 ms is enough. For
4808 	 * higher speeds (gen3) we first need to wait for the data link
4809 	 * layer to become active.
4810 	 *
4811 	 * However, 100 ms is the minimum and the PCIe spec says the
4812 	 * software must allow at least 1s before it can determine that the
4813 	 * device that did not respond is a broken device. There is
4814 	 * evidence that 100 ms is not always enough; for example, a certain
4815 	 * Titan Ridge xHCI controller does not always respond to
4816 	 * configuration requests if we only wait for 100 ms (see
4817 	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
4818 	 *
4819 	 * Therefore we wait for 100 ms and check for the device presence.
4820 	 * If it is still not present, give it an additional 100 ms.
4821 	 */
4822 	if (!pcie_downstream_port(dev))
4823 		return;
4824 
4825 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4826 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4827 		msleep(delay);
4828 	} else {
4829 		pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4830 			delay);
4831 		if (!pcie_wait_for_link_delay(dev, true, delay)) {
4832 			/* Did not train, no need to wait any further */
4833 			return;
4834 		}
4835 	}
4836 
4837 	if (!pci_device_is_present(child)) {
4838 		pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4839 		msleep(delay);
4840 	}
4841 }
4842 
4843 void pci_reset_secondary_bus(struct pci_dev *dev)
4844 {
4845 	u16 ctrl;
4846 
4847 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4848 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4849 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4850 
4851 	/*
4852 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4853 	 * this to 2ms to ensure that we meet the minimum requirement.
4854 	 */
4855 	msleep(2);
4856 
4857 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4858 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4859 
4860 	/*
4861 	 * Trhfa for conventional PCI is 2^25 clock cycles.
4862 	 * Assuming a minimum 33MHz clock this results in a 1s
4863 	 * delay before we can consider subordinate devices to
4864 	 * be re-initialized.  PCIe has some ways to shorten this,
4865 	 * but we don't make use of them yet.
4866 	 */
4867 	ssleep(1);
4868 }
4869 
4870 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4871 {
4872 	pci_reset_secondary_bus(dev);
4873 }
4874 
4875 /**
4876  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4877  * @dev: Bridge device
4878  *
4879  * Use the bridge control register to assert reset on the secondary bus.
4880  * Devices on the secondary bus are left in power-on state.
4881  */
4882 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4883 {
4884 	pcibios_reset_secondary_bus(dev);
4885 
4886 	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4887 }
4888 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4889 
4890 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4891 {
4892 	struct pci_dev *pdev;
4893 
4894 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4895 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4896 		return -ENOTTY;
4897 
4898 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4899 		if (pdev != dev)
4900 			return -ENOTTY;
4901 
4902 	if (probe)
4903 		return 0;
4904 
4905 	return pci_bridge_secondary_bus_reset(dev->bus->self);
4906 }
4907 
4908 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4909 {
4910 	int rc = -ENOTTY;
4911 
4912 	if (!hotplug || !try_module_get(hotplug->owner))
4913 		return rc;
4914 
4915 	if (hotplug->ops->reset_slot)
4916 		rc = hotplug->ops->reset_slot(hotplug, probe);
4917 
4918 	module_put(hotplug->owner);
4919 
4920 	return rc;
4921 }
4922 
4923 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4924 {
4925 	struct pci_dev *pdev;
4926 
4927 	if (dev->subordinate || !dev->slot ||
4928 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4929 		return -ENOTTY;
4930 
4931 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4932 		if (pdev != dev && pdev->slot == dev->slot)
4933 			return -ENOTTY;
4934 
4935 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4936 }
4937 
4938 static void pci_dev_lock(struct pci_dev *dev)
4939 {
4940 	pci_cfg_access_lock(dev);
4941 	/* block PM suspend, driver probe, etc. */
4942 	device_lock(&dev->dev);
4943 }
4944 
4945 /* Return 1 on successful lock, 0 on contention */
4946 static int pci_dev_trylock(struct pci_dev *dev)
4947 {
4948 	if (pci_cfg_access_trylock(dev)) {
4949 		if (device_trylock(&dev->dev))
4950 			return 1;
4951 		pci_cfg_access_unlock(dev);
4952 	}
4953 
4954 	return 0;
4955 }
4956 
4957 static void pci_dev_unlock(struct pci_dev *dev)
4958 {
4959 	device_unlock(&dev->dev);
4960 	pci_cfg_access_unlock(dev);
4961 }
4962 
4963 static void pci_dev_save_and_disable(struct pci_dev *dev)
4964 {
4965 	const struct pci_error_handlers *err_handler =
4966 			dev->driver ? dev->driver->err_handler : NULL;
4967 
4968 	/*
4969 	 * dev->driver->err_handler->reset_prepare() is protected against
4970 	 * races with ->remove() by the device lock, which must be held by
4971 	 * the caller.
4972 	 */
4973 	if (err_handler && err_handler->reset_prepare)
4974 		err_handler->reset_prepare(dev);
4975 
4976 	/*
4977 	 * Wake up the device prior to save.  PM registers default to D0 after
4978 	 * reset and a simple register restore doesn't reliably return
4979 	 * to a non-D0 state anyway.
4980 	 */
4981 	pci_set_power_state(dev, PCI_D0);
4982 
4983 	pci_save_state(dev);
4984 	/*
4985 	 * Disable the device by clearing the Command register, except for
4986 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
4987 	 * BARs, but also prevents the device from being Bus Master, preventing
4988 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
4989 	 * compliant devices, INTx-disable prevents legacy interrupts.
4990 	 */
4991 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4992 }
4993 
4994 static void pci_dev_restore(struct pci_dev *dev)
4995 {
4996 	const struct pci_error_handlers *err_handler =
4997 			dev->driver ? dev->driver->err_handler : NULL;
4998 
4999 	pci_restore_state(dev);
5000 
5001 	/*
5002 	 * dev->driver->err_handler->reset_done() is protected against
5003 	 * races with ->remove() by the device lock, which must be held by
5004 	 * the caller.
5005 	 */
5006 	if (err_handler && err_handler->reset_done)
5007 		err_handler->reset_done(dev);
5008 }
5009 
5010 /**
5011  * __pci_reset_function_locked - reset a PCI device function while holding
5012  * the @dev mutex lock.
5013  * @dev: PCI device to reset
5014  *
5015  * Some devices allow an individual function to be reset without affecting
5016  * other functions in the same device.  The PCI device must be responsive
5017  * to PCI config space in order to use this function.
5018  *
5019  * The device function is presumed to be unused and the caller is holding
5020  * the device mutex lock when this function is called.
5021  *
5022  * Resetting the device will make the contents of PCI configuration space
5023  * random, so any caller of this must be prepared to reinitialise the
5024  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5025  * etc.
5026  *
5027  * Returns 0 if the device function was successfully reset or negative if the
5028  * device doesn't support resetting a single function.
5029  */
5030 int __pci_reset_function_locked(struct pci_dev *dev)
5031 {
5032 	int rc;
5033 
5034 	might_sleep();
5035 
5036 	/*
5037 	 * A reset method returns -ENOTTY if it doesn't support this device
5038 	 * and we should try the next method.
5039 	 *
5040 	 * If it returns 0 (success), we're finished.  If it returns any
5041 	 * other error, we're also finished: this indicates that further
5042 	 * reset mechanisms might be broken on the device.
5043 	 */
5044 	rc = pci_dev_specific_reset(dev, 0);
5045 	if (rc != -ENOTTY)
5046 		return rc;
5047 	if (pcie_has_flr(dev)) {
5048 		rc = pcie_flr(dev);
5049 		if (rc != -ENOTTY)
5050 			return rc;
5051 	}
5052 	rc = pci_af_flr(dev, 0);
5053 	if (rc != -ENOTTY)
5054 		return rc;
5055 	rc = pci_pm_reset(dev, 0);
5056 	if (rc != -ENOTTY)
5057 		return rc;
5058 	rc = pci_dev_reset_slot_function(dev, 0);
5059 	if (rc != -ENOTTY)
5060 		return rc;
5061 	return pci_parent_bus_reset(dev, 0);
5062 }
5063 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5064 
5065 /**
5066  * pci_probe_reset_function - check whether the device can be safely reset
5067  * @dev: PCI device to reset
5068  *
5069  * Some devices allow an individual function to be reset without affecting
5070  * other functions in the same device.  The PCI device must be responsive
5071  * to PCI config space in order to use this function.
5072  *
5073  * Returns 0 if the device function can be reset or negative if the
5074  * device doesn't support resetting a single function.
5075  */
5076 int pci_probe_reset_function(struct pci_dev *dev)
5077 {
5078 	int rc;
5079 
5080 	might_sleep();
5081 
5082 	rc = pci_dev_specific_reset(dev, 1);
5083 	if (rc != -ENOTTY)
5084 		return rc;
5085 	if (pcie_has_flr(dev))
5086 		return 0;
5087 	rc = pci_af_flr(dev, 1);
5088 	if (rc != -ENOTTY)
5089 		return rc;
5090 	rc = pci_pm_reset(dev, 1);
5091 	if (rc != -ENOTTY)
5092 		return rc;
5093 	rc = pci_dev_reset_slot_function(dev, 1);
5094 	if (rc != -ENOTTY)
5095 		return rc;
5096 
5097 	return pci_parent_bus_reset(dev, 1);
5098 }
5099 
5100 /**
5101  * pci_reset_function - quiesce and reset a PCI device function
5102  * @dev: PCI device to reset
5103  *
5104  * Some devices allow an individual function to be reset without affecting
5105  * other functions in the same device.  The PCI device must be responsive
5106  * to PCI config space in order to use this function.
5107  *
5108  * This function does not just reset the PCI portion of a device, but
5109  * clears all the state associated with the device.  This function differs
5110  * from __pci_reset_function_locked() in that it saves and restores device state
5111  * over the reset and takes the PCI device lock.
5112  *
5113  * Returns 0 if the device function was successfully reset or negative if the
5114  * device doesn't support resetting a single function.
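 *
 * Illustrative sketch (hypothetical caller; the function itself checks
 * dev->reset_fn and takes the device lock)::
 *
 *	err = pci_reset_function(pdev);
 *	if (err)
 *		dev_warn(&pdev->dev, "function reset failed: %d\n", err);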
5115  */
5116 int pci_reset_function(struct pci_dev *dev)
5117 {
5118 	int rc;
5119 
5120 	if (!dev->reset_fn)
5121 		return -ENOTTY;
5122 
5123 	pci_dev_lock(dev);
5124 	pci_dev_save_and_disable(dev);
5125 
5126 	rc = __pci_reset_function_locked(dev);
5127 
5128 	pci_dev_restore(dev);
5129 	pci_dev_unlock(dev);
5130 
5131 	return rc;
5132 }
5133 EXPORT_SYMBOL_GPL(pci_reset_function);
5134 
5135 /**
5136  * pci_reset_function_locked - quiesce and reset a PCI device function
5137  * @dev: PCI device to reset
5138  *
5139  * Some devices allow an individual function to be reset without affecting
5140  * other functions in the same device.  The PCI device must be responsive
5141  * to PCI config space in order to use this function.
5142  *
5143  * This function does not just reset the PCI portion of a device, but
5144  * clears all the state associated with the device.  This function differs
5145  * from __pci_reset_function_locked() in that it saves and restores device state
5146  * over the reset.  It also differs from pci_reset_function() in that it
5147  * requires the PCI device lock to be held.
5148  *
5149  * Returns 0 if the device function was successfully reset or negative if the
5150  * device doesn't support resetting a single function.
5151  */
5152 int pci_reset_function_locked(struct pci_dev *dev)
5153 {
5154 	int rc;
5155 
5156 	if (!dev->reset_fn)
5157 		return -ENOTTY;
5158 
5159 	pci_dev_save_and_disable(dev);
5160 
5161 	rc = __pci_reset_function_locked(dev);
5162 
5163 	pci_dev_restore(dev);
5164 
5165 	return rc;
5166 }
5167 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5168 
5169 /**
5170  * pci_try_reset_function - quiesce and reset a PCI device function
5171  * @dev: PCI device to reset
5172  *
5173  * Same as above, except return -EAGAIN if unable to lock device.
5174  */
5175 int pci_try_reset_function(struct pci_dev *dev)
5176 {
5177 	int rc;
5178 
5179 	if (!dev->reset_fn)
5180 		return -ENOTTY;
5181 
5182 	if (!pci_dev_trylock(dev))
5183 		return -EAGAIN;
5184 
5185 	pci_dev_save_and_disable(dev);
5186 	rc = __pci_reset_function_locked(dev);
5187 	pci_dev_restore(dev);
5188 	pci_dev_unlock(dev);
5189 
5190 	return rc;
5191 }
5192 EXPORT_SYMBOL_GPL(pci_try_reset_function);
5193 
5194 /* Do any devices on or below this bus prevent a bus reset? */
5195 static bool pci_bus_resetable(struct pci_bus *bus)
5196 {
5197 	struct pci_dev *dev;
5198 
5200 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5201 		return false;
5202 
5203 	list_for_each_entry(dev, &bus->devices, bus_list) {
5204 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5205 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5206 			return false;
5207 	}
5208 
5209 	return true;
5210 }
5211 
5212 /* Lock devices from the top of the tree down */
5213 static void pci_bus_lock(struct pci_bus *bus)
5214 {
5215 	struct pci_dev *dev;
5216 
5217 	list_for_each_entry(dev, &bus->devices, bus_list) {
5218 		pci_dev_lock(dev);
5219 		if (dev->subordinate)
5220 			pci_bus_lock(dev->subordinate);
5221 	}
5222 }
5223 
5224 /* Unlock devices from the bottom of the tree up */
5225 static void pci_bus_unlock(struct pci_bus *bus)
5226 {
5227 	struct pci_dev *dev;
5228 
5229 	list_for_each_entry(dev, &bus->devices, bus_list) {
5230 		if (dev->subordinate)
5231 			pci_bus_unlock(dev->subordinate);
5232 		pci_dev_unlock(dev);
5233 	}
5234 }
5235 
5236 /* Return 1 on successful lock, 0 on contention */
5237 static int pci_bus_trylock(struct pci_bus *bus)
5238 {
5239 	struct pci_dev *dev;
5240 
5241 	list_for_each_entry(dev, &bus->devices, bus_list) {
5242 		if (!pci_dev_trylock(dev))
5243 			goto unlock;
5244 		if (dev->subordinate) {
5245 			if (!pci_bus_trylock(dev->subordinate)) {
5246 				pci_dev_unlock(dev);
5247 				goto unlock;
5248 			}
5249 		}
5250 	}
5251 	return 1;
5252 
5253 unlock:
5254 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5255 		if (dev->subordinate)
5256 			pci_bus_unlock(dev->subordinate);
5257 		pci_dev_unlock(dev);
5258 	}
5259 	return 0;
5260 }
5261 
5262 /* Do any devices on or below this slot prevent a bus reset? */
5263 static bool pci_slot_resetable(struct pci_slot *slot)
5264 {
5265 	struct pci_dev *dev;
5266 
5267 	if (slot->bus->self &&
5268 	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5269 		return false;
5270 
5271 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5272 		if (!dev->slot || dev->slot != slot)
5273 			continue;
5274 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5275 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5276 			return false;
5277 	}
5278 
5279 	return true;
5280 }
5281 
5282 /* Lock devices from the top of the tree down */
5283 static void pci_slot_lock(struct pci_slot *slot)
5284 {
5285 	struct pci_dev *dev;
5286 
5287 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5288 		if (!dev->slot || dev->slot != slot)
5289 			continue;
5290 		pci_dev_lock(dev);
5291 		if (dev->subordinate)
5292 			pci_bus_lock(dev->subordinate);
5293 	}
5294 }
5295 
5296 /* Unlock devices from the bottom of the tree up */
5297 static void pci_slot_unlock(struct pci_slot *slot)
5298 {
5299 	struct pci_dev *dev;
5300 
5301 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5302 		if (!dev->slot || dev->slot != slot)
5303 			continue;
5304 		if (dev->subordinate)
5305 			pci_bus_unlock(dev->subordinate);
5306 		pci_dev_unlock(dev);
5307 	}
5308 }
5309 
5310 /* Return 1 on successful lock, 0 on contention */
5311 static int pci_slot_trylock(struct pci_slot *slot)
5312 {
5313 	struct pci_dev *dev;
5314 
5315 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5316 		if (!dev->slot || dev->slot != slot)
5317 			continue;
5318 		if (!pci_dev_trylock(dev))
5319 			goto unlock;
5320 		if (dev->subordinate) {
5321 			if (!pci_bus_trylock(dev->subordinate)) {
5322 				pci_dev_unlock(dev);
5323 				goto unlock;
5324 			}
5325 		}
5326 	}
5327 	return 1;
5328 
5329 unlock:
5330 	list_for_each_entry_continue_reverse(dev,
5331 					     &slot->bus->devices, bus_list) {
5332 		if (!dev->slot || dev->slot != slot)
5333 			continue;
5334 		if (dev->subordinate)
5335 			pci_bus_unlock(dev->subordinate);
5336 		pci_dev_unlock(dev);
5337 	}
5338 	return 0;
5339 }
5340 
5341 /*
5342  * Save and disable devices from the top of the tree down while holding
5343  * the @dev mutex lock for the entire tree.
5344  */
5345 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5346 {
5347 	struct pci_dev *dev;
5348 
5349 	list_for_each_entry(dev, &bus->devices, bus_list) {
5350 		pci_dev_save_and_disable(dev);
5351 		if (dev->subordinate)
5352 			pci_bus_save_and_disable_locked(dev->subordinate);
5353 	}
5354 }
5355 
5356 /*
5357  * Restore devices from top of the tree down while holding @dev mutex lock
5358  * for the entire tree.  Parent bridges need to be restored before we can
5359  * get to subordinate devices.
5360  */
5361 static void pci_bus_restore_locked(struct pci_bus *bus)
5362 {
5363 	struct pci_dev *dev;
5364 
5365 	list_for_each_entry(dev, &bus->devices, bus_list) {
5366 		pci_dev_restore(dev);
5367 		if (dev->subordinate)
5368 			pci_bus_restore_locked(dev->subordinate);
5369 	}
5370 }
5371 
5372 /*
5373  * Save and disable devices from the top of the tree down while holding
5374  * the @dev mutex lock for the entire tree.
5375  */
5376 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5377 {
5378 	struct pci_dev *dev;
5379 
5380 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5381 		if (!dev->slot || dev->slot != slot)
5382 			continue;
5383 		pci_dev_save_and_disable(dev);
5384 		if (dev->subordinate)
5385 			pci_bus_save_and_disable_locked(dev->subordinate);
5386 	}
5387 }
5388 
5389 /*
5390  * Restore devices from top of the tree down while holding @dev mutex lock
5391  * for the entire tree.  Parent bridges need to be restored before we can
5392  * get to subordinate devices.
5393  */
5394 static void pci_slot_restore_locked(struct pci_slot *slot)
5395 {
5396 	struct pci_dev *dev;
5397 
5398 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5399 		if (!dev->slot || dev->slot != slot)
5400 			continue;
5401 		pci_dev_restore(dev);
5402 		if (dev->subordinate)
5403 			pci_bus_restore_locked(dev->subordinate);
5404 	}
5405 }
5406 
5407 static int pci_slot_reset(struct pci_slot *slot, int probe)
5408 {
5409 	int rc;
5410 
5411 	if (!slot || !pci_slot_resetable(slot))
5412 		return -ENOTTY;
5413 
5414 	if (!probe)
5415 		pci_slot_lock(slot);
5416 
5417 	might_sleep();
5418 
5419 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5420 
5421 	if (!probe)
5422 		pci_slot_unlock(slot);
5423 
5424 	return rc;
5425 }
5426 
5427 /**
5428  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5429  * @slot: PCI slot to probe
5430  *
5431  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5432  */
5433 int pci_probe_reset_slot(struct pci_slot *slot)
5434 {
5435 	return pci_slot_reset(slot, 1);
5436 }
5437 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5438 
5439 /**
5440  * __pci_reset_slot - Try to reset a PCI slot
5441  * @slot: PCI slot to reset
5442  *
5443  * A PCI bus may host multiple slots, each slot may support a reset mechanism
5444  * independent of other slots.  For instance, some slots may support slot power
5445  * control.  In the case of a 1:1 bus to slot architecture, this function may
5446  * wrap the bus reset to avoid spurious slot related events such as hotplug.
5447  * Generally a slot reset should be attempted before a bus reset.  All of the
 * functions of the slot and any subordinate buses behind the slot are reset
5449  * through this function.  PCI config space of all devices in the slot and
5450  * behind the slot is saved before and restored after reset.
5451  *
 * Returns -EAGAIN if the slot cannot be locked.
5453  */
5454 static int __pci_reset_slot(struct pci_slot *slot)
5455 {
5456 	int rc;
5457 
5458 	rc = pci_slot_reset(slot, 1);
5459 	if (rc)
5460 		return rc;
5461 
5462 	if (pci_slot_trylock(slot)) {
5463 		pci_slot_save_and_disable_locked(slot);
5464 		might_sleep();
5465 		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5466 		pci_slot_restore_locked(slot);
5467 		pci_slot_unlock(slot);
5468 	} else
5469 		rc = -EAGAIN;
5470 
5471 	return rc;
5472 }
5473 
5474 static int pci_bus_reset(struct pci_bus *bus, int probe)
5475 {
5476 	int ret;
5477 
5478 	if (!bus->self || !pci_bus_resetable(bus))
5479 		return -ENOTTY;
5480 
5481 	if (probe)
5482 		return 0;
5483 
5484 	pci_bus_lock(bus);
5485 
5486 	might_sleep();
5487 
5488 	ret = pci_bridge_secondary_bus_reset(bus->self);
5489 
5490 	pci_bus_unlock(bus);
5491 
5492 	return ret;
5493 }
5494 
5495 /**
5496  * pci_bus_error_reset - reset the bridge's subordinate bus
5497  * @bridge: The parent device that connects to the bus to reset
5498  *
5499  * This function will first try to reset the slots on this bus if the method is
5500  * available. If slot reset fails or is not available, this will fall back to a
5501  * secondary bus reset.
5502  */
5503 int pci_bus_error_reset(struct pci_dev *bridge)
5504 {
5505 	struct pci_bus *bus = bridge->subordinate;
5506 	struct pci_slot *slot;
5507 
5508 	if (!bus)
5509 		return -ENOTTY;
5510 
5511 	mutex_lock(&pci_slot_mutex);
5512 	if (list_empty(&bus->slots))
5513 		goto bus_reset;
5514 
5515 	list_for_each_entry(slot, &bus->slots, list)
5516 		if (pci_probe_reset_slot(slot))
5517 			goto bus_reset;
5518 
5519 	list_for_each_entry(slot, &bus->slots, list)
5520 		if (pci_slot_reset(slot, 0))
5521 			goto bus_reset;
5522 
5523 	mutex_unlock(&pci_slot_mutex);
5524 	return 0;
5525 bus_reset:
5526 	mutex_unlock(&pci_slot_mutex);
5527 	return pci_bus_reset(bridge->subordinate, 0);
5528 }
5529 
5530 /**
5531  * pci_probe_reset_bus - probe whether a PCI bus can be reset
5532  * @bus: PCI bus to probe
5533  *
5534  * Return 0 if bus can be reset, negative if a bus reset is not supported.
5535  */
5536 int pci_probe_reset_bus(struct pci_bus *bus)
5537 {
5538 	return pci_bus_reset(bus, 1);
5539 }
5540 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5541 
5542 /**
5543  * __pci_reset_bus - Try to reset a PCI bus
5544  * @bus: top level PCI bus to reset
5545  *
 * Returns -EAGAIN if the bus cannot be locked.
5547  */
5548 static int __pci_reset_bus(struct pci_bus *bus)
5549 {
5550 	int rc;
5551 
5552 	rc = pci_bus_reset(bus, 1);
5553 	if (rc)
5554 		return rc;
5555 
5556 	if (pci_bus_trylock(bus)) {
5557 		pci_bus_save_and_disable_locked(bus);
5558 		might_sleep();
5559 		rc = pci_bridge_secondary_bus_reset(bus->self);
5560 		pci_bus_restore_locked(bus);
5561 		pci_bus_unlock(bus);
5562 	} else
5563 		rc = -EAGAIN;
5564 
5565 	return rc;
5566 }
5567 
5568 /**
5569  * pci_reset_bus - Try to reset a PCI bus
5570  * @pdev: top level PCI device to reset via slot/bus
5571  *
 * Attempt a slot reset if the slot supports one, otherwise fall back to a
 * bus reset.  Returns -EAGAIN if the slot or bus cannot be locked.
5573  */
5574 int pci_reset_bus(struct pci_dev *pdev)
5575 {
5576 	return (!pci_probe_reset_slot(pdev->slot)) ?
5577 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5578 }
5579 EXPORT_SYMBOL_GPL(pci_reset_bus);
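
/*
 * Example (illustrative sketch only; the helper name is hypothetical): a
 * driver error handler attempting a slot/bus reset from process context.
 * A return of -EAGAIN means the device tree was locked elsewhere and the
 * caller may retry later:
 *
 *	static int mydrv_try_reset(struct pci_dev *pdev)
 *	{
 *		return pci_reset_bus(pdev);
 *	}
 */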
5580 
5581 /**
5582  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5583  * @dev: PCI device to query
5584  *
5585  * Returns mmrbc: maximum designed memory read count in bytes or
5586  * appropriate error value.
5587  */
5588 int pcix_get_max_mmrbc(struct pci_dev *dev)
5589 {
5590 	int cap;
5591 	u32 stat;
5592 
5593 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5594 	if (!cap)
5595 		return -EINVAL;
5596 
5597 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5598 		return -EINVAL;
5599 
5600 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5601 }
5602 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5603 
5604 /**
5605  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5606  * @dev: PCI device to query
5607  *
5608  * Returns mmrbc: maximum memory read count in bytes or appropriate error
5609  * value.
5610  */
5611 int pcix_get_mmrbc(struct pci_dev *dev)
5612 {
5613 	int cap;
5614 	u16 cmd;
5615 
5616 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5617 	if (!cap)
5618 		return -EINVAL;
5619 
5620 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5621 		return -EINVAL;
5622 
5623 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5624 }
5625 EXPORT_SYMBOL(pcix_get_mmrbc);
5626 
5627 /**
5628  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to configure
5630  * @mmrbc: maximum memory read count in bytes
5631  *    valid values are 512, 1024, 2048, 4096
5632  *
 * If possible, set the maximum memory read byte count; some bridges have
 * errata that prevent this.
5635  */
5636 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5637 {
5638 	int cap;
5639 	u32 stat, v, o;
5640 	u16 cmd;
5641 
5642 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5643 		return -EINVAL;
5644 
5645 	v = ffs(mmrbc) - 10;
5646 
5647 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5648 	if (!cap)
5649 		return -EINVAL;
5650 
5651 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5652 		return -EINVAL;
5653 
5654 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5655 		return -E2BIG;
5656 
5657 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5658 		return -EINVAL;
5659 
5660 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5661 	if (o != v) {
5662 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5663 			return -EIO;
5664 
5665 		cmd &= ~PCI_X_CMD_MAX_READ;
5666 		cmd |= v << 2;
5667 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5668 			return -EIO;
5669 	}
5670 	return 0;
5671 }
5672 EXPORT_SYMBOL(pcix_set_mmrbc);
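
/*
 * Example (illustrative only): raise a PCI-X device's memory read byte
 * count to its designed maximum, tolerating failure on bridges whose
 * errata forbid the change:
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0 && pcix_get_mmrbc(pdev) < max)
 *		pcix_set_mmrbc(pdev, max);
 */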
5673 
5674 /**
5675  * pcie_get_readrq - get PCI Express read request size
5676  * @dev: PCI device to query
5677  *
5678  * Returns maximum memory read request in bytes or appropriate error value.
5679  */
5680 int pcie_get_readrq(struct pci_dev *dev)
5681 {
5682 	u16 ctl;
5683 
5684 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5685 
5686 	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5687 }
5688 EXPORT_SYMBOL(pcie_get_readrq);
5689 
5690 /**
5691  * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to configure
5693  * @rq: maximum memory read count in bytes
5694  *    valid values are 128, 256, 512, 1024, 2048, 4096
5695  *
 * If possible, set the maximum memory read request in bytes.
5697  */
5698 int pcie_set_readrq(struct pci_dev *dev, int rq)
5699 {
5700 	u16 v;
5701 
5702 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5703 		return -EINVAL;
5704 
5705 	/*
5706 	 * If using the "performance" PCIe config, we clamp the read rq
5707 	 * size to the max packet size to keep the host bridge from
5708 	 * generating requests larger than we can cope with.
5709 	 */
5710 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5711 		int mps = pcie_get_mps(dev);
5712 
5713 		if (mps < rq)
5714 			rq = mps;
5715 	}
5716 
5717 	v = (ffs(rq) - 8) << 12;
5718 
5719 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5720 						  PCI_EXP_DEVCTL_READRQ, v);
5721 }
5722 EXPORT_SYMBOL(pcie_set_readrq);
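
/*
 * Example (illustrative only): a driver clamping the read request size,
 * e.g. to work around a device erratum:
 *
 *	if (pcie_get_readrq(pdev) > 512)
 *		pcie_set_readrq(pdev, 512);
 */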
5723 
5724 /**
5725  * pcie_get_mps - get PCI Express maximum payload size
5726  * @dev: PCI device to query
5727  *
5728  * Returns maximum payload size in bytes
5729  */
5730 int pcie_get_mps(struct pci_dev *dev)
5731 {
5732 	u16 ctl;
5733 
5734 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5735 
5736 	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5737 }
5738 EXPORT_SYMBOL(pcie_get_mps);
5739 
5740 /**
5741  * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to configure
5743  * @mps: maximum payload size in bytes
5744  *    valid values are 128, 256, 512, 1024, 2048, 4096
5745  *
 * If possible, set the maximum payload size.
5747  */
5748 int pcie_set_mps(struct pci_dev *dev, int mps)
5749 {
5750 	u16 v;
5751 
5752 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5753 		return -EINVAL;
5754 
5755 	v = ffs(mps) - 8;
5756 	if (v > dev->pcie_mpss)
5757 		return -EINVAL;
5758 	v <<= 5;
5759 
5760 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5761 						  PCI_EXP_DEVCTL_PAYLOAD, v);
5762 }
5763 EXPORT_SYMBOL(pcie_set_mps);
5764 
5765 /**
5766  * pcie_bandwidth_available - determine minimum link settings of a PCIe
5767  *			      device and its bandwidth limitation
5768  * @dev: PCI device to query
5769  * @limiting_dev: storage for device causing the bandwidth limitation
5770  * @speed: storage for speed of limiting device
5771  * @width: storage for width of limiting device
5772  *
5773  * Walk up the PCI device chain and find the point where the minimum
5774  * bandwidth is available.  Return the bandwidth available there and (if
5775  * limiting_dev, speed, and width pointers are supplied) information about
5776  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
5777  * raw bandwidth.
5778  */
5779 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5780 			     enum pci_bus_speed *speed,
5781 			     enum pcie_link_width *width)
5782 {
5783 	u16 lnksta;
5784 	enum pci_bus_speed next_speed;
5785 	enum pcie_link_width next_width;
5786 	u32 bw, next_bw;
5787 
5788 	if (speed)
5789 		*speed = PCI_SPEED_UNKNOWN;
5790 	if (width)
5791 		*width = PCIE_LNK_WIDTH_UNKNOWN;
5792 
5793 	bw = 0;
5794 
5795 	while (dev) {
5796 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5797 
5798 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5799 		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5800 			PCI_EXP_LNKSTA_NLW_SHIFT;
5801 
5802 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5803 
5804 		/* Check if current device limits the total bandwidth */
5805 		if (!bw || next_bw <= bw) {
5806 			bw = next_bw;
5807 
5808 			if (limiting_dev)
5809 				*limiting_dev = dev;
5810 			if (speed)
5811 				*speed = next_speed;
5812 			if (width)
5813 				*width = next_width;
5814 		}
5815 
5816 		dev = pci_upstream_bridge(dev);
5817 	}
5818 
5819 	return bw;
5820 }
5821 EXPORT_SYMBOL(pcie_bandwidth_available);
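
/*
 * Example (illustrative sketch): find and report the upstream link that
 * limits a device's throughput:
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	struct pci_dev *limit = NULL;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 *	pci_info(pdev, "%u Mb/s available, limited at %s\n",
 *		 bw, limit ? pci_name(limit) : "<unknown>");
 */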
5822 
5823 /**
5824  * pcie_get_speed_cap - query for the PCI device's link speed capability
5825  * @dev: PCI device to query
5826  *
5827  * Query the PCI device speed capability.  Return the maximum link speed
5828  * supported by the device.
5829  */
5830 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5831 {
5832 	u32 lnkcap2, lnkcap;
5833 
5834 	/*
5835 	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
5836 	 * implementation note there recommends using the Supported Link
5837 	 * Speeds Vector in Link Capabilities 2 when supported.
5838 	 *
5839 	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5840 	 * should use the Supported Link Speeds field in Link Capabilities,
5841 	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5842 	 */
5843 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5844 
5845 	/* PCIe r3.0-compliant */
5846 	if (lnkcap2)
5847 		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5848 
5849 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5850 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5851 		return PCIE_SPEED_5_0GT;
5852 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5853 		return PCIE_SPEED_2_5GT;
5854 
5855 	return PCI_SPEED_UNKNOWN;
5856 }
5857 EXPORT_SYMBOL(pcie_get_speed_cap);
5858 
5859 /**
5860  * pcie_get_width_cap - query for the PCI device's link width capability
5861  * @dev: PCI device to query
5862  *
5863  * Query the PCI device width capability.  Return the maximum link width
5864  * supported by the device.
5865  */
5866 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5867 {
5868 	u32 lnkcap;
5869 
5870 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5871 	if (lnkcap)
5872 		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5873 
5874 	return PCIE_LNK_WIDTH_UNKNOWN;
5875 }
5876 EXPORT_SYMBOL(pcie_get_width_cap);
5877 
5878 /**
5879  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5880  * @dev: PCI device
5881  * @speed: storage for link speed
5882  * @width: storage for link width
5883  *
5884  * Calculate a PCI device's link bandwidth by querying for its link speed
5885  * and width, multiplying them, and applying encoding overhead.  The result
5886  * is in Mb/s, i.e., megabits/second of raw bandwidth.
5887  */
5888 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5889 			   enum pcie_link_width *width)
5890 {
5891 	*speed = pcie_get_speed_cap(dev);
5892 	*width = pcie_get_width_cap(dev);
5893 
5894 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5895 		return 0;
5896 
5897 	return *width * PCIE_SPEED2MBS_ENC(*speed);
5898 }
5899 
5900 /**
5901  * __pcie_print_link_status - Report the PCI device's link speed and width
5902  * @dev: PCI device to query
5903  * @verbose: Print info even when enough bandwidth is available
5904  *
5905  * If the available bandwidth at the device is less than the device is
5906  * capable of, report the device's maximum possible bandwidth and the
5907  * upstream link that limits its performance.  If @verbose, always print
5908  * the available bandwidth, even if the device isn't constrained.
5909  */
5910 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5911 {
5912 	enum pcie_link_width width, width_cap;
5913 	enum pci_bus_speed speed, speed_cap;
5914 	struct pci_dev *limiting_dev = NULL;
5915 	u32 bw_avail, bw_cap;
5916 
5917 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5918 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5919 
5920 	if (bw_avail >= bw_cap && verbose)
5921 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5922 			 bw_cap / 1000, bw_cap % 1000,
5923 			 pci_speed_string(speed_cap), width_cap);
5924 	else if (bw_avail < bw_cap)
5925 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5926 			 bw_avail / 1000, bw_avail % 1000,
5927 			 pci_speed_string(speed), width,
5928 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5929 			 bw_cap / 1000, bw_cap % 1000,
5930 			 pci_speed_string(speed_cap), width_cap);
5931 }
5932 
5933 /**
5934  * pcie_print_link_status - Report the PCI device's link speed and width
5935  * @dev: PCI device to query
5936  *
5937  * Report the available bandwidth at the device.
5938  */
5939 void pcie_print_link_status(struct pci_dev *dev)
5940 {
5941 	__pcie_print_link_status(dev, true);
5942 }
5943 EXPORT_SYMBOL(pcie_print_link_status);
5944 
5945 /**
5946  * pci_select_bars - Make BAR mask from the type of resource
5947  * @dev: the PCI device for which BAR mask is made
5948  * @flags: resource type mask to be selected
5949  *
 * This helper routine makes a BAR mask from the given resource type.
5951  */
5952 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5953 {
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
5956 		if (pci_resource_flags(dev, i) & flags)
5957 			bars |= (1 << i);
5958 	return bars;
5959 }
5960 EXPORT_SYMBOL(pci_select_bars);
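
/*
 * Example (sketch of a common driver idiom; "mydrv" is a placeholder):
 * request only the memory BARs of a device during probe:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, "mydrv");
 */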
5961 
5962 /* Some architectures require additional programming to enable VGA */
5963 static arch_set_vga_state_t arch_set_vga_state;
5964 
5965 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5966 {
5967 	arch_set_vga_state = func;	/* NULL disables */
5968 }
5969 
5970 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5971 				  unsigned int command_bits, u32 flags)
5972 {
5973 	if (arch_set_vga_state)
5974 		return arch_set_vga_state(dev, decode, command_bits,
5975 						flags);
5976 	return 0;
5977 }
5978 
5979 /**
5980  * pci_set_vga_state - set VGA decode state on device and parents if requested
5981  * @dev: the PCI device
5982  * @decode: true = enable decoding, false = disable decoding
5983  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: PCI_VGA_STATE_CHANGE_DECODES to update the device's own
 * PCI_COMMAND bits, and/or PCI_VGA_STATE_CHANGE_BRIDGE to traverse
 * ancestors and update their VGA forwarding
5986  */
5987 int pci_set_vga_state(struct pci_dev *dev, bool decode,
5988 		      unsigned int command_bits, u32 flags)
5989 {
5990 	struct pci_bus *bus;
5991 	struct pci_dev *bridge;
5992 	u16 cmd;
5993 	int rc;
5994 
	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));
5996 
5997 	/* ARCH specific VGA enables */
5998 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5999 	if (rc)
6000 		return rc;
6001 
6002 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6003 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
6005 			cmd |= command_bits;
6006 		else
6007 			cmd &= ~command_bits;
6008 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6009 	}
6010 
6011 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6012 		return 0;
6013 
6014 	bus = dev->bus;
6015 	while (bus) {
6016 		bridge = bus->self;
6017 		if (bridge) {
6018 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6019 					     &cmd);
			if (decode)
6021 				cmd |= PCI_BRIDGE_CTL_VGA;
6022 			else
6023 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6024 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6025 					      cmd);
6026 		}
6027 		bus = bus->parent;
6028 	}
6029 	return 0;
6030 }
6031 
6032 #ifdef CONFIG_ACPI
6033 bool pci_pr3_present(struct pci_dev *pdev)
6034 {
6035 	struct acpi_device *adev;
6036 
6037 	if (acpi_disabled)
6038 		return false;
6039 
6040 	adev = ACPI_COMPANION(&pdev->dev);
6041 	if (!adev)
6042 		return false;
6043 
6044 	return adev->power.flags.power_resources &&
6045 		acpi_has_method(adev->handle, "_PR3");
6046 }
6047 EXPORT_SYMBOL_GPL(pci_pr3_present);
6048 #endif
6049 
6050 /**
6051  * pci_add_dma_alias - Add a DMA devfn alias for a device
6052  * @dev: the PCI device for which alias is added
6053  * @devfn_from: alias slot and function
6054  * @nr_devfns: number of subsequent devfns to alias
6055  *
6056  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6057  * which is used to program permissible bus-devfn source addresses for DMA
6058  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6059  * and are useful for devices generating DMA requests beyond or different
6060  * from their logical bus-devfn.  Examples include device quirks where the
6061  * device simply uses the wrong devfn, as well as non-transparent bridges
6062  * where the alias may be a proxy for devices in another domain.
6063  *
6064  * IOMMU group creation is performed during device discovery or addition,
6065  * prior to any potential DMA mapping and therefore prior to driver probing
6066  * (especially for userspace assigned devices where IOMMU group definition
6067  * cannot be left as a userspace activity).  DMA aliases should therefore
6068  * be configured via quirks, such as the PCI fixup header quirk.
6069  */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
6071 {
6072 	int devfn_to;
6073 
	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6075 	devfn_to = devfn_from + nr_devfns - 1;
6076 
6077 	if (!dev->dma_alias_mask)
6078 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6079 	if (!dev->dma_alias_mask) {
6080 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6081 		return;
6082 	}
6083 
6084 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6085 
6086 	if (nr_devfns == 1)
6087 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6088 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6089 	else if (nr_devfns > 1)
6090 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6091 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6092 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6093 }
6094 
6095 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6096 {
6097 	return (dev1->dma_alias_mask &&
6098 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6099 	       (dev2->dma_alias_mask &&
6100 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6101 	       pci_real_dma_dev(dev1) == dev2 ||
6102 	       pci_real_dma_dev(dev2) == dev1;
6103 }
6104 
6105 bool pci_device_is_present(struct pci_dev *pdev)
6106 {
6107 	u32 v;
6108 
6109 	if (pci_dev_is_disconnected(pdev))
6110 		return false;
6111 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6112 }
6113 EXPORT_SYMBOL_GPL(pci_device_is_present);
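
/*
 * Example (illustrative): a teardown path skipping hardware accesses
 * after surprise removal:
 *
 *	if (pci_device_is_present(pdev))
 *		pci_clear_master(pdev);
 */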
6114 
6115 void pci_ignore_hotplug(struct pci_dev *dev)
6116 {
6117 	struct pci_dev *bridge = dev->bus->self;
6118 
6119 	dev->ignore_hotplug = 1;
6120 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6121 	if (bridge)
6122 		bridge->ignore_hotplug = 1;
6123 }
6124 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6125 
6126 /**
6127  * pci_real_dma_dev - Get PCI DMA device for PCI device
6128  * @dev: the PCI device that may have a PCI DMA alias
6129  *
6130  * Permits the platform to provide architecture-specific functionality to
6131  * devices needing to alias DMA to another PCI device on another PCI bus. If
6132  * the PCI device is on the same bus, it is recommended to use
6133  * pci_add_dma_alias(). This is the default implementation. Architecture
6134  * implementations can override this.
6135  */
6136 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6137 {
6138 	return dev;
6139 }
6140 
6141 resource_size_t __weak pcibios_default_alignment(void)
6142 {
6143 	return 0;
6144 }
6145 
6146 /*
6147  * Arches that don't want to expose struct resource to userland as-is in
6148  * sysfs and /proc can implement their own pci_resource_to_user().
6149  */
6150 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6151 				 const struct resource *rsrc,
6152 				 resource_size_t *start, resource_size_t *end)
6153 {
6154 	*start = rsrc->start;
6155 	*end = rsrc->end;
6156 }
6157 
6158 static char *resource_alignment_param;
6159 static DEFINE_SPINLOCK(resource_alignment_lock);
6160 
6161 /**
6162  * pci_specified_resource_alignment - get resource alignment specified by user.
6163  * @dev: the PCI device to get
6164  * @resize: whether or not to change resources' size when reassigning alignment
6165  *
6166  * RETURNS: Resource alignment if it is specified.
6167  *          Zero if it is not specified.
6168  */
6169 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6170 							bool *resize)
6171 {
6172 	int align_order, count;
6173 	resource_size_t align = pcibios_default_alignment();
6174 	const char *p;
6175 	int ret;
6176 
6177 	spin_lock(&resource_alignment_lock);
6178 	p = resource_alignment_param;
6179 	if (!p || !*p)
6180 		goto out;
6181 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6182 		align = 0;
6183 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6184 		goto out;
6185 	}
6186 
6187 	while (*p) {
6188 		count = 0;
6189 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6190 							p[count] == '@') {
6191 			p += count + 1;
6192 		} else {
6193 			align_order = -1;
6194 		}
6195 
6196 		ret = pci_dev_str_match(dev, p, &p);
6197 		if (ret == 1) {
6198 			*resize = true;
6199 			if (align_order == -1)
6200 				align = PAGE_SIZE;
6201 			else
6202 				align = 1 << align_order;
6203 			break;
6204 		} else if (ret < 0) {
6205 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6206 			       p);
6207 			break;
6208 		}
6209 
6210 		if (*p != ';' && *p != ',') {
6211 			/* End of param or invalid format */
6212 			break;
6213 		}
6214 		p++;
6215 	}
6216 out:
6217 	spin_unlock(&resource_alignment_lock);
6218 	return align;
6219 }
6220 
6221 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6222 					   resource_size_t align, bool resize)
6223 {
6224 	struct resource *r = &dev->resource[bar];
6225 	resource_size_t size;
6226 
6227 	if (!(r->flags & IORESOURCE_MEM))
6228 		return;
6229 
6230 	if (r->flags & IORESOURCE_PCI_FIXED) {
6231 		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6232 			 bar, r, (unsigned long long)align);
6233 		return;
6234 	}
6235 
6236 	size = resource_size(r);
6237 	if (size >= align)
6238 		return;
6239 
6240 	/*
6241 	 * Increase the alignment of the resource.  There are two ways we
6242 	 * can do this:
6243 	 *
6244 	 * 1) Increase the size of the resource.  BARs are aligned on their
6245 	 *    size, so when we reallocate space for this resource, we'll
6246 	 *    allocate it with the larger alignment.  This also prevents
6247 	 *    assignment of any other BARs inside the alignment region, so
6248 	 *    if we're requesting page alignment, this means no other BARs
6249 	 *    will share the page.
6250 	 *
6251 	 *    The disadvantage is that this makes the resource larger than
6252 	 *    the hardware BAR, which may break drivers that compute things
6253 	 *    based on the resource size, e.g., to find registers at a
6254 	 *    fixed offset before the end of the BAR.
6255 	 *
6256 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6257 	 *    set r->start to the desired alignment.  By itself this
6258 	 *    doesn't prevent other BARs being put inside the alignment
6259 	 *    region, but if we realign *every* resource of every device in
6260 	 *    the system, none of them will share an alignment region.
6261 	 *
6262 	 * When the user has requested alignment for only some devices via
6263 	 * the "pci=resource_alignment" argument, "resize" is true and we
6264 	 * use the first method.  Otherwise we assume we're aligning all
6265 	 * devices and we use the second.
6266 	 */
6267 
6268 	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6269 		 bar, r, (unsigned long long)align);
6270 
6271 	if (resize) {
6272 		r->start = 0;
6273 		r->end = align - 1;
6274 	} else {
6275 		r->flags &= ~IORESOURCE_SIZEALIGN;
6276 		r->flags |= IORESOURCE_STARTALIGN;
6277 		r->start = align;
6278 		r->end = r->start + size - 1;
6279 	}
6280 	r->flags |= IORESOURCE_UNSET;
6281 }
6282 
6283 /*
 * This function disables memory decoding and releases memory resources
 * of devices specified by the kernel's boot parameter
 * 'pci=resource_alignment='.  It also rounds up resource sizes to the
 * specified alignment.  Later on, the kernel will assign properly
 * aligned memory resources back to the device.
6289  */
6290 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6291 {
6292 	int i;
6293 	struct resource *r;
6294 	resource_size_t align;
6295 	u16 command;
6296 	bool resize = false;
6297 
6298 	/*
6299 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6300 	 * 3.4.1.11.  Their resources are allocated from the space
6301 	 * described by the VF BARx register in the PF's SR-IOV capability.
6302 	 * We can't influence their alignment here.
6303 	 */
6304 	if (dev->is_virtfn)
6305 		return;
6306 
	/* Check if the specified PCI device is a target for reassignment */
6308 	align = pci_specified_resource_alignment(dev, &resize);
6309 	if (!align)
6310 		return;
6311 
6312 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6313 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6314 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6315 		return;
6316 	}
6317 
6318 	pci_read_config_word(dev, PCI_COMMAND, &command);
6319 	command &= ~PCI_COMMAND_MEMORY;
6320 	pci_write_config_word(dev, PCI_COMMAND, command);
6321 
6322 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6323 		pci_request_resource_alignment(dev, i, align, resize);
6324 
6325 	/*
	 * Need to disable the bridge's resource windows so the kernel
	 * can reassign new resource windows later on.
6329 	 */
6330 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6331 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6332 			r = &dev->resource[i];
6333 			if (!(r->flags & IORESOURCE_MEM))
6334 				continue;
6335 			r->flags |= IORESOURCE_UNSET;
6336 			r->end = resource_size(r) - 1;
6337 			r->start = 0;
6338 		}
6339 		pci_disable_bridge_window(dev);
6340 	}
6341 }
6342 
6343 static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6344 {
6345 	size_t count = 0;
6346 
6347 	spin_lock(&resource_alignment_lock);
6348 	if (resource_alignment_param)
6349 		count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
6350 	spin_unlock(&resource_alignment_lock);
6351 
6352 	/*
6353 	 * When set by the command line, resource_alignment_param will not
6354 	 * have a trailing line feed, which is ugly. So conditionally add
6355 	 * it here.
6356 	 */
	if (count && buf[count - 1] != '\n' && count < PAGE_SIZE - 1) {
		buf[count++] = '\n';
		buf[count] = 0;
	}
6360 	}
6361 
6362 	return count;
6363 }
6364 
6365 static ssize_t resource_alignment_store(struct bus_type *bus,
6366 					const char *buf, size_t count)
6367 {
6368 	char *param = kstrndup(buf, count, GFP_KERNEL);
6369 
6370 	if (!param)
6371 		return -ENOMEM;
6372 
6373 	spin_lock(&resource_alignment_lock);
6374 	kfree(resource_alignment_param);
6375 	resource_alignment_param = param;
6376 	spin_unlock(&resource_alignment_lock);
6377 	return count;
6378 }
6379 
6380 static BUS_ATTR_RW(resource_alignment);
6381 
6382 static int __init pci_resource_alignment_sysfs_init(void)
6383 {
6384 	return bus_create_file(&pci_bus_type,
6385 					&bus_attr_resource_alignment);
6386 }
6387 late_initcall(pci_resource_alignment_sysfs_init);
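
/*
 * Example (illustrative shell usage of the attribute created above; the
 * device address is a placeholder):
 *
 *	echo "12@0000:01:00.0" > /sys/bus/pci/resource_alignment
 *
 * The value is consulted by pci_specified_resource_alignment() the next
 * time device resources are reassigned, not at write time.
 */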
6388 
6389 static void pci_no_domains(void)
6390 {
6391 #ifdef CONFIG_PCI_DOMAINS
6392 	pci_domains_supported = 0;
6393 #endif
6394 }
6395 
6396 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6397 static atomic_t __domain_nr = ATOMIC_INIT(-1);
6398 
6399 static int pci_get_new_domain_nr(void)
6400 {
6401 	return atomic_inc_return(&__domain_nr);
6402 }
6403 
6404 static int of_pci_bus_find_domain_nr(struct device *parent)
6405 {
6406 	static int use_dt_domains = -1;
6407 	int domain = -1;
6408 
6409 	if (parent)
6410 		domain = of_get_pci_domain_nr(parent->of_node);
6411 
6412 	/*
6413 	 * Check DT domain and use_dt_domains values.
6414 	 *
	 * If the DT domain property is valid (domain >= 0) and
6416 	 * use_dt_domains != 0, the DT assignment is valid since this means
6417 	 * we have not previously allocated a domain number by using
6418 	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6419 	 * 1, to indicate that we have just assigned a domain number from
6420 	 * DT.
6421 	 *
	 * If the DT domain property value is not valid (i.e., domain < 0), and we
6423 	 * have not previously assigned a domain number from DT
6424 	 * (use_dt_domains != 1) we should assign a domain number by
6425 	 * using the:
6426 	 *
6427 	 * pci_get_new_domain_nr()
6428 	 *
	 * API and update the use_dt_domains value to keep track of the method we
6430 	 * are using to assign domain numbers (use_dt_domains = 0).
6431 	 *
6432 	 * All other combinations imply we have a platform that is trying
6433 	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6434 	 * which is a recipe for domain mishandling and it is prevented by
6435 	 * invalidating the domain value (domain = -1) and printing a
6436 	 * corresponding error.
6437 	 */
6438 	if (domain >= 0 && use_dt_domains) {
6439 		use_dt_domains = 1;
6440 	} else if (domain < 0 && use_dt_domains != 1) {
6441 		use_dt_domains = 0;
6442 		domain = pci_get_new_domain_nr();
6443 	} else {
		if (parent)
			pr_err("Node %pOF has inconsistent \"linux,pci-domain\" property in DT\n",
			       parent->of_node);
		else
			pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6447 		domain = -1;
6448 	}
6449 
6450 	return domain;
6451 }
6452 
6453 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6454 {
6455 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6456 			       acpi_pci_bus_find_domain_nr(bus);
6457 }
6458 #endif
6459 
6460 /**
6461  * pci_ext_cfg_avail - can we access extended PCI config space?
6462  *
6463  * Returns 1 if we can access PCI extended config space (offsets
6464  * greater than 0xff). This is the default implementation. Architecture
6465  * implementations can override this.
6466  */
6467 int __weak pci_ext_cfg_avail(void)
6468 {
6469 	return 1;
6470 }
6471 
6472 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6473 {
6474 }
6475 EXPORT_SYMBOL(pci_fixup_cardbus);
6476 
6477 static int __init pci_setup(char *str)
6478 {
6479 	while (str) {
6480 		char *k = strchr(str, ',');
6481 		if (k)
6482 			*k++ = 0;
6483 		if (*str && (str = pcibios_setup(str)) && *str) {
6484 			if (!strcmp(str, "nomsi")) {
6485 				pci_no_msi();
6486 			} else if (!strncmp(str, "noats", 5)) {
6487 				pr_info("PCIe: ATS is disabled\n");
6488 				pcie_ats_disabled = true;
6489 			} else if (!strcmp(str, "noaer")) {
6490 				pci_no_aer();
6491 			} else if (!strcmp(str, "earlydump")) {
6492 				pci_early_dump = true;
6493 			} else if (!strncmp(str, "realloc=", 8)) {
6494 				pci_realloc_get_opt(str + 8);
6495 			} else if (!strncmp(str, "realloc", 7)) {
6496 				pci_realloc_get_opt("on");
6497 			} else if (!strcmp(str, "nodomains")) {
6498 				pci_no_domains();
6499 			} else if (!strncmp(str, "noari", 5)) {
6500 				pcie_ari_disabled = true;
6501 			} else if (!strncmp(str, "cbiosize=", 9)) {
6502 				pci_cardbus_io_size = memparse(str + 9, &str);
6503 			} else if (!strncmp(str, "cbmemsize=", 10)) {
6504 				pci_cardbus_mem_size = memparse(str + 10, &str);
6505 			} else if (!strncmp(str, "resource_alignment=", 19)) {
6506 				resource_alignment_param = str + 19;
6507 			} else if (!strncmp(str, "ecrc=", 5)) {
6508 				pcie_ecrc_get_policy(str + 5);
6509 			} else if (!strncmp(str, "hpiosize=", 9)) {
6510 				pci_hotplug_io_size = memparse(str + 9, &str);
6511 			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6512 				pci_hotplug_mmio_size = memparse(str + 11, &str);
6513 			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6514 				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6515 			} else if (!strncmp(str, "hpmemsize=", 10)) {
6516 				pci_hotplug_mmio_size = memparse(str + 10, &str);
6517 				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6518 			} else if (!strncmp(str, "hpbussize=", 10)) {
6519 				pci_hotplug_bus_size =
6520 					simple_strtoul(str + 10, &str, 0);
6521 				if (pci_hotplug_bus_size > 0xff)
6522 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6523 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6524 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6525 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6526 				pcie_bus_config = PCIE_BUS_SAFE;
6527 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6528 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6529 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6530 				pcie_bus_config = PCIE_BUS_PEER2PEER;
6531 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6532 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6533 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6534 				disable_acs_redir_param = str + 18;
6535 			} else {
6536 				pr_err("PCI: Unknown option `%s'\n", str);
6537 			}
6538 		}
6539 		str = k;
6540 	}
6541 	return 0;
6542 }
6543 early_param("pci", pci_setup);
6544 
6545 /*
6546  * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6547  * in pci_setup(), above, to point to data in the __initdata section which
6548  * will be freed after the init sequence is complete. We can't allocate memory
6549  * in pci_setup() because some architectures do not have any memory allocation
6550  * service available during an early_param() call. So we allocate memory and
 * copy the variables here before the init section is freed.
6553  */
6554 static int __init pci_realloc_setup_params(void)
6555 {
6556 	resource_alignment_param = kstrdup(resource_alignment_param,
6557 					   GFP_KERNEL);
6558 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6559 
6560 	return 0;
6561 }
6562 pure_initcall(pci_realloc_setup_params);
6563