// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks (msec) */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000 /* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
	unsigned int upper;

	if (delay_ms) {
		/* Use a 20% upper bound, 1ms minimum */
		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
		usleep_range(delay_ms * USEC_PER_MSEC,
			     (delay_ms + upper) * USEC_PER_MSEC);
	}
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

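/*
 * Example (illustrative, not from this file): booting with
 * "pcie_port_pm=off" on the kernel command line sets
 * pci_bridge_d3_disable, keeping PCIe ports out of D3, while
 * "pcie_port_pm=force" sets pci_bridge_d3_force instead.
 */
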
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

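/*
 * Usage sketch (illustrative, not part of this file): a driver sampling
 * and clearing PCI_STATUS error bits after a suspected bus error ("pdev"
 * is a hypothetical struct pci_dev *):
 *
 *	int err = pci_status_get_and_clear_errors(pdev);
 *	if (err < 0)
 *		return err;	/. config read failed ./
 *	if (err & PCI_STATUS_DETECTED_PARITY)
 *		pci_warn(pdev, "parity error observed\n");
 */
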
#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

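/*
 * Usage sketch (illustrative): mapping BAR 0 from a driver probe path,
 * assuming pci_enable_device() has already succeeded:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */
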
/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

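/*
 * Example (illustrative, not from this file): strings accepted by
 * pci_dev_str_match().  "0000:00:1c.0/00.0" uses the path form to name
 * the function at devfn 00.0 below the bridge at 0000:00:1c.0, while
 * "pci:8086:1533" uses the ID form to match vendor 0x8086, device 0x1533
 * (placeholder IDs); a 0 in any ID field acts as a wildcard.
 */
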
static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

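/*
 * Usage sketch (illustrative): locate the Power Management capability and
 * read PMCSR through the returned offset:
 *
 *	u16 pmcsr;
 *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */
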
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

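/*
 * Usage sketch (illustrative): check whether a device advertises AER and
 * note the offset:
 *
 *	u16 aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		pci_info(pdev, "AER capability at %#x\n", aer);
 */
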
/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);

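/*
 * Usage sketch (illustrative): using the DSN as a stable identifier for
 * logging or device matching:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */
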
static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;
	int ret;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						     PCI_EXT_CAP_ID_VNDR))) {
		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
		if (ret != PCIBIOS_SUCCESSFUL)
			continue;

		if (PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);

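/*
 * Usage sketch (illustrative; the DVSEC ID below is a placeholder, not
 * from this file): find a vendor's DVSEC instance and read its header:
 *
 *	u32 hdr;
 *	u16 pos = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_INTEL, 0x5);
 *
 *	if (pos)
 *		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
 */
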
/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure that contains the resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For a given resource region of a given device, return the resource region
 * of the parent bus that the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_resource_name - Return the name of the PCI resource
 * @dev: PCI device to query
 * @i: index of the resource
 *
 * Return the standard PCI resource (BAR) name according to its index.
 */
const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
{
	static const char * const bar_name[] = {
		"BAR 0",
		"BAR 1",
		"BAR 2",
		"BAR 3",
		"BAR 4",
		"BAR 5",
		"ROM",
#ifdef CONFIG_PCI_IOV
		"VF BAR 0",
		"VF BAR 1",
		"VF BAR 2",
		"VF BAR 3",
		"VF BAR 4",
		"VF BAR 5",
#endif
		"bridge window",	/* "io" included in %pR */
		"bridge window",	/* "mem" included in %pR */
		"bridge window",	/* "mem pref" included in %pR */
	};
	static const char * const cardbus_name[] = {
		"BAR 1",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#ifdef CONFIG_PCI_IOV
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#endif
		"CardBus bridge window 0",	/* I/O */
		"CardBus bridge window 1",	/* I/O */
		"CardBus bridge window 0",	/* mem */
		"CardBus bridge window 1",	/* mem */
	};

	if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
	    i < ARRAY_SIZE(cardbus_name))
		return cardbus_name[i];

	if (i < ARRAY_SIZE(bar_name))
		return bar_name[i];

	return "unknown";
}

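/*
 * Usage sketch (illustrative; "bar" is a hypothetical BAR index): logging
 * a resource with its conventional name:
 *
 *	struct resource *res = &pdev->resource[bar];
 *
 *	pci_info(pdev, "%s %pR\n", pci_resource_name(pdev, bar), res);
 */
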
/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

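/*
 * Usage sketch (illustrative): for a PCIe device, waiting for pending
 * transactions to complete before a function-level reset, via the Device
 * Status register:
 *
 *	u16 devsta = pci_pcie_cap(pdev) + PCI_EXP_DEVSTA;
 *
 *	if (!pci_wait_for_pending(pdev, devsta, PCI_EXP_DEVSTA_TRPND))
 *		pci_warn(pdev, "transactions still pending\n");
 */
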
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * Only for devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if (PCI_POSSIBLE_ERROR(pmcsr)) {
			dev->current_state = PCI_D3cold;
			return;
		}
		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	bool retrain = false;
	struct pci_dev *bridge;

	if (pci_is_pcie(dev)) {
		bridge = pci_upstream_bridge(dev);
		if (bridge)
			retrain = true;
	}

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
	 * the read (except when CRS SV is enabled and the read was for the
	 * Vendor ID; in that case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	for (;;) {
		u32 id;

		pci_read_config_dword(dev, PCI_COMMAND, &id);
		if (!PCI_POSSIBLE_ERROR(id))
			break;

		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > PCI_RESET_WAIT) {
			if (retrain) {
				retrain = false;
				if (pcie_failed_link_retrain(bridge)) {
					delay = 1;
					continue;
				}
			}
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);
		}

		msleep(delay);
		delay *= 2;
	}

	if (delay > PCI_RESET_WAIT)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);
	else
		pci_dbg(dev, "ready %dms after %s\n", delay - 1,
			reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 *
 * On failure, return a negative error code.  Always return failure if @dev
 * lacks a Power Management Capability, even if the platform was able to
 * put the device in D0 via non-PCI means.
 */
int pci_power_up(struct pci_dev *dev)
{
	bool need_restore;
	pci_power_t state;
	u16 pmcsr;

	platform_pci_set_power_state(dev, PCI_D0);

	if (!dev->pm_cap) {
		state = platform_pci_get_power_state(dev);
		if (state == PCI_UNKNOWN)
			dev->current_state = PCI_D0;
		else
			dev->current_state = state;

		return -EIO;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
			pci_power_name(dev->current_state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	state = pmcsr & PCI_PM_CTRL_STATE_MASK;

	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);

	if (state == PCI_D0)
		goto end;

	/*
	 * Force the entire word to 0. This doesn't affect PME_Status, disables
	 * PME_En, and sets PowerState to 0.
	 */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);

	/* Mandatory transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

end:
	dev->current_state = PCI_D0;
	if (need_restore)
		return 1;

	return 0;
}

/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 * @locked: whether pci_bus_sem is held
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change, restore its BARs if they might be lost and
 * reconfigure ASPM in accordance with the new power state.
 *
 * If pci_restore_state() is going to be called right after a power state change
 * to D0, it is more efficient to use pci_power_up() directly instead of this
 * function.
 */
static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
	u16 pmcsr;
	int ret;

	ret = pci_power_up(dev);
	if (ret < 0) {
		if (dev->current_state == PCI_D0)
			return 0;

		return ret;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != PCI_D0) {
		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
				     pci_power_name(dev->current_state));
	} else if (ret > 0) {
		/*
		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
		 * from D3hot to D0 _may_ perform an internal reset, thereby
		 * going to "D0 Uninitialized" rather than "D0 Initialized".
		 * For example, at least some versions of the 3c905B and the
		 * 3c556B exhibit this behaviour.
		 *
		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
		 * devices in a D3hot state at boot.  Consequently, we need to
		 * restore at least the BARs so that the device will be
		 * accessible to its driver.
		 */
		pci_restore_bars(dev);
	}

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
{
	if (!bus)
		return;

	if (locked)
		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
	else
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 * @locked: whether pci_bus_sem is held
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the device is already in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return -EIO;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= state;

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_set_full_power_state(dev, locked);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	if (state == PCI_D3cold) {
		/*
		 * To put the device in D3cold, put it into D3hot in the native
		 * way, then put it into D3cold using platform ops.
		 */
		error = pci_set_low_power_state(dev, PCI_D3hot, locked);

		if (pci_platform_power_transition(dev, PCI_D3cold))
			return error;

		/* Powering off a bridge may power off the whole hierarchy */
		if (dev->current_state == PCI_D3cold)
			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
	} else {
		error = pci_set_low_power_state(dev, state, locked);

		if (pci_platform_power_transition(dev, state))
			return error;
	}

	return 0;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if the device is already in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	return __pci_set_power_state(dev, state, false);
}
EXPORT_SYMBOL(pci_set_power_state);

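/*
 * Usage sketch (illustrative): requesting D3hot from a driver; per the
 * return values above, 0 also covers the cases where an unsupported state
 * is silently ignored:
 *
 *	int ret = pci_set_power_state(pdev, PCI_D3hot);
 *
 *	if (ret)
 *		pci_warn(pdev, "cannot enter D3hot: %d\n", ret);
 */
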
int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
{
	lockdep_assert_held(&pci_bus_sem);

	return __pci_set_power_state(dev, state, true);
}
EXPORT_SYMBOL(pci_set_power_state_locked);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(dev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
#endif
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
	pci_bridge_reconfigure_ltr(dev);

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u32 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "save config %#04x: %#010x\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	pci_save_ptm_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

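/*
 * Usage sketch (illustrative): the usual suspend-side pairing, with
 * pci_restore_state() called on the resume path:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */
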
1808 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1809 				     u32 saved_val, int retry, bool force)
1810 {
1811 	u32 val;
1812 
1813 	pci_read_config_dword(pdev, offset, &val);
1814 	if (!force && val == saved_val)
1815 		return;
1816 
1817 	for (;;) {
1818 		pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1819 			offset, val, saved_val);
1820 		pci_write_config_dword(pdev, offset, saved_val);
1821 		if (retry-- <= 0)
1822 			return;
1823 
1824 		pci_read_config_dword(pdev, offset, &val);
1825 		if (val == saved_val)
1826 			return;
1827 
1828 		mdelay(1);
1829 	}
1830 }
1831 
1832 static void pci_restore_config_space_range(struct pci_dev *pdev,
1833 					   int start, int end, int retry,
1834 					   bool force)
1835 {
1836 	int index;
1837 
1838 	for (index = end; index >= start; index--)
1839 		pci_restore_config_dword(pdev, 4 * index,
1840 					 pdev->saved_config_space[index],
1841 					 retry, force);
1842 }
1843 
1844 static void pci_restore_config_space(struct pci_dev *pdev)
1845 {
1846 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1847 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1848 		/* Restore BARs before the command register. */
1849 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1850 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1851 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1852 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1853 
1854 		/*
1855 		 * Force rewriting of prefetch registers to avoid S3 resume
1856 		 * issues on Intel PCI bridges that occur when these
1857 		 * registers are not explicitly written.
1858 		 */
1859 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1860 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1861 	} else {
1862 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1863 	}
1864 }
1865 
1866 static void pci_restore_rebar_state(struct pci_dev *pdev)
1867 {
1868 	unsigned int pos, nbars, i;
1869 	u32 ctrl;
1870 
1871 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1872 	if (!pos)
1873 		return;
1874 
1875 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1876 	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
1877 
1878 	for (i = 0; i < nbars; i++, pos += 8) {
1879 		struct resource *res;
1880 		int bar_idx, size;
1881 
1882 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1883 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1884 		res = pdev->resource + bar_idx;
1885 		size = pci_rebar_bytes_to_size(resource_size(res));
1886 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1887 		ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
1888 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1889 	}
1890 }
1891 
1892 /**
1893  * pci_restore_state - Restore the saved state of a PCI device
1894  * @dev: PCI device that we're dealing with
1895  */
1896 void pci_restore_state(struct pci_dev *dev)
1897 {
1898 	if (!dev->state_saved)
1899 		return;
1900 
1901 	/*
1902 	 * Restore max latencies (in the LTR capability) before enabling
1903 	 * LTR itself (in the PCIe capability).
1904 	 */
1905 	pci_restore_ltr_state(dev);
1906 
1907 	pci_restore_pcie_state(dev);
1908 	pci_restore_pasid_state(dev);
1909 	pci_restore_pri_state(dev);
1910 	pci_restore_ats_state(dev);
1911 	pci_restore_vc_state(dev);
1912 	pci_restore_rebar_state(dev);
1913 	pci_restore_dpc_state(dev);
1914 	pci_restore_ptm_state(dev);
1915 
1916 	pci_aer_clear_status(dev);
1917 	pci_restore_aer_state(dev);
1918 
1919 	pci_restore_config_space(dev);
1920 
1921 	pci_restore_pcix_state(dev);
1922 	pci_restore_msi_state(dev);
1923 
1924 	/* Restore ACS and IOV configuration state */
1925 	pci_enable_acs(dev);
1926 	pci_restore_iov_state(dev);
1927 
1928 	dev->state_saved = false;
1929 }
1930 EXPORT_SYMBOL(pci_restore_state);
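
/*
 * Illustrative sketch: the matching resume hook brings the device back
 * to D0 and then restores the state saved in the suspend hook.  The
 * foo_resume() name is hypothetical.
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */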
1931 
1932 struct pci_saved_state {
1933 	u32 config_space[16];
1934 	struct pci_cap_saved_data cap[];
1935 };
1936 
1937 /**
1938  * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device's saved state.
1940  * @dev: PCI device that we're dealing with
1941  *
1942  * Return NULL if no state or error.
1943  */
1944 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1945 {
1946 	struct pci_saved_state *state;
1947 	struct pci_cap_saved_state *tmp;
1948 	struct pci_cap_saved_data *cap;
1949 	size_t size;
1950 
1951 	if (!dev->state_saved)
1952 		return NULL;
1953 
1954 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1955 
1956 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1957 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1958 
1959 	state = kzalloc(size, GFP_KERNEL);
1960 	if (!state)
1961 		return NULL;
1962 
1963 	memcpy(state->config_space, dev->saved_config_space,
1964 	       sizeof(state->config_space));
1965 
1966 	cap = state->cap;
1967 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1968 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1969 		memcpy(cap, &tmp->cap, len);
1970 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1971 	}
	/* An empty (zeroed) cap entry terminates the list */
1973 
1974 	return state;
1975 }
1976 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1977 
1978 /**
1979  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1980  * @dev: PCI device that we're dealing with
1981  * @state: Saved state returned from pci_store_saved_state()
1982  */
1983 int pci_load_saved_state(struct pci_dev *dev,
1984 			 struct pci_saved_state *state)
1985 {
1986 	struct pci_cap_saved_data *cap;
1987 
1988 	dev->state_saved = false;
1989 
1990 	if (!state)
1991 		return 0;
1992 
1993 	memcpy(dev->saved_config_space, state->config_space,
1994 	       sizeof(state->config_space));
1995 
1996 	cap = state->cap;
1997 	while (cap->size) {
1998 		struct pci_cap_saved_state *tmp;
1999 
2000 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
2001 		if (!tmp || tmp->cap.size != cap->size)
2002 			return -EINVAL;
2003 
2004 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
2005 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
2006 		       sizeof(struct pci_cap_saved_data) + cap->size);
2007 	}
2008 
2009 	dev->state_saved = true;
2010 	return 0;
2011 }
2012 EXPORT_SYMBOL_GPL(pci_load_saved_state);
2013 
2014 /**
2015  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
2016  *				   and free the memory allocated for it.
2017  * @dev: PCI device that we're dealing with
2018  * @state: Pointer to saved state returned from pci_store_saved_state()
2019  */
2020 int pci_load_and_free_saved_state(struct pci_dev *dev,
2021 				  struct pci_saved_state **state)
2022 {
2023 	int ret = pci_load_saved_state(dev, *state);
2024 	kfree(*state);
2025 	*state = NULL;
2026 	return ret;
2027 }
2028 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
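
/*
 * Illustrative sketch: pci_store_saved_state() and
 * pci_load_and_free_saved_state() form a round trip that lets a caller
 * keep a private snapshot of the saved state and reapply it later
 * (device-passthrough code uses a pattern like this).  Error handling
 * is elided.
 *
 *	struct pci_saved_state *snapshot;
 *
 *	pci_save_state(pdev);
 *	snapshot = pci_store_saved_state(pdev);
 *	...
 *	pci_load_and_free_saved_state(pdev, &snapshot);
 *	pci_restore_state(pdev);
 */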
2029 
2030 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
2031 {
2032 	return pci_enable_resources(dev, bars);
2033 }
2034 
2035 static int do_pci_enable_device(struct pci_dev *dev, int bars)
2036 {
2037 	int err;
2038 	struct pci_dev *bridge;
2039 	u16 cmd;
2040 	u8 pin;
2041 
2042 	err = pci_set_power_state(dev, PCI_D0);
2043 	if (err < 0 && err != -EIO)
2044 		return err;
2045 
2046 	bridge = pci_upstream_bridge(dev);
2047 	if (bridge)
2048 		pcie_aspm_powersave_config_link(bridge);
2049 
2050 	err = pcibios_enable_device(dev, bars);
2051 	if (err < 0)
2052 		return err;
2053 	pci_fixup_device(pci_fixup_enable, dev);
2054 
2055 	if (dev->msi_enabled || dev->msix_enabled)
2056 		return 0;
2057 
2058 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2059 	if (pin) {
2060 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
2061 		if (cmd & PCI_COMMAND_INTX_DISABLE)
2062 			pci_write_config_word(dev, PCI_COMMAND,
2063 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
2064 	}
2065 
2066 	return 0;
2067 }
2068 
2069 /**
2070  * pci_reenable_device - Resume abandoned device
2071  * @dev: PCI device to be resumed
2072  *
 * NOTE: This function is a backend of pci_default_resume() and is not
 * supposed to be called by normal code; write a proper resume handler and
 * use that instead.
2075  */
2076 int pci_reenable_device(struct pci_dev *dev)
2077 {
2078 	if (pci_is_enabled(dev))
2079 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
2080 	return 0;
2081 }
2082 EXPORT_SYMBOL(pci_reenable_device);
2083 
2084 static void pci_enable_bridge(struct pci_dev *dev)
2085 {
2086 	struct pci_dev *bridge;
2087 	int retval;
2088 
2089 	bridge = pci_upstream_bridge(dev);
2090 	if (bridge)
2091 		pci_enable_bridge(bridge);
2092 
2093 	if (pci_is_enabled(dev)) {
2094 		if (!dev->is_busmaster)
2095 			pci_set_master(dev);
2096 		return;
2097 	}
2098 
2099 	retval = pci_enable_device(dev);
2100 	if (retval)
2101 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
2102 			retval);
2103 	pci_set_master(dev);
2104 }
2105 
2106 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2107 {
2108 	struct pci_dev *bridge;
2109 	int err;
2110 	int i, bars = 0;
2111 
2112 	/*
2113 	 * Power state could be unknown at this point, either due to a fresh
2114 	 * boot or a device removal call.  So get the current power state
2115 	 * so that things like MSI message writing will behave as expected
2116 	 * (e.g. if the device really is in D0 at enable time).
2117 	 */
2118 	pci_update_current_state(dev, dev->current_state);
2119 
2120 	if (atomic_inc_return(&dev->enable_cnt) > 1)
2121 		return 0;		/* already enabled */
2122 
2123 	bridge = pci_upstream_bridge(dev);
2124 	if (bridge)
2125 		pci_enable_bridge(bridge);
2126 
	/* Skip only the SR-IOV resources */
2128 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2129 		if (dev->resource[i].flags & flags)
2130 			bars |= (1 << i);
2131 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2132 		if (dev->resource[i].flags & flags)
2133 			bars |= (1 << i);
2134 
2135 	err = do_pci_enable_device(dev, bars);
2136 	if (err < 0)
2137 		atomic_dec(&dev->enable_cnt);
2138 	return err;
2139 }
2140 
2141 /**
2142  * pci_enable_device_io - Initialize a device for use with IO space
2143  * @dev: PCI device to be initialized
2144  *
2145  * Initialize device before it's used by a driver. Ask low-level code
2146  * to enable I/O resources. Wake up the device if it was suspended.
2147  * Beware, this function can fail.
2148  */
2149 int pci_enable_device_io(struct pci_dev *dev)
2150 {
2151 	return pci_enable_device_flags(dev, IORESOURCE_IO);
2152 }
2153 EXPORT_SYMBOL(pci_enable_device_io);
2154 
2155 /**
2156  * pci_enable_device_mem - Initialize a device for use with Memory space
2157  * @dev: PCI device to be initialized
2158  *
2159  * Initialize device before it's used by a driver. Ask low-level code
2160  * to enable Memory resources. Wake up the device if it was suspended.
2161  * Beware, this function can fail.
2162  */
2163 int pci_enable_device_mem(struct pci_dev *dev)
2164 {
2165 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2166 }
2167 EXPORT_SYMBOL(pci_enable_device_mem);
2168 
2169 /**
2170  * pci_enable_device - Initialize device before it's used by a driver.
2171  * @dev: PCI device to be initialized
2172  *
2173  * Initialize device before it's used by a driver. Ask low-level code
2174  * to enable I/O and memory. Wake up the device if it was suspended.
2175  * Beware, this function can fail.
2176  *
2177  * Note we don't actually enable the device many times if we call
2178  * this function repeatedly (we just increment the count).
2179  */
2180 int pci_enable_device(struct pci_dev *dev)
2181 {
2182 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2183 }
2184 EXPORT_SYMBOL(pci_enable_device);
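
/*
 * Illustrative sketch: an unmanaged probe path pairs pci_enable_device()
 * with pci_disable_device() on its error path.  The foo_* names are
 * hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc) {
 *			pci_disable_device(pdev);
 *			return rc;
 *		}
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */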
2185 
2186 /*
2187  * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
2188  * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
2189  * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using the managed PCI device enable interface.
2191  */
2192 struct pci_devres {
2193 	unsigned int enabled:1;
2194 	unsigned int pinned:1;
2195 	unsigned int orig_intx:1;
2196 	unsigned int restore_intx:1;
2197 	unsigned int mwi:1;
2198 	u32 region_mask;
2199 };
2200 
2201 static void pcim_release(struct device *gendev, void *res)
2202 {
2203 	struct pci_dev *dev = to_pci_dev(gendev);
2204 	struct pci_devres *this = res;
2205 	int i;
2206 
2207 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2208 		if (this->region_mask & (1 << i))
2209 			pci_release_region(dev, i);
2210 
2211 	if (this->mwi)
2212 		pci_clear_mwi(dev);
2213 
2214 	if (this->restore_intx)
2215 		pci_intx(dev, this->orig_intx);
2216 
2217 	if (this->enabled && !this->pinned)
2218 		pci_disable_device(dev);
2219 }
2220 
2221 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2222 {
2223 	struct pci_devres *dr, *new_dr;
2224 
2225 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2226 	if (dr)
2227 		return dr;
2228 
2229 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2230 	if (!new_dr)
2231 		return NULL;
2232 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
2233 }
2234 
2235 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2236 {
2237 	if (pci_is_managed(pdev))
2238 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2239 	return NULL;
2240 }
2241 
2242 /**
2243  * pcim_enable_device - Managed pci_enable_device()
2244  * @pdev: PCI device to be initialized
2245  *
2246  * Managed pci_enable_device().
2247  */
2248 int pcim_enable_device(struct pci_dev *pdev)
2249 {
2250 	struct pci_devres *dr;
2251 	int rc;
2252 
2253 	dr = get_pci_dr(pdev);
2254 	if (unlikely(!dr))
2255 		return -ENOMEM;
2256 	if (dr->enabled)
2257 		return 0;
2258 
2259 	rc = pci_enable_device(pdev);
2260 	if (!rc) {
2261 		pdev->is_managed = 1;
2262 		dr->enabled = 1;
2263 	}
2264 	return rc;
2265 }
2266 EXPORT_SYMBOL(pcim_enable_device);
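
/*
 * Illustrative sketch: with the managed variant there is no explicit
 * pci_disable_device() in the error or remove paths, because
 * pcim_release() runs automatically on driver detach.  foo_probe() is
 * hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */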
2267 
2268 /**
2269  * pcim_pin_device - Pin managed PCI device
2270  * @pdev: PCI device to pin
2271  *
2272  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
2273  * driver detach.  @pdev must have been enabled with
2274  * pcim_enable_device().
2275  */
2276 void pcim_pin_device(struct pci_dev *pdev)
2277 {
2278 	struct pci_devres *dr;
2279 
2280 	dr = find_pci_dr(pdev);
2281 	WARN_ON(!dr || !dr->enabled);
2282 	if (dr)
2283 		dr->pinned = 1;
2284 }
2285 EXPORT_SYMBOL(pcim_pin_device);
2286 
/**
 * pcibios_device_add - provide arch-specific hooks when adding device dev
2289  * @dev: the PCI device being added
2290  *
 * Permits the platform to provide architecture-specific functionality when
2292  * devices are added. This is the default implementation. Architecture
2293  * implementations can override this.
2294  */
2295 int __weak pcibios_device_add(struct pci_dev *dev)
2296 {
2297 	return 0;
2298 }
2299 
2300 /**
 * pcibios_release_device - provide arch-specific hooks when releasing
2302  *			    device dev
2303  * @dev: the PCI device being released
2304  *
 * Permits the platform to provide architecture-specific functionality when
2306  * devices are released. This is the default implementation. Architecture
2307  * implementations can override this.
2308  */
2309 void __weak pcibios_release_device(struct pci_dev *dev) {}
2310 
2311 /**
 * pcibios_disable_device - disable arch-specific PCI resources for device dev
2313  * @dev: the PCI device to disable
2314  *
 * Disables architecture-specific PCI resources for the device. This
2316  * is the default implementation. Architecture implementations can
2317  * override this.
2318  */
2319 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2320 
2321 /**
2322  * pcibios_penalize_isa_irq - penalize an ISA IRQ
2323  * @irq: ISA IRQ to penalize
2324  * @active: IRQ active or not
2325  *
2326  * Permits the platform to provide architecture-specific functionality when
2327  * penalizing ISA IRQs. This is the default implementation. Architecture
2328  * implementations can override this.
2329  */
2330 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2331 
2332 static void do_pci_disable_device(struct pci_dev *dev)
2333 {
2334 	u16 pci_command;
2335 
2336 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2337 	if (pci_command & PCI_COMMAND_MASTER) {
2338 		pci_command &= ~PCI_COMMAND_MASTER;
2339 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2340 	}
2341 
2342 	pcibios_disable_device(dev);
2343 }
2344 
2345 /**
2346  * pci_disable_enabled_device - Disable device without updating enable_cnt
2347  * @dev: PCI device to disable
2348  *
2349  * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
2351  */
2352 void pci_disable_enabled_device(struct pci_dev *dev)
2353 {
2354 	if (pci_is_enabled(dev))
2355 		do_pci_disable_device(dev);
2356 }
2357 
2358 /**
2359  * pci_disable_device - Disable PCI device after use
2360  * @dev: PCI device to be disabled
2361  *
2362  * Signal to the system that the PCI device is not in use by the system
2363  * anymore.  This only involves disabling PCI bus-mastering, if active.
2364  *
2365  * Note we don't actually disable the device until all callers of
2366  * pci_enable_device() have called pci_disable_device().
2367  */
2368 void pci_disable_device(struct pci_dev *dev)
2369 {
2370 	struct pci_devres *dr;
2371 
2372 	dr = find_pci_dr(dev);
2373 	if (dr)
2374 		dr->enabled = 0;
2375 
2376 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2377 		      "disabling already-disabled device");
2378 
2379 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2380 		return;
2381 
2382 	do_pci_disable_device(dev);
2383 
2384 	dev->is_busmaster = 0;
2385 }
2386 EXPORT_SYMBOL(pci_disable_device);
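
/*
 * Illustrative note: enable/disable calls are reference counted, so they
 * are safe to nest as long as they balance:
 *
 *	pci_enable_device(pdev);	enable_cnt 0 -> 1, device enabled
 *	pci_enable_device(pdev);	enable_cnt 1 -> 2, no-op
 *	pci_disable_device(pdev);	enable_cnt 2 -> 1, still enabled
 *	pci_disable_device(pdev);	enable_cnt 1 -> 0, device disabled
 */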
2387 
2388 /**
2389  * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device being reset
2391  * @state: Reset state to enter into
2392  *
2393  * Set the PCIe reset state for the device. This is the default
2394  * implementation. Architecture implementations can override this.
2395  */
2396 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2397 					enum pcie_reset_state state)
2398 {
2399 	return -EINVAL;
2400 }
2401 
2402 /**
2403  * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device being reset
2405  * @state: Reset state to enter into
2406  *
2407  * Sets the PCI reset state for the device.
2408  */
2409 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2410 {
2411 	return pcibios_set_pcie_reset_state(dev, state);
2412 }
2413 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2414 
2415 #ifdef CONFIG_PCIEAER
2416 void pcie_clear_device_status(struct pci_dev *dev)
2417 {
2418 	u16 sta;
2419 
2420 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2421 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2422 }
2423 #endif
2424 
2425 /**
2426  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2427  * @dev: PCIe root port or event collector.
2428  */
2429 void pcie_clear_root_pme_status(struct pci_dev *dev)
2430 {
2431 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2432 }
2433 
2434 /**
2435  * pci_check_pme_status - Check if given device has generated PME.
2436  * @dev: Device to check.
2437  *
2438  * Check the PME status of the device and if set, clear it and clear PME enable
2439  * (if set).  Return 'true' if PME status and PME enable were both set or
2440  * 'false' otherwise.
2441  */
2442 bool pci_check_pme_status(struct pci_dev *dev)
2443 {
2444 	int pmcsr_pos;
2445 	u16 pmcsr;
2446 	bool ret = false;
2447 
2448 	if (!dev->pm_cap)
2449 		return false;
2450 
2451 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2452 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2453 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2454 		return false;
2455 
2456 	/* Clear PME status. */
2457 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2458 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2459 		/* Disable PME to avoid interrupt flood. */
2460 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2461 		ret = true;
2462 	}
2463 
2464 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2465 
2466 	return ret;
2467 }
2468 
2469 /**
2470  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2471  * @dev: Device to handle.
2472  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2473  *
2474  * Check if @dev has generated PME and queue a resume request for it in that
2475  * case.
2476  */
2477 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2478 {
2479 	if (pme_poll_reset && dev->pme_poll)
2480 		dev->pme_poll = false;
2481 
2482 	if (pci_check_pme_status(dev)) {
2483 		pci_wakeup_event(dev);
2484 		pm_request_resume(&dev->dev);
2485 	}
2486 	return 0;
2487 }
2488 
2489 /**
2490  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2491  * @bus: Top bus of the subtree to walk.
2492  */
2493 void pci_pme_wakeup_bus(struct pci_bus *bus)
2494 {
2495 	if (bus)
2496 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2497 }
}

2501  * pci_pme_capable - check the capability of PCI device to generate PME#
2502  * @dev: PCI device to handle.
2503  * @state: PCI state from which device will issue PME#.
2504  */
2505 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2506 {
2507 	if (!dev->pm_cap)
2508 		return false;
2509 
2510 	return !!(dev->pme_support & (1 << state));
2511 }
2512 EXPORT_SYMBOL(pci_pme_capable);
2513 
2514 static void pci_pme_list_scan(struct work_struct *work)
2515 {
2516 	struct pci_pme_device *pme_dev, *n;
2517 
2518 	mutex_lock(&pci_pme_list_mutex);
2519 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2520 		struct pci_dev *pdev = pme_dev->dev;
2521 
2522 		if (pdev->pme_poll) {
2523 			struct pci_dev *bridge = pdev->bus->self;
2524 			struct device *dev = &pdev->dev;
2525 			struct device *bdev = bridge ? &bridge->dev : NULL;
2526 			int bref = 0;
2527 
2528 			/*
2529 			 * If we have a bridge, it should be in an active/D0
2530 			 * state or the configuration space of subordinate
2531 			 * devices may not be accessible or stable over the
2532 			 * course of the call.
2533 			 */
2534 			if (bdev) {
2535 				bref = pm_runtime_get_if_active(bdev, true);
2536 				if (!bref)
2537 					continue;
2538 
2539 				if (bridge->current_state != PCI_D0)
2540 					goto put_bridge;
2541 			}
2542 
2543 			/*
2544 			 * The device itself should be suspended but config
2545 			 * space must be accessible, therefore it cannot be in
2546 			 * D3cold.
2547 			 */
2548 			if (pm_runtime_suspended(dev) &&
2549 			    pdev->current_state != PCI_D3cold)
2550 				pci_pme_wakeup(pdev, NULL);
2551 
2552 put_bridge:
2553 			if (bref > 0)
2554 				pm_runtime_put(bdev);
2555 		} else {
2556 			list_del(&pme_dev->list);
2557 			kfree(pme_dev);
2558 		}
2559 	}
2560 	if (!list_empty(&pci_pme_list))
2561 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2562 				   msecs_to_jiffies(PME_TIMEOUT));
2563 	mutex_unlock(&pci_pme_list_mutex);
2564 }
2565 
2566 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2567 {
2568 	u16 pmcsr;
2569 
2570 	if (!dev->pme_support)
2571 		return;
2572 
2573 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2574 	/* Clear PME_Status by writing 1 to it and enable PME# */
2575 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2576 	if (!enable)
2577 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2578 
2579 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2580 }
2581 
2582 /**
2583  * pci_pme_restore - Restore PME configuration after config space restore.
2584  * @dev: PCI device to update.
2585  */
2586 void pci_pme_restore(struct pci_dev *dev)
2587 {
2588 	u16 pmcsr;
2589 
2590 	if (!dev->pme_support)
2591 		return;
2592 
2593 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2594 	if (dev->wakeup_prepared) {
2595 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2596 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2597 	} else {
2598 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2599 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2600 	}
2601 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2602 }
2603 
2604 /**
2605  * pci_pme_active - enable or disable PCI device's PME# function
2606  * @dev: PCI device to handle.
2607  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2608  *
2609  * The caller must verify that the device is capable of generating PME# before
2610  * calling this function with @enable equal to 'true'.
2611  */
2612 void pci_pme_active(struct pci_dev *dev, bool enable)
2613 {
2614 	__pci_pme_active(dev, enable);
2615 
2616 	/*
2617 	 * PCI (as opposed to PCIe) PME requires that the device have
2618 	 * its PME# line hooked up correctly. Not all hardware vendors
2619 	 * do this, so the PME never gets delivered and the device
2620 	 * remains asleep. The easiest way around this is to
2621 	 * periodically walk the list of suspended devices and check
2622 	 * whether any have their PME flag set. The assumption is that
2623 	 * we'll wake up often enough anyway that this won't be a huge
2624 	 * hit, and the power savings from the devices will still be a
2625 	 * win.
2626 	 *
	 * Although PCIe uses an in-band PME message instead of the PME# line
2628 	 * to report PME, PME does not work for some PCIe devices in
2629 	 * reality.  For example, there are devices that set their PME
2630 	 * status bits, but don't really bother to send a PME message;
2631 	 * there are PCI Express Root Ports that don't bother to
2632 	 * trigger interrupts when they receive PME messages from the
2633 	 * devices below.  So PME poll is used for PCIe devices too.
2634 	 */
2635 
2636 	if (dev->pme_poll) {
2637 		struct pci_pme_device *pme_dev;
2638 		if (enable) {
2639 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2640 					  GFP_KERNEL);
2641 			if (!pme_dev) {
2642 				pci_warn(dev, "can't enable PME#\n");
2643 				return;
2644 			}
2645 			pme_dev->dev = dev;
2646 			mutex_lock(&pci_pme_list_mutex);
2647 			list_add(&pme_dev->list, &pci_pme_list);
2648 			if (list_is_singular(&pci_pme_list))
2649 				queue_delayed_work(system_freezable_wq,
2650 						   &pci_pme_work,
2651 						   msecs_to_jiffies(PME_TIMEOUT));
2652 			mutex_unlock(&pci_pme_list_mutex);
2653 		} else {
2654 			mutex_lock(&pci_pme_list_mutex);
2655 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2656 				if (pme_dev->dev == dev) {
2657 					list_del(&pme_dev->list);
2658 					kfree(pme_dev);
2659 					break;
2660 				}
2661 			}
2662 			mutex_unlock(&pci_pme_list_mutex);
2663 		}
2664 	}
2665 
2666 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2667 }
2668 EXPORT_SYMBOL(pci_pme_active);
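
/*
 * Illustrative sketch: per the requirement above, callers check PME
 * capability for the intended state before enabling PME#:
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		pci_pme_active(pdev, true);
 */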
2669 
2670 /**
2671  * __pci_enable_wake - enable PCI device as wakeup event source
2672  * @dev: PCI device affected
2673  * @state: PCI state from which device will issue wakeup events
2674  * @enable: True to enable event generation; false to disable
2675  *
2676  * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
2678  * called automatically by this routine.
2679  *
2680  * Devices with legacy power management (no standard PCI PM capabilities)
2681  * always require such platform hooks.
2682  *
2683  * RETURN VALUE:
2684  * 0 is returned on success
2685  * -EINVAL is returned if device is not supposed to wake up the system
2686  * Error code depending on the platform is returned if both the platform and
2687  * the native mechanism fail to enable the generation of wake-up events
2688  */
2689 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2690 {
2691 	int ret = 0;
2692 
2693 	/*
2694 	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices, which is set up
2696 	 * elsewhere, so skip them. However, bridges that are
2697 	 * power-manageable may signal wakeup for themselves (for example,
2698 	 * on a hotplug event) and they need to be covered here.
2699 	 */
2700 	if (!pci_power_manageable(dev))
2701 		return 0;
2702 
2703 	/* Don't do the same thing twice in a row for one device. */
2704 	if (!!enable == !!dev->wakeup_prepared)
2705 		return 0;
2706 
2707 	/*
2708 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2709 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2710 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2711 	 */
2712 
2713 	if (enable) {
2714 		int error;
2715 
2716 		/*
2717 		 * Enable PME signaling if the device can signal PME from
2718 		 * D3cold regardless of whether or not it can signal PME from
2719 		 * the current target state, because that will allow it to
2720 		 * signal PME when the hierarchy above it goes into D3cold and
2721 		 * the device itself ends up in D3cold as a result of that.
2722 		 */
2723 		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2724 			pci_pme_active(dev, true);
2725 		else
2726 			ret = 1;
2727 		error = platform_pci_set_wakeup(dev, true);
2728 		if (ret)
2729 			ret = error;
2730 		if (!ret)
2731 			dev->wakeup_prepared = true;
2732 	} else {
2733 		platform_pci_set_wakeup(dev, false);
2734 		pci_pme_active(dev, false);
2735 		dev->wakeup_prepared = false;
2736 	}
2737 
2738 	return ret;
2739 }
2740 
2741 /**
2742  * pci_enable_wake - change wakeup settings for a PCI device
2743  * @pci_dev: Target device
2744  * @state: PCI state from which device will issue wakeup events
2745  * @enable: Whether or not to enable event generation
2746  *
2747  * If @enable is set, check device_may_wakeup() for the device before calling
2748  * __pci_enable_wake() for it.
2749  */
2750 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2751 {
2752 	if (enable && !device_may_wakeup(&pci_dev->dev))
2753 		return -EINVAL;
2754 
2755 	return __pci_enable_wake(pci_dev, state, enable);
2756 }
2757 EXPORT_SYMBOL(pci_enable_wake);
2758 
2759 /**
2760  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2761  * @dev: PCI device to prepare
2762  * @enable: True to enable wake-up event generation; false to disable
2763  *
2764  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2765  * and this function allows them to set that up cleanly - pci_enable_wake()
2766  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2767  * ordering constraints.
2768  *
2769  * This function only returns error code if the device is not allowed to wake
2770  * up the system from sleep or it is not capable of generating PME# from both
2771  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2772  */
2773 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2774 {
2775 	return pci_pme_capable(dev, PCI_D3cold) ?
2776 			pci_enable_wake(dev, PCI_D3cold, enable) :
2777 			pci_enable_wake(dev, PCI_D3hot, enable);
2778 }
2779 EXPORT_SYMBOL(pci_wake_from_d3);
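
/*
 * Illustrative sketch: a driver suspend callback that arms wakeup from
 * D3 only when user space has allowed it.  foo_suspend() is
 * hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		return pci_wake_from_d3(pdev, device_may_wakeup(dev));
 *	}
 */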
2780 
2781 /**
2782  * pci_target_state - find an appropriate low power state for a given PCI dev
2783  * @dev: PCI device
2784  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2785  *
2786  * Use underlying platform code to find a supported low power state for @dev.
2787  * If the platform can't manage @dev, return the deepest state from which it
2788  * can generate wake events, based on any available PME info.
2789  */
2790 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2791 {
2792 	if (platform_pci_power_manageable(dev)) {
2793 		/*
2794 		 * Call the platform to find the target state for the device.
2795 		 */
2796 		pci_power_t state = platform_pci_choose_state(dev);
2797 
2798 		switch (state) {
2799 		case PCI_POWER_ERROR:
2800 		case PCI_UNKNOWN:
2801 			return PCI_D3hot;
2802 
2803 		case PCI_D1:
2804 		case PCI_D2:
2805 			if (pci_no_d1d2(dev))
2806 				return PCI_D3hot;
2807 		}
2808 
2809 		return state;
2810 	}
2811 
2812 	/*
2813 	 * If the device is in D3cold even though it's not power-manageable by
2814 	 * the platform, it may have been powered down by non-standard means.
2815 	 * Best to let it slumber.
2816 	 */
2817 	if (dev->current_state == PCI_D3cold)
2818 		return PCI_D3cold;
2819 	else if (!dev->pm_cap)
2820 		return PCI_D0;
2821 
2822 	if (wakeup && dev->pme_support) {
2823 		pci_power_t state = PCI_D3hot;
2824 
2825 		/*
2826 		 * Find the deepest state from which the device can generate
2827 		 * PME#.
2828 		 */
2829 		while (state && !(dev->pme_support & (1 << state)))
2830 			state--;
2831 
2832 		if (state)
2833 			return state;
2834 		else if (dev->pme_support & 1)
2835 			return PCI_D0;
2836 	}
2837 
2838 	return PCI_D3hot;
2839 }
2840 
2841 /**
2842  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2843  *			  into a sleep state
2844  * @dev: Device to handle.
2845  *
2846  * Choose the power state appropriate for the device depending on whether
2847  * it can wake up the system and/or is power manageable by the platform
2848  * (PCI_D3hot is the default) and put the device into that state.
2849  */
2850 int pci_prepare_to_sleep(struct pci_dev *dev)
2851 {
2852 	bool wakeup = device_may_wakeup(&dev->dev);
2853 	pci_power_t target_state = pci_target_state(dev, wakeup);
2854 	int error;
2855 
2856 	if (target_state == PCI_POWER_ERROR)
2857 		return -EIO;
2858 
2859 	pci_enable_wake(dev, target_state, wakeup);
2860 
2861 	error = pci_set_power_state(dev, target_state);
2862 
2863 	if (error)
2864 		pci_enable_wake(dev, target_state, false);
2865 
2866 	return error;
2867 }
2868 EXPORT_SYMBOL(pci_prepare_to_sleep);
2869 
2870 /**
2871  * pci_back_from_sleep - turn PCI device on during system-wide transition
2872  *			 into working state
2873  * @dev: Device to handle.
2874  *
2875  * Disable device's system wake-up capability and put it into D0.
2876  */
2877 int pci_back_from_sleep(struct pci_dev *dev)
2878 {
2879 	int ret = pci_set_power_state(dev, PCI_D0);
2880 
2881 	if (ret)
2882 		return ret;
2883 
2884 	pci_enable_wake(dev, PCI_D0, false);
2885 	return 0;
2886 }
2887 EXPORT_SYMBOL(pci_back_from_sleep);
2888 
2889 /**
2890  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2891  * @dev: PCI device being suspended.
2892  *
2893  * Prepare @dev to generate wake-up events at run time and put it into a low
2894  * power state.
2895  */
2896 int pci_finish_runtime_suspend(struct pci_dev *dev)
2897 {
2898 	pci_power_t target_state;
2899 	int error;
2900 
2901 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2902 	if (target_state == PCI_POWER_ERROR)
2903 		return -EIO;
2904 
2905 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2906 
2907 	error = pci_set_power_state(dev, target_state);
2908 
2909 	if (error)
2910 		pci_enable_wake(dev, target_state, false);
2911 
2912 	return error;
2913 }
2914 
2915 /**
2916  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2917  * @dev: Device to check.
2918  *
2919  * Return true if the device itself is capable of generating wake-up events
2920  * (through the platform or using the native PCIe PME) or if the device supports
2921  * PME and one of its upstream bridges can generate wake-up events.
2922  */
2923 bool pci_dev_run_wake(struct pci_dev *dev)
2924 {
2925 	struct pci_bus *bus = dev->bus;
2926 
2927 	if (!dev->pme_support)
2928 		return false;
2929 
2930 	/* PME-capable in principle, but not from the target power state */
2931 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2932 		return false;
2933 
2934 	if (device_can_wakeup(&dev->dev))
2935 		return true;
2936 
2937 	while (bus->parent) {
2938 		struct pci_dev *bridge = bus->self;
2939 
2940 		if (device_can_wakeup(&bridge->dev))
2941 			return true;
2942 
2943 		bus = bus->parent;
2944 	}
2945 
2946 	/* We have reached the root bus. */
2947 	if (bus->bridge)
2948 		return device_can_wakeup(bus->bridge);
2949 
2950 	return false;
2951 }
2952 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2953 
2954 /**
2955  * pci_dev_need_resume - Check if it is necessary to resume the device.
2956  * @pci_dev: Device to check.
2957  *
 * Return 'true' if the device is not runtime-suspended, if it has to be
 * reconfigured due to a difference in wakeup settings between system and
 * runtime suspend, or if its current power state is not suitable for the
 * upcoming (system-wide) transition.
2962  */
2963 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2964 {
2965 	struct device *dev = &pci_dev->dev;
2966 	pci_power_t target_state;
2967 
2968 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2969 		return true;
2970 
2971 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2972 
2973 	/*
2974 	 * If the earlier platform check has not triggered, D3cold is just power
2975 	 * removal on top of D3hot, so no need to resume the device in that
2976 	 * case.
2977 	 */
2978 	return target_state != pci_dev->current_state &&
2979 		target_state != PCI_D3cold &&
2980 		pci_dev->current_state != PCI_D3hot;
2981 }
2982 
2983 /**
2984  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2985  * @pci_dev: Device to check.
2986  *
2987  * If the device is suspended and it is not configured for system wakeup,
2988  * disable PME for it to prevent it from waking up the system unnecessarily.
2989  *
2990  * Note that if the device's power state is D3cold and the platform check in
2991  * pci_dev_need_resume() has not triggered, the device's configuration need not
2992  * be changed.
2993  */
2994 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2995 {
2996 	struct device *dev = &pci_dev->dev;
2997 
2998 	spin_lock_irq(&dev->power.lock);
2999 
3000 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
3001 	    pci_dev->current_state < PCI_D3cold)
3002 		__pci_pme_active(pci_dev, false);
3003 
3004 	spin_unlock_irq(&dev->power.lock);
3005 }
3006 
3007 /**
3008  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
3009  * @pci_dev: Device to handle.
3010  *
3011  * If the device is runtime suspended and wakeup-capable, enable PME for it as
3012  * it might have been disabled during the prepare phase of system suspend if
3013  * the device was not configured for system wakeup.
3014  */
3015 void pci_dev_complete_resume(struct pci_dev *pci_dev)
3016 {
3017 	struct device *dev = &pci_dev->dev;
3018 
3019 	if (!pci_dev_run_wake(pci_dev))
3020 		return;
3021 
3022 	spin_lock_irq(&dev->power.lock);
3023 
3024 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
3025 		__pci_pme_active(pci_dev, true);
3026 
3027 	spin_unlock_irq(&dev->power.lock);
3028 }
3029 
3030 /**
3031  * pci_choose_state - Choose the power state of a PCI device.
3032  * @dev: Target PCI device.
3033  * @state: Target state for the whole system.
3034  *
3035  * Returns PCI power state suitable for @dev and @state.
3036  */
3037 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
3038 {
3039 	if (state.event == PM_EVENT_ON)
3040 		return PCI_D0;
3041 
3042 	return pci_target_state(dev, false);
3043 }
3044 EXPORT_SYMBOL(pci_choose_state);
3045 
3046 void pci_config_pm_runtime_get(struct pci_dev *pdev)
3047 {
3048 	struct device *dev = &pdev->dev;
3049 	struct device *parent = dev->parent;
3050 
3051 	if (parent)
3052 		pm_runtime_get_sync(parent);
3053 	pm_runtime_get_noresume(dev);
3054 	/*
3055 	 * pdev->current_state is set to PCI_D3cold during suspending,
3056 	 * so wait until suspending completes
3057 	 */
3058 	pm_runtime_barrier(dev);
3059 	/*
3060 	 * Only need to resume devices in D3cold, because config
3061 	 * registers are still accessible for devices suspended but
3062 	 * not in D3cold.
3063 	 */
3064 	if (pdev->current_state == PCI_D3cold)
3065 		pm_runtime_resume(dev);
3066 }
3067 
3068 void pci_config_pm_runtime_put(struct pci_dev *pdev)
3069 {
3070 	struct device *dev = &pdev->dev;
3071 	struct device *parent = dev->parent;
3072 
3073 	pm_runtime_put(dev);
3074 	if (parent)
3075 		pm_runtime_put_sync(parent);
3076 }
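
/*
 * Illustrative sketch: code that touches config space outside the normal
 * driver flow (e.g. user-initiated accesses) brackets the access with the
 * pair above so the device cannot sit in D3cold meanwhile.  'val' is an
 * assumed u32 variable.
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *	pci_config_pm_runtime_put(pdev);
 */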
3077 
3078 static const struct dmi_system_id bridge_d3_blacklist[] = {
3079 #ifdef CONFIG_X86
3080 	{
3081 		/*
		 * The Gigabyte X299 root port is not marked as hotplug
		 * capable, which allows Linux to power manage it.  However,
		 * this confuses the BIOS SMI handler, so don't power manage
		 * root ports on that system.
3086 		 */
3087 		.ident = "X299 DESIGNARE EX-CF",
3088 		.matches = {
3089 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
3090 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
3091 		},
3092 	},
3093 	{
3094 		/*
		 * The downstream device is not accessible after putting a
		 * root port into D3cold and back into D0 on the Elo
		 * Continental Z2 board.
3097 		 */
3098 		.ident = "Elo Continental Z2",
3099 		.matches = {
3100 			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
3101 			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
3102 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
3103 		},
3104 	},
3105 #endif
3106 	{ }
3107 };
3108 
3109 /**
3110  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
3111  * @bridge: Bridge to check
3112  *
3113  * This function checks if it is possible to move the bridge to D3.
3114  * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
3115  */
3116 bool pci_bridge_d3_possible(struct pci_dev *bridge)
3117 {
3118 	if (!pci_is_pcie(bridge))
3119 		return false;
3120 
3121 	switch (pci_pcie_type(bridge)) {
3122 	case PCI_EXP_TYPE_ROOT_PORT:
3123 	case PCI_EXP_TYPE_UPSTREAM:
3124 	case PCI_EXP_TYPE_DOWNSTREAM:
3125 		if (pci_bridge_d3_disable)
3126 			return false;
3127 
3128 		/*
3129 		 * Hotplug ports handled by firmware in System Management Mode
3130 		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
3131 		 */
3132 		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
3133 			return false;
3134 
3135 		if (pci_bridge_d3_force)
3136 			return true;
3137 
3138 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
3139 		if (bridge->is_thunderbolt)
3140 			return true;
3141 
3142 		/* Platform might know better if the bridge supports D3 */
3143 		if (platform_pci_bridge_d3(bridge))
3144 			return true;
3145 
3146 		/*
3147 		 * Hotplug ports handled natively by the OS were not validated
3148 		 * by vendors for runtime D3 at least until 2018 because there
3149 		 * was no OS support.
3150 		 */
3151 		if (bridge->is_hotplug_bridge)
3152 			return false;
3153 
3154 		if (dmi_check_system(bridge_d3_blacklist))
3155 			return false;
3156 
3157 		/*
3158 		 * It should be safe to put PCIe ports from 2015 or newer
3159 		 * to D3.
3160 		 */
3161 		if (dmi_get_bios_year() >= 2015)
3162 			return true;
3163 		break;
3164 	}
3165 
3166 	return false;
3167 }
3168 
3169 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3170 {
3171 	bool *d3cold_ok = data;
3172 
3173 	if (/* The device needs to be allowed to go D3cold ... */
3174 	    dev->no_d3cold || !dev->d3cold_allowed ||
3175 
3176 	    /* ... and if it is wakeup capable to do so from D3cold. */
3177 	    (device_may_wakeup(&dev->dev) &&
3178 	     !pci_pme_capable(dev, PCI_D3cold)) ||
3179 
3180 	    /* If it is a bridge it must be allowed to go to D3. */
3181 	    !pci_power_manageable(dev))
3182 
3183 		*d3cold_ok = false;
3184 
3185 	return !*d3cold_ok;
3186 }
3187 
/**
3189  * pci_bridge_d3_update - Update bridge D3 capabilities
3190  * @dev: PCI device which is changed
3191  *
3192  * Update upstream bridge PM capabilities accordingly depending on if the
3193  * device PM configuration was changed or the device is being removed.  The
3194  * change is also propagated upstream.
3195  */
3196 void pci_bridge_d3_update(struct pci_dev *dev)
3197 {
3198 	bool remove = !device_is_registered(&dev->dev);
3199 	struct pci_dev *bridge;
3200 	bool d3cold_ok = true;
3201 
3202 	bridge = pci_upstream_bridge(dev);
3203 	if (!bridge || !pci_bridge_d3_possible(bridge))
3204 		return;
3205 
3206 	/*
3207 	 * If D3 is currently allowed for the bridge, removing one of its
3208 	 * children won't change that.
3209 	 */
3210 	if (remove && bridge->bridge_d3)
3211 		return;
3212 
3213 	/*
3214 	 * If D3 is currently allowed for the bridge and a child is added or
3215 	 * changed, disallowance of D3 can only be caused by that child, so
3216 	 * we only need to check that single device, not any of its siblings.
3217 	 *
3218 	 * If D3 is currently not allowed for the bridge, checking the device
3219 	 * first may allow us to skip checking its siblings.
3220 	 */
3221 	if (!remove)
3222 		pci_dev_check_d3cold(dev, &d3cold_ok);
3223 
3224 	/*
3225 	 * If D3 is currently not allowed for the bridge, this may be caused
3226 	 * either by the device being changed/removed or any of its siblings,
3227 	 * so we need to go through all children to find out if one of them
3228 	 * continues to block D3.
3229 	 */
3230 	if (d3cold_ok && !bridge->bridge_d3)
3231 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3232 			     &d3cold_ok);
3233 
3234 	if (bridge->bridge_d3 != d3cold_ok) {
3235 		bridge->bridge_d3 = d3cold_ok;
3236 		/* Propagate change to upstream bridges */
3237 		pci_bridge_d3_update(bridge);
3238 	}
3239 }
3240 
3241 /**
3242  * pci_d3cold_enable - Enable D3cold for device
3243  * @dev: PCI device to handle
3244  *
3245  * This function can be used in drivers to enable D3cold from the device
3246  * they handle.  It also updates upstream PCI bridge PM capabilities
3247  * accordingly.
3248  */
3249 void pci_d3cold_enable(struct pci_dev *dev)
3250 {
3251 	if (dev->no_d3cold) {
3252 		dev->no_d3cold = false;
3253 		pci_bridge_d3_update(dev);
3254 	}
3255 }
3256 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3257 
3258 /**
3259  * pci_d3cold_disable - Disable D3cold for device
3260  * @dev: PCI device to handle
3261  *
3262  * This function can be used in drivers to disable D3cold from the device
3263  * they handle.  It also updates upstream PCI bridge PM capabilities
3264  * accordingly.
3265  */
3266 void pci_d3cold_disable(struct pci_dev *dev)
3267 {
3268 	if (!dev->no_d3cold) {
3269 		dev->no_d3cold = true;
3270 		pci_bridge_d3_update(dev);
3271 	}
3272 }
3273 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
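
/*
 * Illustrative sketch: a driver that knows its hardware misbehaves after
 * a D3cold transition can opt out at probe time.  The FOO_* IDs are
 * hypothetical.
 *
 *	if (pdev->vendor == FOO_VENDOR_ID && pdev->device == FOO_BROKEN_DEVICE_ID)
 *		pci_d3cold_disable(pdev);
 */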
3274 
3275 /**
3276  * pci_pm_init - Initialize PM functions of given PCI device
3277  * @dev: PCI device to handle.
3278  */
3279 void pci_pm_init(struct pci_dev *dev)
3280 {
3281 	int pm;
3282 	u16 status;
3283 	u16 pmc;
3284 
3285 	pm_runtime_forbid(&dev->dev);
3286 	pm_runtime_set_active(&dev->dev);
3287 	pm_runtime_enable(&dev->dev);
3288 	device_enable_async_suspend(&dev->dev);
3289 	dev->wakeup_prepared = false;
3290 
3291 	dev->pm_cap = 0;
3292 	dev->pme_support = 0;
3293 
3294 	/* find PCI PM capability in list */
3295 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3296 	if (!pm)
3297 		return;
3298 	/* Check device's ability to generate PME# */
3299 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3300 
3301 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3302 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3303 			pmc & PCI_PM_CAP_VER_MASK);
3304 		return;
3305 	}
3306 
3307 	dev->pm_cap = pm;
3308 	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3309 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3310 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3311 	dev->d3cold_allowed = true;
3312 
3313 	dev->d1_support = false;
3314 	dev->d2_support = false;
3315 	if (!pci_no_d1d2(dev)) {
3316 		if (pmc & PCI_PM_CAP_D1)
3317 			dev->d1_support = true;
3318 		if (pmc & PCI_PM_CAP_D2)
3319 			dev->d2_support = true;
3320 
3321 		if (dev->d1_support || dev->d2_support)
3322 			pci_info(dev, "supports%s%s\n",
3323 				   dev->d1_support ? " D1" : "",
3324 				   dev->d2_support ? " D2" : "");
3325 	}
3326 
3327 	pmc &= PCI_PM_CAP_PME_MASK;
3328 	if (pmc) {
3329 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3330 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3331 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3332 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3333 			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3334 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3335 		dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3336 		dev->pme_poll = true;
3337 		/*
3338 		 * Make device's PM flags reflect the wake-up capability, but
		 * let user space enable it to wake up the system as needed.
3340 		 */
3341 		device_set_wakeup_capable(&dev->dev, true);
3342 		/* Disable the PME# generation functionality */
3343 		pci_pme_active(dev, false);
3344 	}
3345 
3346 	pci_read_config_word(dev, PCI_STATUS, &status);
3347 	if (status & PCI_STATUS_IMM_READY)
3348 		dev->imm_ready = 1;
3349 }
3350 
3351 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3352 {
3353 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3354 
3355 	switch (prop) {
3356 	case PCI_EA_P_MEM:
3357 	case PCI_EA_P_VF_MEM:
3358 		flags |= IORESOURCE_MEM;
3359 		break;
3360 	case PCI_EA_P_MEM_PREFETCH:
3361 	case PCI_EA_P_VF_MEM_PREFETCH:
3362 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3363 		break;
3364 	case PCI_EA_P_IO:
3365 		flags |= IORESOURCE_IO;
3366 		break;
3367 	default:
3368 		return 0;
3369 	}
3370 
3371 	return flags;
3372 }
3373 
3374 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3375 					    u8 prop)
3376 {
3377 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3378 		return &dev->resource[bei];
3379 #ifdef CONFIG_PCI_IOV
3380 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3381 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3382 		return &dev->resource[PCI_IOV_RESOURCES +
3383 				      bei - PCI_EA_BEI_VF_BAR0];
3384 #endif
3385 	else if (bei == PCI_EA_BEI_ROM)
3386 		return &dev->resource[PCI_ROM_RESOURCE];
3387 	else
3388 		return NULL;
3389 }
3390 
3391 /* Read an Enhanced Allocation (EA) entry */
3392 static int pci_ea_read(struct pci_dev *dev, int offset)
3393 {
3394 	struct resource *res;
3395 	const char *res_name;
3396 	int ent_size, ent_offset = offset;
3397 	resource_size_t start, end;
3398 	unsigned long flags;
3399 	u32 dw0, bei, base, max_offset;
3400 	u8 prop;
3401 	bool support_64 = (sizeof(resource_size_t) >= 8);
3402 
3403 	pci_read_config_dword(dev, ent_offset, &dw0);
3404 	ent_offset += 4;
3405 
3406 	/* Entry size field indicates DWORDs after 1st */
3407 	ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3408 
3409 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3410 		goto out;
3411 
3412 	bei = FIELD_GET(PCI_EA_BEI, dw0);
3413 	prop = FIELD_GET(PCI_EA_PP, dw0);
3414 
3415 	/*
3416 	 * If the Property is in the reserved range, try the Secondary
3417 	 * Property instead.
3418 	 */
3419 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3420 		prop = FIELD_GET(PCI_EA_SP, dw0);
3421 	if (prop > PCI_EA_P_BRIDGE_IO)
3422 		goto out;
3423 
3424 	res = pci_ea_get_resource(dev, bei, prop);
3425 	res_name = pci_resource_name(dev, bei);
3426 	if (!res) {
3427 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3428 		goto out;
3429 	}
3430 
3431 	flags = pci_ea_flags(dev, prop);
3432 	if (!flags) {
3433 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3434 		goto out;
3435 	}
3436 
3437 	/* Read Base */
3438 	pci_read_config_dword(dev, ent_offset, &base);
3439 	start = (base & PCI_EA_FIELD_MASK);
3440 	ent_offset += 4;
3441 
3442 	/* Read MaxOffset */
3443 	pci_read_config_dword(dev, ent_offset, &max_offset);
3444 	ent_offset += 4;
3445 
3446 	/* Read Base MSBs (if 64-bit entry) */
3447 	if (base & PCI_EA_IS_64) {
3448 		u32 base_upper;
3449 
3450 		pci_read_config_dword(dev, ent_offset, &base_upper);
3451 		ent_offset += 4;
3452 
3453 		flags |= IORESOURCE_MEM_64;
3454 
3455 		/* entry starts above 32-bit boundary, can't use */
3456 		if (!support_64 && base_upper)
3457 			goto out;
3458 
3459 		if (support_64)
3460 			start |= ((u64)base_upper << 32);
3461 	}
3462 
3463 	end = start + (max_offset | 0x03);
3464 
3465 	/* Read MaxOffset MSBs (if 64-bit entry) */
3466 	if (max_offset & PCI_EA_IS_64) {
3467 		u32 max_offset_upper;
3468 
3469 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3470 		ent_offset += 4;
3471 
3472 		flags |= IORESOURCE_MEM_64;
3473 
3474 		/* entry too big, can't use */
3475 		if (!support_64 && max_offset_upper)
3476 			goto out;
3477 
3478 		if (support_64)
3479 			end += ((u64)max_offset_upper << 32);
3480 	}
3481 
3482 	if (end < start) {
3483 		pci_err(dev, "EA Entry crosses address boundary\n");
3484 		goto out;
3485 	}
3486 
3487 	if (ent_size != ent_offset - offset) {
3488 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3489 			ent_size, ent_offset - offset);
3490 		goto out;
3491 	}
3492 
3493 	res->name = pci_name(dev);
3494 	res->start = start;
3495 	res->end = end;
3496 	res->flags = flags;
3497 
3498 	if (bei <= PCI_EA_BEI_BAR5)
3499 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3500 			 res_name, res, prop);
3501 	else if (bei == PCI_EA_BEI_ROM)
3502 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3503 			 res_name, res, prop);
3504 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3505 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3506 			 res_name, res, prop);
3507 	else
3508 		pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
3509 			   bei, res, prop);
3510 
3511 out:
3512 	return offset + ent_size;
3513 }
3514 
3515 /* Enhanced Allocation Initialization */
3516 void pci_ea_init(struct pci_dev *dev)
3517 {
3518 	int ea;
3519 	u8 num_ent;
3520 	int offset;
3521 	int i;
3522 
3523 	/* find PCI EA capability in list */
3524 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3525 	if (!ea)
3526 		return;
3527 
3528 	/* determine the number of entries */
3529 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3530 					&num_ent);
3531 	num_ent &= PCI_EA_NUM_ENT_MASK;
3532 
3533 	offset = ea + PCI_EA_FIRST_ENT;
3534 
3535 	/* Skip DWORD 2 for type 1 functions */
3536 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3537 		offset += 4;
3538 
3539 	/* parse each EA entry */
3540 	for (i = 0; i < num_ent; ++i)
3541 		offset = pci_ea_read(dev, offset);
3542 }
3543 
3544 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3545 	struct pci_cap_saved_state *new_cap)
3546 {
3547 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3548 }
3549 
3550 /**
3551  * _pci_add_cap_save_buffer - allocate buffer for saving given
3552  *			      capability registers
3553  * @dev: the PCI device
3554  * @cap: the capability to allocate the buffer for
3555  * @extended: Standard or Extended capability ID
3556  * @size: requested size of the buffer
3557  */
3558 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3559 				    bool extended, unsigned int size)
3560 {
3561 	int pos;
3562 	struct pci_cap_saved_state *save_state;
3563 
3564 	if (extended)
3565 		pos = pci_find_ext_capability(dev, cap);
3566 	else
3567 		pos = pci_find_capability(dev, cap);
3568 
3569 	if (!pos)
3570 		return 0;
3571 
3572 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3573 	if (!save_state)
3574 		return -ENOMEM;
3575 
3576 	save_state->cap.cap_nr = cap;
3577 	save_state->cap.cap_extended = extended;
3578 	save_state->cap.size = size;
3579 	pci_add_saved_cap(dev, save_state);
3580 
3581 	return 0;
3582 }
3583 
3584 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3585 {
3586 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3587 }
3588 
3589 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3590 {
3591 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3592 }
3593 
3594 /**
3595  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3596  * @dev: the PCI device
3597  */
3598 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3599 {
3600 	int error;
3601 
3602 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3603 					PCI_EXP_SAVE_REGS * sizeof(u16));
3604 	if (error)
3605 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3606 
3607 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3608 	if (error)
3609 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3610 
3611 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3612 					    2 * sizeof(u16));
3613 	if (error)
3614 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3615 
3616 	pci_allocate_vc_save_buffers(dev);
3617 }
3618 
3619 void pci_free_cap_save_buffers(struct pci_dev *dev)
3620 {
3621 	struct pci_cap_saved_state *tmp;
3622 	struct hlist_node *n;
3623 
3624 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3625 		kfree(tmp);
3626 }
3627 
3628 /**
3629  * pci_configure_ari - enable or disable ARI forwarding
3630  * @dev: the PCI device
3631  *
3632  * If @dev and its upstream bridge both support ARI, enable ARI in the
3633  * bridge.  Otherwise, disable ARI in the bridge.
3634  */
3635 void pci_configure_ari(struct pci_dev *dev)
3636 {
3637 	u32 cap;
3638 	struct pci_dev *bridge;
3639 
3640 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3641 		return;
3642 
3643 	bridge = dev->bus->self;
3644 	if (!bridge)
3645 		return;
3646 
3647 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3648 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3649 		return;
3650 
3651 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3652 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3653 					 PCI_EXP_DEVCTL2_ARI);
3654 		bridge->ari_enabled = 1;
3655 	} else {
3656 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3657 					   PCI_EXP_DEVCTL2_ARI);
3658 		bridge->ari_enabled = 0;
3659 	}
3660 }
3661 
3662 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3663 {
3664 	int pos;
3665 	u16 cap, ctrl;
3666 
3667 	pos = pdev->acs_cap;
3668 	if (!pos)
3669 		return false;
3670 
3671 	/*
3672 	 * Except for egress control, capabilities are either required
3673 	 * or only required if controllable.  Features missing from the
3674 	 * capability field can therefore be assumed to be hard-wired enabled.
3675 	 */
3676 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3677 	acs_flags &= (cap | PCI_ACS_EC);
3678 
3679 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3680 	return (ctrl & acs_flags) == acs_flags;
3681 }
3682 
3683 /**
3684  * pci_acs_enabled - test ACS against required flags for a given device
3685  * @pdev: device to test
3686  * @acs_flags: required PCI ACS flags
3687  *
3688  * Return true if the device supports the provided flags.  Automatically
3689  * filters out flags that are not implemented on multifunction devices.
3690  *
3691  * Note that this interface checks the effective ACS capabilities of the
3692  * device rather than the actual capabilities.  For instance, most single
3693  * function endpoints are not required to support ACS because they have no
3694  * opportunity for peer-to-peer access.  We therefore return 'true'
3695  * regardless of whether the device exposes an ACS capability.  This makes
3696  * it much easier for callers of this function to ignore the actual type
3697  * or topology of the device when testing ACS support.
3698  */
3699 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3700 {
3701 	int ret;
3702 
3703 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3704 	if (ret >= 0)
3705 		return ret > 0;
3706 
3707 	/*
3708 	 * Conventional PCI and PCI-X devices never support ACS, either
3709 	 * effectively or actually.  The shared bus topology implies that
3710 	 * any device on the bus can receive or snoop DMA.
3711 	 */
3712 	if (!pci_is_pcie(pdev))
3713 		return false;
3714 
3715 	switch (pci_pcie_type(pdev)) {
3716 	/*
3717 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3718 	 * but since their primary interface is PCI/X, we conservatively
3719 	 * handle them as we would a non-PCIe device.
3720 	 */
3721 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3722 	/*
3723 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3724 	 * applicable... must never implement an ACS Extended Capability...".
3725 	 * This seems arbitrary, but we take a conservative interpretation
3726 	 * of this statement.
3727 	 */
3728 	case PCI_EXP_TYPE_PCI_BRIDGE:
3729 	case PCI_EXP_TYPE_RC_EC:
3730 		return false;
3731 	/*
3732 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3733 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3734 	 * regardless of whether they are single- or multi-function devices.
3735 	 */
3736 	case PCI_EXP_TYPE_DOWNSTREAM:
3737 	case PCI_EXP_TYPE_ROOT_PORT:
3738 		return pci_acs_flags_enabled(pdev, acs_flags);
3739 	/*
3740 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3741 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3742 	 * capabilities, but only when they are part of a multifunction
3743 	 * device.  The footnote for section 6.12 indicates the specific
3744 	 * PCIe types included here.
3745 	 */
3746 	case PCI_EXP_TYPE_ENDPOINT:
3747 	case PCI_EXP_TYPE_UPSTREAM:
3748 	case PCI_EXP_TYPE_LEG_END:
3749 	case PCI_EXP_TYPE_RC_END:
3750 		if (!pdev->multifunction)
3751 			break;
3752 
3753 		return pci_acs_flags_enabled(pdev, acs_flags);
3754 	}
3755 
3756 	/*
3757 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3758 	 * to single function devices with the exception of downstream ports.
3759 	 */
3760 	return true;
3761 }
3762 
3763 /**
3764  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3765  * @start: starting downstream device
3766  * @end: ending upstream device or NULL to search to the root bus
3767  * @acs_flags: required flags
3768  *
3769  * Walk up a device tree from start to end testing PCI ACS support.  If
3770  * any step along the way does not support the required flags, return false.
3771  */
3772 bool pci_acs_path_enabled(struct pci_dev *start,
3773 			  struct pci_dev *end, u16 acs_flags)
3774 {
3775 	struct pci_dev *pdev, *parent = start;
3776 
3777 	do {
3778 		pdev = parent;
3779 
3780 		if (!pci_acs_enabled(pdev, acs_flags))
3781 			return false;
3782 
3783 		if (pci_is_root_bus(pdev->bus))
3784 			return (end == NULL);
3785 
3786 		parent = pdev->bus->self;
3787 	} while (pdev != end);
3788 
3789 	return true;
3790 }
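
/*
 * Usage sketch (illustrative, not from this file): IOMMU grouping code
 * typically requires a set of ACS flags like the below on the whole
 * upstream path before it considers a device isolated for peer-to-peer:
 *
 *	u16 acs_flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CF | PCI_ACS_UF;
 *
 *	if (!pci_acs_path_enabled(pdev, NULL, acs_flags))
 *		pci_info(pdev, "ACS not enforced on upstream path\n");
 */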
3791 
3792 /**
3793  * pci_acs_init - Initialize ACS if hardware supports it
3794  * @dev: the PCI device
3795  */
3796 void pci_acs_init(struct pci_dev *dev)
3797 {
3798 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3799 
3800 	/*
3801 	 * Attempt to enable ACS regardless of capability because some Root
3802 	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3803 	 * the standard ACS capability but still support ACS via those
3804 	 * quirks.
3805 	 */
3806 	pci_enable_acs(dev);
3807 }
3808 
3809 /**
3810  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3811  * @pdev: PCI device
3812  * @bar: BAR to find
3813  *
3814  * Helper to find the position of the ctrl register for a BAR.
3815  * Returns -ENOTSUPP if resizable BARs are not supported at all.
3816  * Returns -ENOENT if no ctrl register for the BAR could be found.
3817  */
3818 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3819 {
3820 	unsigned int pos, nbars, i;
3821 	u32 ctrl;
3822 
3823 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3824 	if (!pos)
3825 		return -ENOTSUPP;
3826 
3827 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3828 	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
3829 
3830 	for (i = 0; i < nbars; i++, pos += 8) {
3831 		int bar_idx;
3832 
3833 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3834 		bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
3835 		if (bar_idx == bar)
3836 			return pos;
3837 	}
3838 
3839 	return -ENOENT;
3840 }
3841 
3842 /**
3843  * pci_rebar_get_possible_sizes - get possible sizes for BAR
3844  * @pdev: PCI device
3845  * @bar: BAR to query
3846  *
3847  * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3848  * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3849  */
3850 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3851 {
3852 	int pos;
3853 	u32 cap;
3854 
3855 	pos = pci_rebar_find_pos(pdev, bar);
3856 	if (pos < 0)
3857 		return 0;
3858 
3859 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3860 	cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3861 
3862 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3863 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3864 	    bar == 0 && cap == 0x700)
3865 		return 0x3f00;
3866 
3867 	return cap;
3868 }
3869 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3870 
3871 /**
3872  * pci_rebar_get_current_size - get the current size of a BAR
3873  * @pdev: PCI device
3874  * @bar: BAR to set size to
3875  *
3876  * Read the size of a BAR from the resizable BAR config.
3877  * Returns size if found or negative error code.
3878  */
3879 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3880 {
3881 	int pos;
3882 	u32 ctrl;
3883 
3884 	pos = pci_rebar_find_pos(pdev, bar);
3885 	if (pos < 0)
3886 		return pos;
3887 
3888 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3889 	return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
3890 }
3891 
3892 /**
3893  * pci_rebar_set_size - set a new size for a BAR
3894  * @pdev: PCI device
3895  * @bar: BAR to set size to
3896  * @size: new size as defined in the spec (0=1MB, 19=512GB)
3897  *
3898  * Set the new size of a BAR as defined in the spec.
3899  * Returns zero if resizing was successful, error code otherwise.
3900  */
3901 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3902 {
3903 	int pos;
3904 	u32 ctrl;
3905 
3906 	pos = pci_rebar_find_pos(pdev, bar);
3907 	if (pos < 0)
3908 		return pos;
3909 
3910 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3911 	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3912 	ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
3913 	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3914 	return 0;
3915 }
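
/*
 * Sketch of a typical resize flow (assumptions: @bar refers to a
 * resizable BAR and the BAR has been released before resizing).
 * __fls() picks the largest advertised size; bit n set in the mask
 * means 2^n MB is supported:
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *
 *	if (sizes && !pci_rebar_set_size(pdev, bar, __fls(sizes)))
 *		pci_info(pdev, "BAR %d resized\n", bar);
 */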
3916 
3917 /**
3918  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3919  * @dev: the PCI device
3920  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3921  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3922  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3923  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3924  *
3925  * Return 0 if all upstream bridges support AtomicOp routing, egress
3926  * blocking is disabled on all upstream ports, and the root port supports
3927  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3928  * AtomicOp completion), or negative otherwise.
3929  */
3930 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3931 {
3932 	struct pci_bus *bus = dev->bus;
3933 	struct pci_dev *bridge;
3934 	u32 cap, ctl2;
3935 
3936 	/*
3937 	 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3938 	 * in Device Control 2 is reserved in VFs and the PF value applies
3939 	 * to all associated VFs.
3940 	 */
3941 	if (dev->is_virtfn)
3942 		return -EINVAL;
3943 
3944 	if (!pci_is_pcie(dev))
3945 		return -EINVAL;
3946 
3947 	/*
3948 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3949 	 * AtomicOp requesters.  For now, we only support endpoints as
3950 	 * requesters and root ports as completers.  No endpoints as
3951 	 * completers, and no peer-to-peer.
3952 	 */
3953 
3954 	switch (pci_pcie_type(dev)) {
3955 	case PCI_EXP_TYPE_ENDPOINT:
3956 	case PCI_EXP_TYPE_LEG_END:
3957 	case PCI_EXP_TYPE_RC_END:
3958 		break;
3959 	default:
3960 		return -EINVAL;
3961 	}
3962 
3963 	while (bus->parent) {
3964 		bridge = bus->self;
3965 
3966 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3967 
3968 		switch (pci_pcie_type(bridge)) {
3969 		/* Ensure switch ports support AtomicOp routing */
3970 		case PCI_EXP_TYPE_UPSTREAM:
3971 		case PCI_EXP_TYPE_DOWNSTREAM:
3972 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3973 				return -EINVAL;
3974 			break;
3975 
3976 		/* Ensure root port supports all the sizes we care about */
3977 		case PCI_EXP_TYPE_ROOT_PORT:
3978 			if ((cap & cap_mask) != cap_mask)
3979 				return -EINVAL;
3980 			break;
3981 		}
3982 
3983 		/* Ensure upstream ports don't block AtomicOps on egress */
3984 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3985 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3986 						   &ctl2);
3987 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3988 				return -EINVAL;
3989 		}
3990 
3991 		bus = bus->parent;
3992 	}
3993 
3994 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3995 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3996 	return 0;
3997 }
3998 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
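
/*
 * Usage sketch (illustrative): an endpoint driver that wants to issue
 * 64-bit AtomicOps toward host memory would typically do:
 *
 *	if (!pci_enable_atomic_ops_to_root(pdev,
 *					   PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		pci_dbg(pdev, "64-bit AtomicOp completion supported\n");
 */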
3999 
4000 /**
4001  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
4002  * @dev: the PCI device
4003  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
4004  *
4005  * Perform INTx swizzling for a device behind one level of bridge.  This is
4006  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
4007  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
4008  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
4009  * the PCI Express Base Specification, Revision 2.1).
4010  */
4011 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
4012 {
4013 	int slot;
4014 
4015 	if (pci_ari_enabled(dev->bus))
4016 		slot = 0;
4017 	else
4018 		slot = PCI_SLOT(dev->devfn);
4019 
4020 	return (((pin - 1) + slot) % 4) + 1;
4021 }
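
/*
 * Worked example: a device in slot 3 asserting INTB (pin 2) behind one
 * bridge appears upstream as (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA.
 */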
4022 
4023 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
4024 {
4025 	u8 pin;
4026 
4027 	pin = dev->pin;
4028 	if (!pin)
4029 		return -1;
4030 
4031 	while (!pci_is_root_bus(dev->bus)) {
4032 		pin = pci_swizzle_interrupt_pin(dev, pin);
4033 		dev = dev->bus->self;
4034 	}
4035 	*bridge = dev;
4036 	return pin;
4037 }
4038 
4039 /**
4040  * pci_common_swizzle - swizzle INTx all the way to root bridge
4041  * @dev: the PCI device
4042  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
4043  *
4044  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
4045  * bridges all the way up to a PCI root bus.
4046  */
4047 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
4048 {
4049 	u8 pin = *pinp;
4050 
4051 	while (!pci_is_root_bus(dev->bus)) {
4052 		pin = pci_swizzle_interrupt_pin(dev, pin);
4053 		dev = dev->bus->self;
4054 	}
4055 	*pinp = pin;
4056 	return PCI_SLOT(dev->devfn);
4057 }
4058 EXPORT_SYMBOL_GPL(pci_common_swizzle);
4059 
4060 /**
4061  * pci_release_region - Release a PCI BAR
4062  * @pdev: PCI device whose resources were previously reserved by
4063  *	  pci_request_region()
4064  * @bar: BAR to release
4065  *
4066  * Releases the PCI I/O and memory resources previously reserved by a
4067  * successful call to pci_request_region().  Call this function only
4068  * after all use of the PCI regions has ceased.
4069  */
4070 void pci_release_region(struct pci_dev *pdev, int bar)
4071 {
4072 	struct pci_devres *dr;
4073 
4074 	if (pci_resource_len(pdev, bar) == 0)
4075 		return;
4076 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
4077 		release_region(pci_resource_start(pdev, bar),
4078 				pci_resource_len(pdev, bar));
4079 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
4080 		release_mem_region(pci_resource_start(pdev, bar),
4081 				pci_resource_len(pdev, bar));
4082 
4083 	dr = find_pci_dr(pdev);
4084 	if (dr)
4085 		dr->region_mask &= ~(1 << bar);
4086 }
4087 EXPORT_SYMBOL(pci_release_region);
4088 
4089 /**
4090  * __pci_request_region - Reserve PCI I/O and memory resource
4091  * @pdev: PCI device whose resources are to be reserved
4092  * @bar: BAR to be reserved
4093  * @res_name: Name to be associated with resource.
4094  * @exclusive: whether the region access is exclusive or not
4095  *
4096  * Mark the PCI region associated with PCI device @pdev BAR @bar as
4097  * being reserved by owner @res_name.  Do not access any
4098  * address inside the PCI regions unless this call returns
4099  * successfully.
4100  *
4101  * If @exclusive is set, then the region is marked so that userspace
4102  * is explicitly not allowed to map the resource via /dev/mem or
4103  * sysfs MMIO access.
4104  *
4105  * Returns 0 on success, or %EBUSY on error.  A warning
4106  * message is also printed on failure.
4107  */
4108 static int __pci_request_region(struct pci_dev *pdev, int bar,
4109 				const char *res_name, int exclusive)
4110 {
4111 	struct pci_devres *dr;
4112 
4113 	if (pci_resource_len(pdev, bar) == 0)
4114 		return 0;
4115 
4116 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
4117 		if (!request_region(pci_resource_start(pdev, bar),
4118 			    pci_resource_len(pdev, bar), res_name))
4119 			goto err_out;
4120 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
4121 		if (!__request_mem_region(pci_resource_start(pdev, bar),
4122 					pci_resource_len(pdev, bar), res_name,
4123 					exclusive))
4124 			goto err_out;
4125 	}
4126 
4127 	dr = find_pci_dr(pdev);
4128 	if (dr)
4129 		dr->region_mask |= 1 << bar;
4130 
4131 	return 0;
4132 
4133 err_out:
4134 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
4135 		 &pdev->resource[bar]);
4136 	return -EBUSY;
4137 }
4138 
4139 /**
4140  * pci_request_region - Reserve PCI I/O and memory resource
4141  * @pdev: PCI device whose resources are to be reserved
4142  * @bar: BAR to be reserved
4143  * @res_name: Name to be associated with resource
4144  *
4145  * Mark the PCI region associated with PCI device @pdev BAR @bar as
4146  * being reserved by owner @res_name.  Do not access any
4147  * address inside the PCI regions unless this call returns
4148  * successfully.
4149  *
4150  * Returns 0 on success, or %EBUSY on error.  A warning
4151  * message is also printed on failure.
4152  */
4153 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4154 {
4155 	return __pci_request_region(pdev, bar, res_name, 0);
4156 }
4157 EXPORT_SYMBOL(pci_request_region);
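
/*
 * Usage sketch (illustrative; "my_driver" and BAR 0 are assumptions):
 * a probe() routine reserves the BAR before mapping it and releases it
 * again on the teardown path:
 *
 *	err = pci_request_region(pdev, 0, "my_driver");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_region(pdev, 0);
 */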
4158 
4159 /**
4160  * pci_release_selected_regions - Release selected PCI I/O and memory resources
4161  * @pdev: PCI device whose resources were previously reserved
4162  * @bars: Bitmask of BARs to be released
4163  *
4164  * Release selected PCI I/O and memory resources previously reserved.
4165  * Call this function only after all use of the PCI regions has ceased.
4166  */
4167 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4168 {
4169 	int i;
4170 
4171 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4172 		if (bars & (1 << i))
4173 			pci_release_region(pdev, i);
4174 }
4175 EXPORT_SYMBOL(pci_release_selected_regions);
4176 
4177 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4178 					  const char *res_name, int excl)
4179 {
4180 	int i;
4181 
4182 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4183 		if (bars & (1 << i))
4184 			if (__pci_request_region(pdev, i, res_name, excl))
4185 				goto err_out;
4186 	return 0;
4187 
4188 err_out:
4189 	while (--i >= 0)
4190 		if (bars & (1 << i))
4191 			pci_release_region(pdev, i);
4192 
4193 	return -EBUSY;
4194 }
4195 
4197 /**
4198  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4199  * @pdev: PCI device whose resources are to be reserved
4200  * @bars: Bitmask of BARs to be requested
4201  * @res_name: Name to be associated with resource
4202  */
4203 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4204 				 const char *res_name)
4205 {
4206 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
4207 }
4208 EXPORT_SYMBOL(pci_request_selected_regions);
4209 
4210 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4211 					   const char *res_name)
4212 {
4213 	return __pci_request_selected_regions(pdev, bars, res_name,
4214 			IORESOURCE_EXCLUSIVE);
4215 }
4216 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4217 
4218 /**
4219  * pci_release_regions - Release reserved PCI I/O and memory resources
4220  * @pdev: PCI device whose resources were previously reserved by
4221  *	  pci_request_regions()
4222  *
4223  * Releases all PCI I/O and memory resources previously reserved by a
4224  * successful call to pci_request_regions().  Call this function only
4225  * after all use of the PCI regions has ceased.
4226  */
4228 void pci_release_regions(struct pci_dev *pdev)
4229 {
4230 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4231 }
4232 EXPORT_SYMBOL(pci_release_regions);
4233 
4234 /**
4235  * pci_request_regions - Reserve PCI I/O and memory resources
4236  * @pdev: PCI device whose resources are to be reserved
4237  * @res_name: Name to be associated with resource.
4238  *
4239  * Mark all PCI regions associated with PCI device @pdev as
4240  * being reserved by owner @res_name.  Do not access any
4241  * address inside the PCI regions unless this call returns
4242  * successfully.
4243  *
4244  * Returns 0 on success, or %EBUSY on error.  A warning
4245  * message is also printed on failure.
4246  */
4247 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4248 {
4249 	return pci_request_selected_regions(pdev,
4250 			((1 << PCI_STD_NUM_BARS) - 1), res_name);
4251 }
4252 EXPORT_SYMBOL(pci_request_regions);
4253 
4254 /**
4255  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4256  * @pdev: PCI device whose resources are to be reserved
4257  * @res_name: Name to be associated with resource.
4258  *
4259  * Mark all PCI regions associated with PCI device @pdev as being reserved
4260  * by owner @res_name.  Do not access any address inside the PCI regions
4261  * unless this call returns successfully.
4262  *
4263  * pci_request_regions_exclusive() will mark the region so that /dev/mem
4264  * and the sysfs MMIO access will not be allowed.
4265  *
4266  * Returns 0 on success, or %EBUSY on error.  A warning message is also
4267  * printed on failure.
4268  */
4269 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4270 {
4271 	return pci_request_selected_regions_exclusive(pdev,
4272 				((1 << PCI_STD_NUM_BARS) - 1), res_name);
4273 }
4274 EXPORT_SYMBOL(pci_request_regions_exclusive);
4275 
4276 /*
4277  * Record the PCI IO range (expressed as CPU physical address + size).
4278  * Return a negative value if an error has occurred, zero otherwise
4279  */
4280 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4281 			resource_size_t	size)
4282 {
4283 	int ret = 0;
4284 #ifdef PCI_IOBASE
4285 	struct logic_pio_hwaddr *range;
4286 
4287 	if (!size || addr + size < addr)
4288 		return -EINVAL;
4289 
4290 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4291 	if (!range)
4292 		return -ENOMEM;
4293 
4294 	range->fwnode = fwnode;
4295 	range->size = size;
4296 	range->hw_start = addr;
4297 	range->flags = LOGIC_PIO_CPU_MMIO;
4298 
4299 	ret = logic_pio_register_range(range);
4300 	if (ret)
4301 		kfree(range);
4302 
4303 	/* Ignore duplicates due to deferred probing */
4304 	if (ret == -EEXIST)
4305 		ret = 0;
4306 #endif
4307 
4308 	return ret;
4309 }
4310 
4311 phys_addr_t pci_pio_to_address(unsigned long pio)
4312 {
4313 #ifdef PCI_IOBASE
4314 	if (pio < MMIO_UPPER_LIMIT)
4315 		return logic_pio_to_hwaddr(pio);
4316 #endif
4317 
4318 	return (phys_addr_t) OF_BAD_ADDR;
4319 }
4320 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4321 
4322 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4323 {
4324 #ifdef PCI_IOBASE
4325 	return logic_pio_trans_cpuaddr(address);
4326 #else
4327 	if (address > IO_SPACE_LIMIT)
4328 		return (unsigned long)-1;
4329 
4330 	return (unsigned long) address;
4331 #endif
4332 }
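
/*
 * Sketch of the logical PIO round trip (assumes PCI_IOBASE is defined,
 * the range was registered first, and SZ_64K from <linux/sizes.h>).
 * pci_address_to_pio() yields the logical PIO cookie for a CPU address
 * and pci_pio_to_address() translates it back:
 *
 *	pci_register_io_range(fwnode, cpu_addr, SZ_64K);
 *	pio = pci_address_to_pio(cpu_addr);
 *	phys = pci_pio_to_address(pio);
 */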
4333 
4334 /**
4335  * pci_remap_iospace - Remap the memory mapped I/O space
4336  * @res: Resource describing the I/O space
4337  * @phys_addr: physical address of range to be mapped
4338  *
4339  * Remap the memory mapped I/O space described by @res and the CPU
4340  * physical address @phys_addr into virtual address space.  Only
4341  * architectures that have memory mapped IO functions defined (and the
4342  * PCI_IOBASE value defined) should call this function.
4343  */
4344 #ifndef pci_remap_iospace
4345 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4346 {
4347 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4348 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4349 
4350 	if (!(res->flags & IORESOURCE_IO))
4351 		return -EINVAL;
4352 
4353 	if (res->end > IO_SPACE_LIMIT)
4354 		return -EINVAL;
4355 
4356 	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4357 				  pgprot_device(PAGE_KERNEL));
4358 #else
4359 	/*
4360 	 * This architecture does not have memory mapped I/O space,
4361 	 * so this function should never be called
4362 	 */
4363 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4364 	return -ENODEV;
4365 #endif
4366 }
4367 EXPORT_SYMBOL(pci_remap_iospace);
4368 #endif
4369 
4370 /**
4371  * pci_unmap_iospace - Unmap the memory mapped I/O space
4372  * @res: resource to be unmapped
4373  *
4374  * Unmap the I/O space resource @res from CPU virtual address space.  Only
4375  * architectures that have memory mapped IO functions defined (and the
4376  * PCI_IOBASE value defined) should call this function.
4377  */
4378 void pci_unmap_iospace(struct resource *res)
4379 {
4380 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4381 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4382 
4383 	vunmap_range(vaddr, vaddr + resource_size(res));
4384 #endif
4385 }
4386 EXPORT_SYMBOL(pci_unmap_iospace);
4387 
4388 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4389 {
4390 	struct resource **res = ptr;
4391 
4392 	pci_unmap_iospace(*res);
4393 }
4394 
4395 /**
4396  * devm_pci_remap_iospace - Managed pci_remap_iospace()
4397  * @dev: Generic device to remap IO address for
4398  * @res: Resource describing the I/O space
4399  * @phys_addr: physical address of range to be mapped
4400  *
4401  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
4402  * detach.
4403  */
4404 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4405 			   phys_addr_t phys_addr)
4406 {
4407 	const struct resource **ptr;
4408 	int error;
4409 
4410 	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4411 	if (!ptr)
4412 		return -ENOMEM;
4413 
4414 	error = pci_remap_iospace(res, phys_addr);
4415 	if (error) {
4416 		devres_free(ptr);
4417 	} else	{
4418 		*ptr = res;
4419 		devres_add(dev, ptr);
4420 	}
4421 
4422 	return error;
4423 }
4424 EXPORT_SYMBOL(devm_pci_remap_iospace);
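
/*
 * Usage sketch (illustrative; "io_res" and "cpu_addr" are assumed to
 * come from the host bridge's parsed ranges):
 *
 *	ret = devm_pci_remap_iospace(&pdev->dev, &io_res, cpu_addr);
 *	if (ret)
 *		return ret;
 */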
4425 
4426 /**
4427  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4428  * @dev: Generic device to remap IO address for
4429  * @offset: Resource address to map
4430  * @size: Size of map
4431  *
4432  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4433  * detach.
4434  */
4435 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4436 				      resource_size_t offset,
4437 				      resource_size_t size)
4438 {
4439 	void __iomem **ptr, *addr;
4440 
4441 	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4442 	if (!ptr)
4443 		return NULL;
4444 
4445 	addr = pci_remap_cfgspace(offset, size);
4446 	if (addr) {
4447 		*ptr = addr;
4448 		devres_add(dev, ptr);
4449 	} else
4450 		devres_free(ptr);
4451 
4452 	return addr;
4453 }
4454 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4455 
4456 /**
4457  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4458  * @dev: generic device to handle the resource for
4459  * @res: configuration space resource to be handled
4460  *
4461  * Checks that a resource is a valid memory region, requests the memory
4462  * region and ioremaps with pci_remap_cfgspace() API that ensures the
4463  * proper PCI configuration space memory attributes are guaranteed.
4464  *
4465  * All operations are managed and will be undone on driver detach.
4466  *
4467  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4468  * on failure. Usage example::
4469  *
4470  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4471  *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4472  *	if (IS_ERR(base))
4473  *		return PTR_ERR(base);
4474  */
4475 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4476 					  struct resource *res)
4477 {
4478 	resource_size_t size;
4479 	const char *name;
4480 	void __iomem *dest_ptr;
4481 
4482 	BUG_ON(!dev);
4483 
4484 	if (!res || resource_type(res) != IORESOURCE_MEM) {
4485 		dev_err(dev, "invalid resource\n");
4486 		return IOMEM_ERR_PTR(-EINVAL);
4487 	}
4488 
4489 	size = resource_size(res);
4490 
4491 	if (res->name)
4492 		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4493 				      res->name);
4494 	else
4495 		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4496 	if (!name)
4497 		return IOMEM_ERR_PTR(-ENOMEM);
4498 
4499 	if (!devm_request_mem_region(dev, res->start, size, name)) {
4500 		dev_err(dev, "can't request region for resource %pR\n", res);
4501 		return IOMEM_ERR_PTR(-EBUSY);
4502 	}
4503 
4504 	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4505 	if (!dest_ptr) {
4506 		dev_err(dev, "ioremap failed for resource %pR\n", res);
4507 		devm_release_mem_region(dev, res->start, size);
4508 		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4509 	}
4510 
4511 	return dest_ptr;
4512 }
4513 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4514 
4515 static void __pci_set_master(struct pci_dev *dev, bool enable)
4516 {
4517 	u16 old_cmd, cmd;
4518 
4519 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4520 	if (enable)
4521 		cmd = old_cmd | PCI_COMMAND_MASTER;
4522 	else
4523 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4524 	if (cmd != old_cmd) {
4525 		pci_dbg(dev, "%s bus mastering\n",
4526 			enable ? "enabling" : "disabling");
4527 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4528 	}
4529 	dev->is_busmaster = enable;
4530 }
4531 
4532 /**
4533  * pcibios_setup - process "pci=" kernel boot arguments
4534  * @str: string used to pass in "pci=" kernel boot arguments
4535  *
4536  * Process kernel boot arguments.  This is the default implementation.
4537  * Architecture specific implementations can override this as necessary.
4538  */
4539 char * __weak __init pcibios_setup(char *str)
4540 {
4541 	return str;
4542 }
4543 
4544 /**
4545  * pcibios_set_master - enable PCI bus-mastering for device dev
4546  * @dev: the PCI device to enable
4547  *
4548  * Enables PCI bus-mastering for the device.  This is the default
4549  * implementation.  Architecture specific implementations can override
4550  * this if necessary.
4551  */
4552 void __weak pcibios_set_master(struct pci_dev *dev)
4553 {
4554 	u8 lat;
4555 
4556 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4557 	if (pci_is_pcie(dev))
4558 		return;
4559 
4560 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4561 	if (lat < 16)
4562 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4563 	else if (lat > pcibios_max_latency)
4564 		lat = pcibios_max_latency;
4565 	else
4566 		return;
4567 
4568 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4569 }
4570 
4571 /**
4572  * pci_set_master - enables bus-mastering for device dev
4573  * @dev: the PCI device to enable
4574  *
4575  * Enables bus-mastering on the device and calls pcibios_set_master()
4576  * to do the needed arch specific settings.
4577  */
4578 void pci_set_master(struct pci_dev *dev)
4579 {
4580 	__pci_set_master(dev, true);
4581 	pcibios_set_master(dev);
4582 }
4583 EXPORT_SYMBOL(pci_set_master);
4584 
4585 /**
4586  * pci_clear_master - disables bus-mastering for device dev
4587  * @dev: the PCI device to disable
4588  */
4589 void pci_clear_master(struct pci_dev *dev)
4590 {
4591 	__pci_set_master(dev, false);
4592 }
4593 EXPORT_SYMBOL(pci_clear_master);
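
/*
 * Usage sketch: a driver typically enables bus mastering in probe(),
 * after pci_enable_device(), and clears it again on teardown:
 *
 *	pci_set_master(pdev);
 *	...
 *	pci_clear_master(pdev);
 */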
4594 
4595 /**
4596  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4597  * @dev: the PCI device for which MWI is to be enabled
4598  *
4599  * Helper function for pci_set_mwi.
4600  * Originally copied from drivers/net/acenic.c.
4601  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4602  *
4603  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4604  */
4605 int pci_set_cacheline_size(struct pci_dev *dev)
4606 {
4607 	u8 cacheline_size;
4608 
4609 	if (!pci_cache_line_size)
4610 		return -EINVAL;
4611 
4612 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4613 	   equal to or a multiple of the right value. */
4614 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4615 	if (cacheline_size >= pci_cache_line_size &&
4616 	    (cacheline_size % pci_cache_line_size) == 0)
4617 		return 0;
4618 
4619 	/* Write the correct value. */
4620 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4621 	/* Read it back. */
4622 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4623 	if (cacheline_size == pci_cache_line_size)
4624 		return 0;
4625 
4626 	pci_dbg(dev, "cache line size of %d is not supported\n",
4627 		   pci_cache_line_size << 2);
4628 
4629 	return -EINVAL;
4630 }
4631 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4632 
4633 /**
4634  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4635  * @dev: the PCI device for which MWI is enabled
4636  *
4637  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4638  *
4639  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4640  */
4641 int pci_set_mwi(struct pci_dev *dev)
4642 {
4643 #ifdef PCI_DISABLE_MWI
4644 	return 0;
4645 #else
4646 	int rc;
4647 	u16 cmd;
4648 
4649 	rc = pci_set_cacheline_size(dev);
4650 	if (rc)
4651 		return rc;
4652 
4653 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4654 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4655 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4656 		cmd |= PCI_COMMAND_INVALIDATE;
4657 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4658 	}
4659 	return 0;
4660 #endif
4661 }
4662 EXPORT_SYMBOL(pci_set_mwi);
4663 
4664 /**
4665  * pcim_set_mwi - a device-managed pci_set_mwi()
4666  * @dev: the PCI device for which MWI is enabled
4667  *
4668  * Managed pci_set_mwi().
4669  *
4670  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4671  */
4672 int pcim_set_mwi(struct pci_dev *dev)
4673 {
4674 	struct pci_devres *dr;
4675 
4676 	dr = find_pci_dr(dev);
4677 	if (!dr)
4678 		return -ENOMEM;
4679 
4680 	dr->mwi = 1;
4681 	return pci_set_mwi(dev);
4682 }
4683 EXPORT_SYMBOL(pcim_set_mwi);
4684 
4685 /**
4686  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4687  * @dev: the PCI device for which MWI is enabled
4688  *
4689  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4690  * Callers are not required to check the return value.
4691  *
4692  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4693  */
4694 int pci_try_set_mwi(struct pci_dev *dev)
4695 {
4696 #ifdef PCI_DISABLE_MWI
4697 	return 0;
4698 #else
4699 	return pci_set_mwi(dev);
4700 #endif
4701 }
4702 EXPORT_SYMBOL(pci_try_set_mwi);
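
/*
 * Usage sketch: MWI is only a performance hint, so callers normally
 * use the "try" variant and ignore the result:
 *
 *	pci_try_set_mwi(pdev);
 */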
4703 
4704 /**
4705  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4706  * @dev: the PCI device to disable
4707  *
4708  * Disables PCI Memory-Write-Invalidate transaction on the device
4709  */
4710 void pci_clear_mwi(struct pci_dev *dev)
4711 {
4712 #ifndef PCI_DISABLE_MWI
4713 	u16 cmd;
4714 
4715 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4716 	if (cmd & PCI_COMMAND_INVALIDATE) {
4717 		cmd &= ~PCI_COMMAND_INVALIDATE;
4718 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4719 	}
4720 #endif
4721 }
4722 EXPORT_SYMBOL(pci_clear_mwi);
4723 
4724 /**
4725  * pci_disable_parity - disable parity checking for device
4726  * @dev: the PCI device to operate on
4727  *
4728  * Disable parity checking for device @dev
4729  */
4730 void pci_disable_parity(struct pci_dev *dev)
4731 {
4732 	u16 cmd;
4733 
4734 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4735 	if (cmd & PCI_COMMAND_PARITY) {
4736 		cmd &= ~PCI_COMMAND_PARITY;
4737 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4738 	}
4739 }
4740 
4741 /**
4742  * pci_intx - enables/disables PCI INTx for device dev
4743  * @pdev: the PCI device to operate on
4744  * @enable: boolean: whether to enable or disable PCI INTx
4745  *
4746  * Enables/disables PCI INTx for device @pdev
4747  */
4748 void pci_intx(struct pci_dev *pdev, int enable)
4749 {
4750 	u16 pci_command, new;
4751 
4752 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4753 
4754 	if (enable)
4755 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4756 	else
4757 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4758 
4759 	if (new != pci_command) {
4760 		struct pci_devres *dr;
4761 
4762 		pci_write_config_word(pdev, PCI_COMMAND, new);
4763 
4764 		dr = find_pci_dr(pdev);
4765 		if (dr && !dr->restore_intx) {
4766 			dr->restore_intx = 1;
4767 			dr->orig_intx = !enable;
4768 		}
4769 	}
4770 }
4771 EXPORT_SYMBOL_GPL(pci_intx);
4772 
4773 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4774 {
4775 	struct pci_bus *bus = dev->bus;
4776 	bool mask_updated = true;
4777 	u32 cmd_status_dword;
4778 	u16 origcmd, newcmd;
4779 	unsigned long flags;
4780 	bool irq_pending;
4781 
4782 	/*
4783 	 * We do a single dword read to retrieve both command and status.
4784 	 * Document assumptions that make this possible.
4785 	 */
4786 	BUILD_BUG_ON(PCI_COMMAND % 4);
4787 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4788 
4789 	raw_spin_lock_irqsave(&pci_lock, flags);
4790 
4791 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4792 
4793 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4794 
4795 	/*
4796 	 * Check interrupt status register to see whether our device
4797 	 * triggered the interrupt (when masking) or the next IRQ is
4798 	 * already pending (when unmasking).
4799 	 */
4800 	if (mask != irq_pending) {
4801 		mask_updated = false;
4802 		goto done;
4803 	}
4804 
4805 	origcmd = cmd_status_dword;
4806 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4807 	if (mask)
4808 		newcmd |= PCI_COMMAND_INTX_DISABLE;
4809 	if (newcmd != origcmd)
4810 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4811 
4812 done:
4813 	raw_spin_unlock_irqrestore(&pci_lock, flags);
4814 
4815 	return mask_updated;
4816 }
4817 
4818 /**
4819  * pci_check_and_mask_intx - mask INTx on pending interrupt
4820  * @dev: the PCI device to operate on
4821  *
4822  * Check if the device dev has its INTx line asserted, mask it and return
4823  * true in that case. False is returned if no interrupt was pending.
4824  */
4825 bool pci_check_and_mask_intx(struct pci_dev *dev)
4826 {
4827 	return pci_check_and_set_intx_mask(dev, true);
4828 }
4829 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4830 
4831 /**
4832  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4833  * @dev: the PCI device to operate on
4834  *
4835  * Check if the device dev has its INTx line asserted, unmask it if not and
4836  * return true. False is returned and the mask remains active if there was
4837  * still an interrupt pending.
4838  */
4839 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4840 {
4841 	return pci_check_and_set_intx_mask(dev, false);
4842 }
4843 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
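
/*
 * Usage sketch (illustrative; "struct my_dev" and its members are
 * assumptions): a driver sharing a legacy IRQ line can use this pair
 * to dismiss interrupts that are not its own and defer real handling:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *md = data;
 *
 *		if (!pci_check_and_mask_intx(md->pdev))
 *			return IRQ_NONE;
 *		schedule_work(&md->work);
 *		return IRQ_HANDLED;
 *	}
 *
 * with the work handler calling pci_check_and_unmask_intx() when done.
 */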
4844 
4845 /**
4846  * pci_wait_for_pending_transaction - wait for pending transaction
4847  * @dev: the PCI device to operate on
4848  *
4849  * Return 0 if a transaction is still pending, 1 otherwise.
4850  */
4851 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4852 {
4853 	if (!pci_is_pcie(dev))
4854 		return 1;
4855 
4856 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4857 				    PCI_EXP_DEVSTA_TRPND);
4858 }
4859 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4860 
4861 /**
4862  * pcie_flr - initiate a PCIe function level reset
4863  * @dev: device to reset
4864  *
4865  * Initiate a function level reset unconditionally on @dev without
4866  * checking any flags or the DEVCAP register.
4867  */
4868 int pcie_flr(struct pci_dev *dev)
4869 {
4870 	if (!pci_wait_for_pending_transaction(dev))
4871 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4872 
4873 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4874 
4875 	if (dev->imm_ready)
4876 		return 0;
4877 
4878 	/*
4879 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4880 	 * 100ms, but may silently discard requests while the FLR is in
4881 	 * progress.  Wait 100ms before trying to access the device.
4882 	 */
4883 	msleep(100);
4884 
4885 	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4886 }
4887 EXPORT_SYMBOL_GPL(pcie_flr);
4888 
4889 /**
4890  * pcie_reset_flr - initiate a PCIe function level reset
4891  * @dev: device to reset
4892  * @probe: if true, return 0 if device can be reset this way
4893  *
4894  * Initiate a function level reset on @dev.
4895  */
4896 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4897 {
4898 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4899 		return -ENOTTY;
4900 
4901 	if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4902 		return -ENOTTY;
4903 
4904 	if (probe)
4905 		return 0;
4906 
4907 	return pcie_flr(dev);
4908 }
4909 EXPORT_SYMBOL_GPL(pcie_reset_flr);
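
/*
 * Usage sketch: callers first probe whether FLR is usable and then
 * perform the reset (PCI_RESET_PROBE/PCI_RESET_DO_RESET are the
 * standard probe/do flags from <linux/pci.h>):
 *
 *	if (!pcie_reset_flr(pdev, PCI_RESET_PROBE))
 *		pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
 */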
4910 
4911 static int pci_af_flr(struct pci_dev *dev, bool probe)
4912 {
4913 	int pos;
4914 	u8 cap;
4915 
4916 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4917 	if (!pos)
4918 		return -ENOTTY;
4919 
4920 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4921 		return -ENOTTY;
4922 
4923 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4924 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4925 		return -ENOTTY;
4926 
4927 	if (probe)
4928 		return 0;
4929 
4930 	/*
4931 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4932 	 * is used, so we use the control offset rather than status and shift
4933 	 * the test bit to match.
4934 	 */
4935 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4936 				 PCI_AF_STATUS_TP << 8))
4937 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4938 
4939 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4940 
4941 	if (dev->imm_ready)
4942 		return 0;
4943 
4944 	/*
4945 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4946 	 * updated 27 July 2006; a device must complete an FLR within
4947 	 * 100ms, but may silently discard requests while the FLR is in
4948 	 * progress.  Wait 100ms before trying to access the device.
4949 	 */
4950 	msleep(100);
4951 
4952 	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4953 }
4954 
4955 /**
4956  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4957  * @dev: Device to reset.
4958  * @probe: if true, return 0 if the device can be reset this way.
4959  *
4960  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4961  * unset, it will be reinitialized internally when going from PCI_D3hot to
4962  * PCI_D0.  If that's the case and the device is not in a low-power state
4963  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4964  *
4965  * NOTE: This causes the caller to sleep for twice the device power transition
4966  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4967  * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4968  * Moreover, only devices in D0 can be reset by this function.
4969  */
4970 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4971 {
4972 	u16 csr;
4973 
4974 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4975 		return -ENOTTY;
4976 
4977 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4978 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4979 		return -ENOTTY;
4980 
4981 	if (probe)
4982 		return 0;
4983 
4984 	if (dev->current_state != PCI_D0)
4985 		return -EINVAL;
4986 
4987 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4988 	csr |= PCI_D3hot;
4989 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4990 	pci_dev_d3_sleep(dev);
4991 
4992 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4993 	csr |= PCI_D0;
4994 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4995 	pci_dev_d3_sleep(dev);
4996 
4997 	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4998 }
4999 
5000 /**
5001  * pcie_wait_for_link_status - Wait for link status change
5002  * @pdev: Device whose link to wait for.
5003  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
5004  * @active: Waiting for active or inactive?
5005  *
5006  * Return 0 if successful, or -ETIMEDOUT if status has not changed within
5007  * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
5008  */
5009 static int pcie_wait_for_link_status(struct pci_dev *pdev,
5010 				     bool use_lt, bool active)
5011 {
5012 	u16 lnksta_mask, lnksta_match;
5013 	unsigned long end_jiffies;
5014 	u16 lnksta;
5015 
5016 	lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
5017 	lnksta_match = active ? lnksta_mask : 0;
5018 
5019 	end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
5020 	do {
5021 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
5022 		if ((lnksta & lnksta_mask) == lnksta_match)
5023 			return 0;
5024 		msleep(1);
5025 	} while (time_before(jiffies, end_jiffies));
5026 
5027 	return -ETIMEDOUT;
5028 }
5029 
5030 /**
5031  * pcie_retrain_link - Request a link retrain and wait for it to complete
5032  * @pdev: Device whose link to retrain.
5033  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
5034  *
5035  * Retrain completion status is retrieved from the Link Status Register
5036  * according to @use_lt.  It is not verified whether the use of the DLLLA
5037  * bit is valid.
5038  *
5039  * Return 0 if successful, or -ETIMEDOUT if training has not completed
5040  * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
5041  */
5042 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
5043 {
5044 	int rc;
5045 
5046 	/*
5047 	 * Ensure the updated LNKCTL parameters are used during link
5048 	 * training by checking that there is no ongoing link training to
5049 	 * avoid LTSSM race as recommended in Implementation Note at the
5050 	 * end of PCIe r6.0.1 sec 7.5.3.7.
5051 	 */
5052 	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
5053 	if (rc)
5054 		return rc;
5055 
5056 	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
5057 	if (pdev->clear_retrain_link) {
5058 		/*
5059 		 * Due to an erratum in some devices the Retrain Link bit
5060 		 * needs to be cleared again manually to allow the link
5061 		 * training to succeed.
5062 		 */
5063 		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
5064 	}
5065 
5066 	return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
5067 }
5068 
5069 /**
5070  * pcie_wait_for_link_delay - Wait until link is active or inactive
5071  * @pdev: Bridge device
5072  * @active: waiting for active or inactive?
5073  * @delay: Delay to wait after link has become active (in ms)
5074  *
5075  * Use this to wait till link becomes active or inactive.
5076  */
5077 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
5078 				     int delay)
5079 {
5080 	int rc;
5081 
5082 	/*
5083 	 * Some controllers might not implement link active reporting. In this
5084 	 * case, we wait for 1000 ms + any delay requested by the caller.
5085 	 */
5086 	if (!pdev->link_active_reporting) {
5087 		msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
5088 		return true;
5089 	}
5090 
5091 	/*
5092 	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
5093 	 * 20ms, after which we should expect the link to become active if the
5094 	 * reset was successful. If so, software must wait a minimum of 100ms
5095 	 * before sending configuration requests to devices downstream of this port.
5096 	 *
5097 	 * If the link fails to activate, either the device was physically
5098 	 * removed or the link is permanently failed.
5099 	 */
5100 	if (active)
5101 		msleep(20);
5102 	rc = pcie_wait_for_link_status(pdev, false, active);
5103 	if (active) {
5104 		if (rc)
5105 			rc = pcie_failed_link_retrain(pdev);
5106 		if (rc)
5107 			return false;
5108 
5109 		msleep(delay);
5110 		return true;
5111 	}
5112 
5113 	if (rc)
5114 		return false;
5115 
5116 	return true;
5117 }
5118 
5119 /**
5120  * pcie_wait_for_link - Wait until link is active or inactive
5121  * @pdev: Bridge device
5122  * @active: waiting for active or inactive?
5123  *
5124  * Use this to wait till link becomes active or inactive.
5125  */
5126 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
5127 {
5128 	return pcie_wait_for_link_delay(pdev, active, 100);
5129 }
5130 
5131 /*
5132  * Find maximum D3cold delay required by all the devices on the bus.  The
5133  * spec says 100 ms, but firmware can lower it and we allow drivers to
5134  * increase it as well.
5135  *
5136  * Called with @pci_bus_sem locked for reading.
5137  */
5138 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
5139 {
5140 	const struct pci_dev *pdev;
5141 	int min_delay = 100;
5142 	int max_delay = 0;
5143 
5144 	list_for_each_entry(pdev, &bus->devices, bus_list) {
5145 		if (pdev->d3cold_delay < min_delay)
5146 			min_delay = pdev->d3cold_delay;
5147 		if (pdev->d3cold_delay > max_delay)
5148 			max_delay = pdev->d3cold_delay;
5149 	}
5150 
5151 	return max(min_delay, max_delay);
5152 }
5153 
5154 /**
5155  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
5156  * @dev: PCI bridge
5157  * @reset_type: reset type in human-readable form
5158  *
5159  * Handle necessary delays before access to the devices on the secondary
5160  * side of the bridge are permitted after D3cold to D0 transition
5161  * or Conventional Reset.
5162  *
5163  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
5164  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
5165  * 4.3.2.
5166  *
5167  * Return 0 on success or -ENOTTY if the first device on the secondary bus
5168  * failed to become accessible.
5169  */
5170 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
5171 {
5172 	struct pci_dev *child;
5173 	int delay;
5174 
5175 	if (pci_dev_is_disconnected(dev))
5176 		return 0;
5177 
5178 	if (!pci_is_bridge(dev))
5179 		return 0;
5180 
5181 	down_read(&pci_bus_sem);
5182 
5183 	/*
5184 	 * We only deal with devices that are currently present on the bus.
5185 	 * For any hot-added devices the access delay is handled in pciehp
5186 	 * board_added(). In case of ACPI hotplug the firmware is expected
5187 	 * to configure the devices before OS is notified.
5188 	 */
5189 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
5190 		up_read(&pci_bus_sem);
5191 		return 0;
5192 	}
5193 
5194 	/* Take d3cold_delay requirements into account */
5195 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
5196 	if (!delay) {
5197 		up_read(&pci_bus_sem);
5198 		return 0;
5199 	}
5200 
5201 	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
5202 				 bus_list);
5203 	up_read(&pci_bus_sem);
5204 
5205 	/*
5206 	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
5207 	 * accessing the device after reset (that is, 1000 ms + 100 ms).
5208 	 */
5209 	if (!pci_is_pcie(dev)) {
5210 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
5211 		msleep(1000 + delay);
5212 		return 0;
5213 	}
5214 
5215 	/*
5216 	 * PCIe downstream and root ports that do not support speeds
5217 	 * greater than 5 GT/s need to wait a minimum of 100 ms. For higher
5218 	 * speeds (gen3) we first need to wait for the data link layer to
5219 	 * become active.
5220 	 *
5221 	 * However, 100 ms is the minimum and the PCIe spec says the
5222 	 * software must allow at least 1s before it can determine that a
5223 	 * device that did not respond is broken. A device can also take
5224 	 * longer than that to respond if it indicates so through Request
5225 	 * Retry Status completions.
5226 	 *
5227 	 * Therefore we wait for 100 ms and check for the device presence
5228 	 * until the timeout expires.
5229 	 */
5230 	if (!pcie_downstream_port(dev))
5231 		return 0;
5232 
5233 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
5234 		u16 status;
5235 
5236 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
5237 		msleep(delay);
5238 
5239 		if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
5240 			return 0;
5241 
5242 		/*
5243 		 * If the port supports active link reporting, we now check
5244 		 * whether the link is active and, if not, bail out early with
5245 		 * the assumption that the device is not present anymore.
5246 		 */
5247 		if (!dev->link_active_reporting)
5248 			return -ENOTTY;
5249 
5250 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
5251 		if (!(status & PCI_EXP_LNKSTA_DLLLA))
5252 			return -ENOTTY;
5253 
5254 		return pci_dev_wait(child, reset_type,
5255 				    PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
5256 	}
5257 
5258 	pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
5259 		delay);
5260 	if (!pcie_wait_for_link_delay(dev, true, delay)) {
5261 		/* Did not train, no need to wait any further */
5262 		pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
5263 		return -ENOTTY;
5264 	}
5265 
5266 	return pci_dev_wait(child, reset_type,
5267 			    PCIE_RESET_READY_POLL_MS - delay);
5268 }
5269 
5270 void pci_reset_secondary_bus(struct pci_dev *dev)
5271 {
5272 	u16 ctrl;
5273 
5274 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
5275 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
5276 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5277 
5278 	/*
5279 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
5280 	 * this to 2ms to ensure that we meet the minimum requirement.
5281 	 */
5282 	msleep(2);
5283 
5284 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
5285 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5286 }
5287 
5288 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
5289 {
5290 	pci_reset_secondary_bus(dev);
5291 }
5292 
5293 /**
5294  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
5295  * @dev: Bridge device
5296  *
5297  * Use the bridge control register to assert reset on the secondary bus.
5298  * Devices on the secondary bus are left in power-on state.
5299  */
5300 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5301 {
5302 	pcibios_reset_secondary_bus(dev);
5303 
5304 	return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5305 }
5306 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5307 
5308 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5309 {
5310 	struct pci_dev *pdev;
5311 
5312 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5313 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5314 		return -ENOTTY;
5315 
5316 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5317 		if (pdev != dev)
5318 			return -ENOTTY;
5319 
5320 	if (probe)
5321 		return 0;
5322 
5323 	return pci_bridge_secondary_bus_reset(dev->bus->self);
5324 }
5325 
5326 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5327 {
5328 	int rc = -ENOTTY;
5329 
5330 	if (!hotplug || !try_module_get(hotplug->owner))
5331 		return rc;
5332 
5333 	if (hotplug->ops->reset_slot)
5334 		rc = hotplug->ops->reset_slot(hotplug, probe);
5335 
5336 	module_put(hotplug->owner);
5337 
5338 	return rc;
5339 }
5340 
5341 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5342 {
5343 	if (dev->multifunction || dev->subordinate || !dev->slot ||
5344 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5345 		return -ENOTTY;
5346 
5347 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5348 }
5349 
5350 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5351 {
5352 	int rc;
5353 
5354 	rc = pci_dev_reset_slot_function(dev, probe);
5355 	if (rc != -ENOTTY)
5356 		return rc;
5357 	return pci_parent_bus_reset(dev, probe);
5358 }
5359 
5360 void pci_dev_lock(struct pci_dev *dev)
5361 {
5362 	/* block PM suspend, driver probe, etc. */
5363 	device_lock(&dev->dev);
5364 	pci_cfg_access_lock(dev);
5365 }
5366 EXPORT_SYMBOL_GPL(pci_dev_lock);
5367 
5368 /* Return 1 on successful lock, 0 on contention */
5369 int pci_dev_trylock(struct pci_dev *dev)
5370 {
5371 	if (device_trylock(&dev->dev)) {
5372 		if (pci_cfg_access_trylock(dev))
5373 			return 1;
5374 		device_unlock(&dev->dev);
5375 	}
5376 
5377 	return 0;
5378 }
5379 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5380 
5381 void pci_dev_unlock(struct pci_dev *dev)
5382 {
5383 	pci_cfg_access_unlock(dev);
5384 	device_unlock(&dev->dev);
5385 }
5386 EXPORT_SYMBOL_GPL(pci_dev_unlock);
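
/*
 * Usage sketch: callers that cannot block on contention pair
 * pci_dev_trylock() with pci_dev_unlock():
 *
 *	if (pci_dev_trylock(pdev)) {
 *		...
 *		pci_dev_unlock(pdev);
 *	}
 */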
5387 
5388 static void pci_dev_save_and_disable(struct pci_dev *dev)
5389 {
5390 	const struct pci_error_handlers *err_handler =
5391 			dev->driver ? dev->driver->err_handler : NULL;
5392 
5393 	/*
5394 	 * dev->driver->err_handler->reset_prepare() is protected against
5395 	 * races with ->remove() by the device lock, which must be held by
5396 	 * the caller.
5397 	 */
5398 	if (err_handler && err_handler->reset_prepare)
5399 		err_handler->reset_prepare(dev);
5400 
5401 	/*
5402 	 * Wake-up device prior to save.  PM registers default to D0 after
5403 	 * reset and a simple register restore doesn't reliably return
5404 	 * to a non-D0 state anyway.
5405 	 */
5406 	pci_set_power_state(dev, PCI_D0);
5407 
5408 	pci_save_state(dev);
5409 	/*
5410 	 * Disable the device by clearing the Command register, except for
5411 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5412 	 * BARs, but also prevents the device from being Bus Master, preventing
5413 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5414 	 * compliant devices, INTx-disable prevents legacy interrupts.
5415 	 */
5416 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5417 }
5418 
5419 static void pci_dev_restore(struct pci_dev *dev)
5420 {
5421 	const struct pci_error_handlers *err_handler =
5422 			dev->driver ? dev->driver->err_handler : NULL;
5423 
5424 	pci_restore_state(dev);
5425 
5426 	/*
5427 	 * dev->driver->err_handler->reset_done() is protected against
5428 	 * races with ->remove() by the device lock, which must be held by
5429 	 * the caller.
5430 	 */
5431 	if (err_handler && err_handler->reset_done)
5432 		err_handler->reset_done(dev);
5433 }
5434 
5435 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5436 static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5437 	{ },
5438 	{ pci_dev_specific_reset, .name = "device_specific" },
5439 	{ pci_dev_acpi_reset, .name = "acpi" },
5440 	{ pcie_reset_flr, .name = "flr" },
5441 	{ pci_af_flr, .name = "af_flr" },
5442 	{ pci_pm_reset, .name = "pm" },
5443 	{ pci_reset_bus_function, .name = "bus" },
5444 };
5445 
5446 static ssize_t reset_method_show(struct device *dev,
5447 				 struct device_attribute *attr, char *buf)
5448 {
5449 	struct pci_dev *pdev = to_pci_dev(dev);
5450 	ssize_t len = 0;
5451 	int i, m;
5452 
5453 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5454 		m = pdev->reset_methods[i];
5455 		if (!m)
5456 			break;
5457 
5458 		len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5459 				     pci_reset_fn_methods[m].name);
5460 	}
5461 
5462 	if (len)
5463 		len += sysfs_emit_at(buf, len, "\n");
5464 
5465 	return len;
5466 }
5467 
5468 static int reset_method_lookup(const char *name)
5469 {
5470 	int m;
5471 
5472 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5473 		if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5474 			return m;
5475 	}
5476 
5477 	return 0;	/* not found */
5478 }
5479 
5480 static ssize_t reset_method_store(struct device *dev,
5481 				  struct device_attribute *attr,
5482 				  const char *buf, size_t count)
5483 {
5484 	struct pci_dev *pdev = to_pci_dev(dev);
5485 	char *options, *name;
5486 	int m, n;
5487 	u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5488 
5489 	if (sysfs_streq(buf, "")) {
5490 		pdev->reset_methods[0] = 0;
5491 		pci_warn(pdev, "All device reset methods disabled by user\n");
5492 		return count;
5493 	}
5494 
5495 	if (sysfs_streq(buf, "default")) {
5496 		pci_init_reset_methods(pdev);
5497 		return count;
5498 	}
5499 
5500 	options = kstrndup(buf, count, GFP_KERNEL);
5501 	if (!options)
5502 		return -ENOMEM;
5503 
5504 	n = 0;
5505 	while ((name = strsep(&options, " ")) != NULL) {
5506 		if (sysfs_streq(name, ""))
5507 			continue;
5508 
5509 		name = strim(name);
5510 
5511 		m = reset_method_lookup(name);
5512 		if (!m) {
5513 			pci_err(pdev, "Invalid reset method '%s'\n", name);
5514 			goto error;
5515 		}
5516 
5517 		if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5518 			pci_err(pdev, "Unsupported reset method '%s'\n", name);
5519 			goto error;
5520 		}
5521 
5522 		if (n == PCI_NUM_RESET_METHODS - 1) {
5523 			pci_err(pdev, "Too many reset methods\n");
5524 			goto error;
5525 		}
5526 
5527 		reset_methods[n++] = m;
5528 	}
5529 
5530 	reset_methods[n] = 0;
5531 
5532 	/* Warn if dev-specific supported but not highest priority */
5533 	if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5534 	    reset_methods[0] != 1)
5535 		pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user\n");
5536 	memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5537 	kfree(options);
5538 	return count;
5539 
5540 error:
5541 	/* Leave previous methods unchanged */
5542 	kfree(options);
5543 	return -EINVAL;
5544 }

5545 static DEVICE_ATTR_RW(reset_method);
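
/*
 * Example interaction with the attribute defined above, via sysfs
 * (device address illustrative):
 *
 *	# cat /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	flr bus
 *	# echo "bus flr" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	# echo default > /sys/bus/pci/devices/0000:01:00.0/reset_method
 */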
5546 
5547 static struct attribute *pci_dev_reset_method_attrs[] = {
5548 	&dev_attr_reset_method.attr,
5549 	NULL,
5550 };
5551 
5552 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5553 						    struct attribute *a, int n)
5554 {
5555 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5556 
5557 	if (!pci_reset_supported(pdev))
5558 		return 0;
5559 
5560 	return a->mode;
5561 }
5562 
5563 const struct attribute_group pci_dev_reset_method_attr_group = {
5564 	.attrs = pci_dev_reset_method_attrs,
5565 	.is_visible = pci_dev_reset_method_attr_is_visible,
5566 };
5567 
5568 /**
5569  * __pci_reset_function_locked - reset a PCI device function while holding the @dev mutex lock
5571  * @dev: PCI device to reset
5572  *
5573  * Some devices allow an individual function to be reset without affecting
5574  * other functions in the same device.  The PCI device must be responsive
5575  * to PCI config space in order to use this function.
5576  *
5577  * The device function is presumed to be unused and the caller is holding
5578  * the device mutex lock when this function is called.
5579  *
5580  * Resetting the device will make the contents of PCI configuration space
5581  * random, so any caller of this must be prepared to reinitialise the
5582  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5583  * etc.
5584  *
5585  * Returns 0 if the device function was successfully reset or negative if the
5586  * device doesn't support resetting a single function.
5587  */
5588 int __pci_reset_function_locked(struct pci_dev *dev)
5589 {
5590 	int i, m, rc;
5591 
5592 	might_sleep();
5593 
5594 	/*
5595 	 * A reset method returns -ENOTTY if it doesn't support this device and
5596 	 * we should try the next method.
5597 	 *
5598 	 * If it returns 0 (success), we're finished.  If it returns any other
5599 	 * error, we're also finished: this indicates that further reset
5600 	 * mechanisms might be broken on the device.
5601 	 */
5602 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5603 		m = dev->reset_methods[i];
5604 		if (!m)
5605 			return -ENOTTY;
5606 
5607 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5608 		if (!rc)
5609 			return 0;
5610 		if (rc != -ENOTTY)
5611 			return rc;
5612 	}
5613 
5614 	return -ENOTTY;
5615 }
5616 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5617 
5618 /**
5619  * pci_init_reset_methods - check whether device can be safely reset and store supported reset mechanisms
5621  * @dev: PCI device to check for reset mechanisms
5622  *
5623  * Some devices allow an individual function to be reset without affecting
5624  * other functions in the same device.  The PCI device must be in D0-D3hot
5625  * state.
5626  *
5627  * Stores reset mechanisms supported by device in reset_methods byte array
5628  * which is a member of struct pci_dev.
5629  */
5630 void pci_init_reset_methods(struct pci_dev *dev)
5631 {
5632 	int m, i, rc;
5633 
5634 	BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5635 
5636 	might_sleep();
5637 
5638 	i = 0;
5639 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5640 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5641 		if (!rc)
5642 			dev->reset_methods[i++] = m;
5643 		else if (rc != -ENOTTY)
5644 			break;
5645 	}
5646 
5647 	dev->reset_methods[i] = 0;
5648 }
5649 
5650 /**
5651  * pci_reset_function - quiesce and reset a PCI device function
5652  * @dev: PCI device to reset
5653  *
5654  * Some devices allow an individual function to be reset without affecting
5655  * other functions in the same device.  The PCI device must be responsive
5656  * to PCI config space in order to use this function.
5657  *
5658  * This function does not just reset the PCI portion of a device, but
5659  * clears all the state associated with the device.  This function differs
5660  * from __pci_reset_function_locked() in that it saves and restores device state
5661  * over the reset and takes the PCI device lock.
5662  *
5663  * Returns 0 if the device function was successfully reset or negative if the
5664  * device doesn't support resetting a single function.
5665  */
5666 int pci_reset_function(struct pci_dev *dev)
5667 {
5668 	int rc;
5669 
5670 	if (!pci_reset_supported(dev))
5671 		return -ENOTTY;
5672 
5673 	pci_dev_lock(dev);
5674 	pci_dev_save_and_disable(dev);
5675 
5676 	rc = __pci_reset_function_locked(dev);
5677 
5678 	pci_dev_restore(dev);
5679 	pci_dev_unlock(dev);
5680 
5681 	return rc;
5682 }
5683 EXPORT_SYMBOL_GPL(pci_reset_function);
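
/*
 * Sketch of a typical caller (illustrative; mydrv_reinit_hw() is a
 * hypothetical helper that re-programs device state after the reset):
 *
 *	rc = pci_reset_function(pdev);
 *	if (rc)
 *		return rc;
 *	return mydrv_reinit_hw(pdev);
 */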
5684 
5685 /**
5686  * pci_reset_function_locked - quiesce and reset a PCI device function
5687  * @dev: PCI device to reset
5688  *
5689  * Some devices allow an individual function to be reset without affecting
5690  * other functions in the same device.  The PCI device must be responsive
5691  * to PCI config space in order to use this function.
5692  *
5693  * This function does not just reset the PCI portion of a device, but
5694  * clears all the state associated with the device.  This function differs
5695  * from __pci_reset_function_locked() in that it saves and restores device state
5696  * over the reset.  It also differs from pci_reset_function() in that it
5697  * requires the PCI device lock to be held.
5698  *
5699  * Returns 0 if the device function was successfully reset or negative if the
5700  * device doesn't support resetting a single function.
5701  */
5702 int pci_reset_function_locked(struct pci_dev *dev)
5703 {
5704 	int rc;
5705 
5706 	if (!pci_reset_supported(dev))
5707 		return -ENOTTY;
5708 
5709 	pci_dev_save_and_disable(dev);
5710 
5711 	rc = __pci_reset_function_locked(dev);
5712 
5713 	pci_dev_restore(dev);
5714 
5715 	return rc;
5716 }
5717 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5718 
5719 /**
5720  * pci_try_reset_function - quiesce and reset a PCI device function
5721  * @dev: PCI device to reset
5722  *
5723  * Same as above, except return -EAGAIN if unable to lock device.
5724  */
5725 int pci_try_reset_function(struct pci_dev *dev)
5726 {
5727 	int rc;
5728 
5729 	if (!pci_reset_supported(dev))
5730 		return -ENOTTY;
5731 
5732 	if (!pci_dev_trylock(dev))
5733 		return -EAGAIN;
5734 
5735 	pci_dev_save_and_disable(dev);
5736 	rc = __pci_reset_function_locked(dev);
5737 	pci_dev_restore(dev);
5738 	pci_dev_unlock(dev);
5739 
5740 	return rc;
5741 }
5742 EXPORT_SYMBOL_GPL(pci_try_reset_function);
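
/*
 * Illustrative sketch: a caller that must not block on a contended
 * device lock (e.g., an ioctl path) can bound its retries:
 *
 *	do {
 *		rc = pci_try_reset_function(pdev);
 *		if (rc != -EAGAIN)
 *			break;
 *		msleep(20);
 *	} while (--retries);
 */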
5743 
5744 /* Do any devices on or below this bus prevent a bus reset? */
5745 static bool pci_bus_resettable(struct pci_bus *bus)
5746 {
5747 	struct pci_dev *dev;
5748 
5750 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5751 		return false;
5752 
5753 	list_for_each_entry(dev, &bus->devices, bus_list) {
5754 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5755 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5756 			return false;
5757 	}
5758 
5759 	return true;
5760 }
5761 
5762 /* Lock devices from the top of the tree down */
5763 static void pci_bus_lock(struct pci_bus *bus)
5764 {
5765 	struct pci_dev *dev;
5766 
5767 	list_for_each_entry(dev, &bus->devices, bus_list) {
5768 		pci_dev_lock(dev);
5769 		if (dev->subordinate)
5770 			pci_bus_lock(dev->subordinate);
5771 	}
5772 }
5773 
5774 /* Unlock devices from the bottom of the tree up */
5775 static void pci_bus_unlock(struct pci_bus *bus)
5776 {
5777 	struct pci_dev *dev;
5778 
5779 	list_for_each_entry(dev, &bus->devices, bus_list) {
5780 		if (dev->subordinate)
5781 			pci_bus_unlock(dev->subordinate);
5782 		pci_dev_unlock(dev);
5783 	}
5784 }
5785 
5786 /* Return 1 on successful lock, 0 on contention */
5787 static int pci_bus_trylock(struct pci_bus *bus)
5788 {
5789 	struct pci_dev *dev;
5790 
5791 	list_for_each_entry(dev, &bus->devices, bus_list) {
5792 		if (!pci_dev_trylock(dev))
5793 			goto unlock;
5794 		if (dev->subordinate) {
5795 			if (!pci_bus_trylock(dev->subordinate)) {
5796 				pci_dev_unlock(dev);
5797 				goto unlock;
5798 			}
5799 		}
5800 	}
5801 	return 1;
5802 
5803 unlock:
5804 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5805 		if (dev->subordinate)
5806 			pci_bus_unlock(dev->subordinate);
5807 		pci_dev_unlock(dev);
5808 	}
5809 	return 0;
5810 }
5811 
5812 /* Do any devices on or below this slot prevent a bus reset? */
5813 static bool pci_slot_resettable(struct pci_slot *slot)
5814 {
5815 	struct pci_dev *dev;
5816 
5817 	if (slot->bus->self &&
5818 	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5819 		return false;
5820 
5821 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5822 		if (!dev->slot || dev->slot != slot)
5823 			continue;
5824 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5825 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5826 			return false;
5827 	}
5828 
5829 	return true;
5830 }
5831 
5832 /* Lock devices from the top of the tree down */
5833 static void pci_slot_lock(struct pci_slot *slot)
5834 {
5835 	struct pci_dev *dev;
5836 
5837 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5838 		if (!dev->slot || dev->slot != slot)
5839 			continue;
5840 		pci_dev_lock(dev);
5841 		if (dev->subordinate)
5842 			pci_bus_lock(dev->subordinate);
5843 	}
5844 }
5845 
5846 /* Unlock devices from the bottom of the tree up */
5847 static void pci_slot_unlock(struct pci_slot *slot)
5848 {
5849 	struct pci_dev *dev;
5850 
5851 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5852 		if (!dev->slot || dev->slot != slot)
5853 			continue;
5854 		if (dev->subordinate)
5855 			pci_bus_unlock(dev->subordinate);
5856 		pci_dev_unlock(dev);
5857 	}
5858 }
5859 
5860 /* Return 1 on successful lock, 0 on contention */
5861 static int pci_slot_trylock(struct pci_slot *slot)
5862 {
5863 	struct pci_dev *dev;
5864 
5865 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5866 		if (!dev->slot || dev->slot != slot)
5867 			continue;
5868 		if (!pci_dev_trylock(dev))
5869 			goto unlock;
5870 		if (dev->subordinate) {
5871 			if (!pci_bus_trylock(dev->subordinate)) {
5872 				pci_dev_unlock(dev);
5873 				goto unlock;
5874 			}
5875 		}
5876 	}
5877 	return 1;
5878 
5879 unlock:
5880 	list_for_each_entry_continue_reverse(dev,
5881 					     &slot->bus->devices, bus_list) {
5882 		if (!dev->slot || dev->slot != slot)
5883 			continue;
5884 		if (dev->subordinate)
5885 			pci_bus_unlock(dev->subordinate);
5886 		pci_dev_unlock(dev);
5887 	}
5888 	return 0;
5889 }
5890 
5891 /*
5892  * Save and disable devices from the top of the tree down while holding
5893  * the @dev mutex lock for the entire tree.
5894  */
5895 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5896 {
5897 	struct pci_dev *dev;
5898 
5899 	list_for_each_entry(dev, &bus->devices, bus_list) {
5900 		pci_dev_save_and_disable(dev);
5901 		if (dev->subordinate)
5902 			pci_bus_save_and_disable_locked(dev->subordinate);
5903 	}
5904 }
5905 
5906 /*
5907  * Restore devices from top of the tree down while holding @dev mutex lock
5908  * for the entire tree.  Parent bridges need to be restored before we can
5909  * get to subordinate devices.
5910  */
5911 static void pci_bus_restore_locked(struct pci_bus *bus)
5912 {
5913 	struct pci_dev *dev;
5914 
5915 	list_for_each_entry(dev, &bus->devices, bus_list) {
5916 		pci_dev_restore(dev);
5917 		if (dev->subordinate)
5918 			pci_bus_restore_locked(dev->subordinate);
5919 	}
5920 }
5921 
5922 /*
5923  * Save and disable devices from the top of the tree down while holding
5924  * the @dev mutex lock for the entire tree.
5925  */
5926 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5927 {
5928 	struct pci_dev *dev;
5929 
5930 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5931 		if (!dev->slot || dev->slot != slot)
5932 			continue;
5933 		pci_dev_save_and_disable(dev);
5934 		if (dev->subordinate)
5935 			pci_bus_save_and_disable_locked(dev->subordinate);
5936 	}
5937 }
5938 
5939 /*
5940  * Restore devices from top of the tree down while holding @dev mutex lock
5941  * for the entire tree.  Parent bridges need to be restored before we can
5942  * get to subordinate devices.
5943  */
5944 static void pci_slot_restore_locked(struct pci_slot *slot)
5945 {
5946 	struct pci_dev *dev;
5947 
5948 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5949 		if (!dev->slot || dev->slot != slot)
5950 			continue;
5951 		pci_dev_restore(dev);
5952 		if (dev->subordinate)
5953 			pci_bus_restore_locked(dev->subordinate);
5954 	}
5955 }
5956 
5957 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5958 {
5959 	int rc;
5960 
5961 	if (!slot || !pci_slot_resettable(slot))
5962 		return -ENOTTY;
5963 
5964 	if (!probe)
5965 		pci_slot_lock(slot);
5966 
5967 	might_sleep();
5968 
5969 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5970 
5971 	if (!probe)
5972 		pci_slot_unlock(slot);
5973 
5974 	return rc;
5975 }
5976 
5977 /**
5978  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5979  * @slot: PCI slot to probe
5980  *
5981  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5982  */
5983 int pci_probe_reset_slot(struct pci_slot *slot)
5984 {
5985 	return pci_slot_reset(slot, PCI_RESET_PROBE);
5986 }
5987 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5988 
5989 /**
5990  * __pci_reset_slot - Try to reset a PCI slot
5991  * @slot: PCI slot to reset
5992  *
5993  * A PCI bus may host multiple slots; each slot may support a reset mechanism
5994  * independent of other slots.  For instance, some slots may support slot power
5995  * control.  In the case of a 1:1 bus to slot architecture, this function may
5996  * wrap the bus reset to avoid spurious slot related events such as hotplug.
5997  * Generally a slot reset should be attempted before a bus reset.  All of the
5998  * functions of the slot and any subordinate buses behind the slot are reset
5999  * through this function.  PCI config space of all devices in the slot and
6000  * behind the slot is saved before and restored after reset.
6001  *
6002  * Same as above except return -EAGAIN if the slot cannot be locked
6003  */
6004 static int __pci_reset_slot(struct pci_slot *slot)
6005 {
6006 	int rc;
6007 
6008 	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
6009 	if (rc)
6010 		return rc;
6011 
6012 	if (pci_slot_trylock(slot)) {
6013 		pci_slot_save_and_disable_locked(slot);
6014 		might_sleep();
6015 		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
6016 		pci_slot_restore_locked(slot);
6017 		pci_slot_unlock(slot);
6018 	} else {
6019 		rc = -EAGAIN;
	}
6020 
6021 	return rc;
6022 }
6023 
6024 static int pci_bus_reset(struct pci_bus *bus, bool probe)
6025 {
6026 	int ret;
6027 
6028 	if (!bus->self || !pci_bus_resettable(bus))
6029 		return -ENOTTY;
6030 
6031 	if (probe)
6032 		return 0;
6033 
6034 	pci_bus_lock(bus);
6035 
6036 	might_sleep();
6037 
6038 	ret = pci_bridge_secondary_bus_reset(bus->self);
6039 
6040 	pci_bus_unlock(bus);
6041 
6042 	return ret;
6043 }
6044 
6045 /**
6046  * pci_bus_error_reset - reset the bridge's subordinate bus
6047  * @bridge: The parent device that connects to the bus to reset
6048  *
6049  * This function will first try to reset the slots on this bus if the method is
6050  * available. If slot reset fails or is not available, this will fall back to a
6051  * secondary bus reset.
6052  */
6053 int pci_bus_error_reset(struct pci_dev *bridge)
6054 {
6055 	struct pci_bus *bus = bridge->subordinate;
6056 	struct pci_slot *slot;
6057 
6058 	if (!bus)
6059 		return -ENOTTY;
6060 
6061 	mutex_lock(&pci_slot_mutex);
6062 	if (list_empty(&bus->slots))
6063 		goto bus_reset;
6064 
6065 	list_for_each_entry(slot, &bus->slots, list)
6066 		if (pci_probe_reset_slot(slot))
6067 			goto bus_reset;
6068 
6069 	list_for_each_entry(slot, &bus->slots, list)
6070 		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
6071 			goto bus_reset;
6072 
6073 	mutex_unlock(&pci_slot_mutex);
6074 	return 0;
6075 bus_reset:
6076 	mutex_unlock(&pci_slot_mutex);
6077 	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
6078 }
6079 
6080 /**
6081  * pci_probe_reset_bus - probe whether a PCI bus can be reset
6082  * @bus: PCI bus to probe
6083  *
6084  * Return 0 if bus can be reset, negative if a bus reset is not supported.
6085  */
6086 int pci_probe_reset_bus(struct pci_bus *bus)
6087 {
6088 	return pci_bus_reset(bus, PCI_RESET_PROBE);
6089 }
6090 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
6091 
6092 /**
6093  * __pci_reset_bus - Try to reset a PCI bus
6094  * @bus: top level PCI bus to reset
6095  *
6096  * Same as above except return -EAGAIN if the bus cannot be locked
6097  */
6098 static int __pci_reset_bus(struct pci_bus *bus)
6099 {
6100 	int rc;
6101 
6102 	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
6103 	if (rc)
6104 		return rc;
6105 
6106 	if (pci_bus_trylock(bus)) {
6107 		pci_bus_save_and_disable_locked(bus);
6108 		might_sleep();
6109 		rc = pci_bridge_secondary_bus_reset(bus->self);
6110 		pci_bus_restore_locked(bus);
6111 		pci_bus_unlock(bus);
6112 	} else {
6113 		rc = -EAGAIN;
	}
6114 
6115 	return rc;
6116 }
6117 
6118 /**
6119  * pci_reset_bus - Try to reset a PCI bus
6120  * @pdev: top level PCI device to reset via slot/bus
6121  *
6122  * Same as above except return -EAGAIN if the bus cannot be locked
6123  */
6124 int pci_reset_bus(struct pci_dev *pdev)
6125 {
6126 	return (!pci_probe_reset_slot(pdev->slot)) ?
6127 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
6128 }
6129 EXPORT_SYMBOL_GPL(pci_reset_bus);
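
/*
 * Illustrative sketch: a last-resort reset when no function-level
 * method exists, affecting every device in the slot or on the bus:
 *
 *	if (!pci_reset_supported(pdev))
 *		rc = pci_reset_bus(pdev);
 */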
6130 
6131 /**
6132  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
6133  * @dev: PCI device to query
6134  *
6135  * Returns mmrbc: maximum designed memory read count in bytes or
6136  * appropriate error value.
6137  */
6138 int pcix_get_max_mmrbc(struct pci_dev *dev)
6139 {
6140 	int cap;
6141 	u32 stat;
6142 
6143 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6144 	if (!cap)
6145 		return -EINVAL;
6146 
6147 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
6148 		return -EINVAL;
6149 
6150 	return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
6151 }
6152 EXPORT_SYMBOL(pcix_get_max_mmrbc);
6153 
6154 /**
6155  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
6156  * @dev: PCI device to query
6157  *
6158  * Returns mmrbc: maximum memory read count in bytes or appropriate error
6159  * value.
6160  */
6161 int pcix_get_mmrbc(struct pci_dev *dev)
6162 {
6163 	int cap;
6164 	u16 cmd;
6165 
6166 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6167 	if (!cap)
6168 		return -EINVAL;
6169 
6170 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
6171 		return -EINVAL;
6172 
6173 	return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
6174 }
6175 EXPORT_SYMBOL(pcix_get_mmrbc);
6176 
6177 /**
6178  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
6179  * @dev: PCI device to query
6180  * @mmrbc: maximum memory read count in bytes
6181  *    valid values are 512, 1024, 2048, 4096
6182  *
6183  * If possible, sets the maximum memory read byte count; some bridges have
6184  * errata that prevent this.
6185  */
6186 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
6187 {
6188 	int cap;
6189 	u32 stat, v, o;
6190 	u16 cmd;
6191 
6192 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
6193 		return -EINVAL;
6194 
6195 	v = ffs(mmrbc) - 10;
6196 
6197 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6198 	if (!cap)
6199 		return -EINVAL;
6200 
6201 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
6202 		return -EINVAL;
6203 
6204 	if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
6205 		return -E2BIG;
6206 
6207 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
6208 		return -EINVAL;
6209 
6210 	o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
6211 	if (o != v) {
6212 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
6213 			return -EIO;
6214 
6215 		cmd &= ~PCI_X_CMD_MAX_READ;
6216 		cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
6217 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
6218 			return -EIO;
6219 	}
6220 	return 0;
6221 }
6222 EXPORT_SYMBOL(pcix_set_mmrbc);
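
/*
 * Worked example of the encoding above: mmrbc = 2048 gives
 * v = ffs(2048) - 10 = 12 - 10 = 2, matching the PCI-X encoding
 * 0 => 512, 1 => 1024, 2 => 2048, 3 => 4096 bytes (hence the
 * "512 << field" reads in the getters above).
 */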
6223 
6224 /**
6225  * pcie_get_readrq - get PCI Express read request size
6226  * @dev: PCI device to query
6227  *
6228  * Returns maximum memory read request in bytes or appropriate error value.
6229  */
6230 int pcie_get_readrq(struct pci_dev *dev)
6231 {
6232 	u16 ctl;
6233 
6234 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6235 
6236 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
6237 }
6238 EXPORT_SYMBOL(pcie_get_readrq);
6239 
6240 /**
6241  * pcie_set_readrq - set PCI Express maximum memory read request
6242  * @dev: PCI device to query
6243  * @rq: maximum memory read count in bytes
6244  *    valid values are 128, 256, 512, 1024, 2048, 4096
6245  *
6246  * If possible, sets the maximum memory read request size in bytes.
6247  */
6248 int pcie_set_readrq(struct pci_dev *dev, int rq)
6249 {
6250 	u16 v;
6251 	int ret;
6252 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
6253 
6254 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6255 		return -EINVAL;
6256 
6257 	/*
6258 	 * If using the "performance" PCIe config, we clamp the read rq
6259 	 * size to the max packet size to keep the host bridge from
6260 	 * generating requests larger than we can cope with.
6261 	 */
6262 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6263 		int mps = pcie_get_mps(dev);
6264 
6265 		if (mps < rq)
6266 			rq = mps;
6267 	}
6268 
6269 	v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);
6270 
6271 	if (bridge->no_inc_mrrs) {
6272 		int max_mrrs = pcie_get_readrq(dev);
6273 
6274 		if (rq > max_mrrs) {
6275 			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n",
				 rq, max_mrrs);
6276 			return -EINVAL;
6277 		}
6278 	}
6279 
6280 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6281 						  PCI_EXP_DEVCTL_READRQ, v);
6282 
6283 	return pcibios_err_to_errno(ret);
6284 }
6285 EXPORT_SYMBOL(pcie_set_readrq);
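
/*
 * Usage sketch (illustrative): drivers that issue large DMA reads often
 * raise MRRS once the device is enabled:
 *
 *	rc = pcie_set_readrq(pdev, 4096);
 *	if (rc)
 *		pci_warn(pdev, "failed to set MRRS: %d\n", rc);
 *
 * The encoding mirrors pcie_get_readrq(): e.g., rq = 512 gives
 * ffs(512) - 8 = 2, and 128 << 2 = 512.
 */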
6286 
6287 /**
6288  * pcie_get_mps - get PCI Express maximum payload size
6289  * @dev: PCI device to query
6290  *
6291  * Returns maximum payload size in bytes
6292  */
6293 int pcie_get_mps(struct pci_dev *dev)
6294 {
6295 	u16 ctl;
6296 
6297 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6298 
6299 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
6300 }
6301 EXPORT_SYMBOL(pcie_get_mps);
6302 
6303 /**
6304  * pcie_set_mps - set PCI Express maximum payload size
6305  * @dev: PCI device to query
6306  * @mps: maximum payload size in bytes
6307  *    valid values are 128, 256, 512, 1024, 2048, 4096
6308  *
6309  * If possible, sets the maximum payload size.
6310  */
6311 int pcie_set_mps(struct pci_dev *dev, int mps)
6312 {
6313 	u16 v;
6314 	int ret;
6315 
6316 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6317 		return -EINVAL;
6318 
6319 	v = ffs(mps) - 8;
6320 	if (v > dev->pcie_mpss)
6321 		return -EINVAL;
6322 	v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
6323 
6324 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6325 						  PCI_EXP_DEVCTL_PAYLOAD, v);
6326 
6327 	return pcibios_err_to_errno(ret);
6328 }
6329 EXPORT_SYMBOL(pcie_set_mps);
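
/*
 * Worked example of the encoding above: mps = 256 gives
 * v = ffs(256) - 8 = 1, which is accepted only if dev->pcie_mpss (the
 * Max_Payload_Size Supported field from the Device Capabilities
 * register) is at least 1, i.e., the device supports 256-byte payloads.
 */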
6330 
6331 static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
6332 {
6333 	return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
6334 }
6335 
6336 int pcie_link_speed_mbps(struct pci_dev *pdev)
6337 {
6338 	u16 lnksta;
6339 	int err;
6340 
6341 	err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
6342 	if (err)
6343 		return err;
6344 
6345 	switch (to_pcie_link_speed(lnksta)) {
6346 	case PCIE_SPEED_2_5GT:
6347 		return 2500;
6348 	case PCIE_SPEED_5_0GT:
6349 		return 5000;
6350 	case PCIE_SPEED_8_0GT:
6351 		return 8000;
6352 	case PCIE_SPEED_16_0GT:
6353 		return 16000;
6354 	case PCIE_SPEED_32_0GT:
6355 		return 32000;
6356 	case PCIE_SPEED_64_0GT:
6357 		return 64000;
6358 	default:
6359 		break;
6360 	}
6361 
6362 	return -EINVAL;
6363 }
6364 EXPORT_SYMBOL(pcie_link_speed_mbps);
6365 
6366 /**
6367  * pcie_bandwidth_available - determine minimum link settings of a PCIe device and its bandwidth limitation
6369  * @dev: PCI device to query
6370  * @limiting_dev: storage for device causing the bandwidth limitation
6371  * @speed: storage for speed of limiting device
6372  * @width: storage for width of limiting device
6373  *
6374  * Walk up the PCI device chain and find the point where the minimum
6375  * bandwidth is available.  Return the bandwidth available there and (if
6376  * limiting_dev, speed, and width pointers are supplied) information about
6377  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
6378  * raw bandwidth.
6379  */
6380 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6381 			     enum pci_bus_speed *speed,
6382 			     enum pcie_link_width *width)
6383 {
6384 	u16 lnksta;
6385 	enum pci_bus_speed next_speed;
6386 	enum pcie_link_width next_width;
6387 	u32 bw, next_bw;
6388 
6389 	if (speed)
6390 		*speed = PCI_SPEED_UNKNOWN;
6391 	if (width)
6392 		*width = PCIE_LNK_WIDTH_UNKNOWN;
6393 
6394 	bw = 0;
6395 
6396 	while (dev) {
6397 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6398 
6399 		next_speed = to_pcie_link_speed(lnksta);
6400 		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6401 
6402 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6403 
6404 		/* Check if current device limits the total bandwidth */
6405 		if (!bw || next_bw <= bw) {
6406 			bw = next_bw;
6407 
6408 			if (limiting_dev)
6409 				*limiting_dev = dev;
6410 			if (speed)
6411 				*speed = next_speed;
6412 			if (width)
6413 				*width = next_width;
6414 		}
6415 
6416 		dev = pci_upstream_bridge(dev);
6417 	}
6418 
6419 	return bw;
6420 }
6421 EXPORT_SYMBOL(pcie_bandwidth_available);
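
/*
 * Illustrative usage (variables hypothetical):
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	struct pci_dev *limit = NULL;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 * For example, an 8.0 GT/s x4 link yields 4 * 7877 = 31508 Mb/s once
 * the 128b/130b encoding overhead is applied.
 */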
6422 
6423 /**
6424  * pcie_get_speed_cap - query for the PCI device's link speed capability
6425  * @dev: PCI device to query
6426  *
6427  * Query the PCI device speed capability.  Return the maximum link speed
6428  * supported by the device.
6429  */
6430 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6431 {
6432 	u32 lnkcap2, lnkcap;
6433 
6434 	/*
6435 	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
6436 	 * implementation note there recommends using the Supported Link
6437 	 * Speeds Vector in Link Capabilities 2 when supported.
6438 	 *
6439 	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6440 	 * should use the Supported Link Speeds field in Link Capabilities,
6441 	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6442 	 */
6443 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6444 
6445 	/* PCIe r3.0-compliant */
6446 	if (lnkcap2)
6447 		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6448 
6449 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6450 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6451 		return PCIE_SPEED_5_0GT;
6452 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6453 		return PCIE_SPEED_2_5GT;
6454 
6455 	return PCI_SPEED_UNKNOWN;
6456 }
6457 EXPORT_SYMBOL(pcie_get_speed_cap);
6458 
6459 /**
6460  * pcie_get_width_cap - query for the PCI device's link width capability
6461  * @dev: PCI device to query
6462  *
6463  * Query the PCI device width capability.  Return the maximum link width
6464  * supported by the device.
6465  */
6466 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6467 {
6468 	u32 lnkcap;
6469 
6470 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6471 	if (lnkcap)
6472 		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6473 
6474 	return PCIE_LNK_WIDTH_UNKNOWN;
6475 }
6476 EXPORT_SYMBOL(pcie_get_width_cap);
6477 
6478 /**
6479  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6480  * @dev: PCI device
6481  * @speed: storage for link speed
6482  * @width: storage for link width
6483  *
6484  * Calculate a PCI device's link bandwidth by querying for its link speed
6485  * and width, multiplying them, and applying encoding overhead.  The result
6486  * is in Mb/s, i.e., megabits/second of raw bandwidth.
6487  */
6488 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
6489 			   enum pcie_link_width *width)
6490 {
6491 	*speed = pcie_get_speed_cap(dev);
6492 	*width = pcie_get_width_cap(dev);
6493 
6494 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6495 		return 0;
6496 
6497 	return *width * PCIE_SPEED2MBS_ENC(*speed);
6498 }
6499 
6500 /**
6501  * __pcie_print_link_status - Report the PCI device's link speed and width
6502  * @dev: PCI device to query
6503  * @verbose: Print info even when enough bandwidth is available
6504  *
6505  * If the available bandwidth at the device is less than the device is
6506  * capable of, report the device's maximum possible bandwidth and the
6507  * upstream link that limits its performance.  If @verbose, always print
6508  * the available bandwidth, even if the device isn't constrained.
6509  */
6510 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6511 {
6512 	enum pcie_link_width width, width_cap;
6513 	enum pci_bus_speed speed, speed_cap;
6514 	struct pci_dev *limiting_dev = NULL;
6515 	u32 bw_avail, bw_cap;
6516 
6517 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6518 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6519 
6520 	if (bw_avail >= bw_cap && verbose)
6521 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6522 			 bw_cap / 1000, bw_cap % 1000,
6523 			 pci_speed_string(speed_cap), width_cap);
6524 	else if (bw_avail < bw_cap)
6525 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6526 			 bw_avail / 1000, bw_avail % 1000,
6527 			 pci_speed_string(speed), width,
6528 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6529 			 bw_cap / 1000, bw_cap % 1000,
6530 			 pci_speed_string(speed_cap), width_cap);
6531 }
6532 
6533 /**
6534  * pcie_print_link_status - Report the PCI device's link speed and width
6535  * @dev: PCI device to query
6536  *
6537  * Report the available bandwidth at the device.
6538  */
6539 void pcie_print_link_status(struct pci_dev *dev)
6540 {
6541 	__pcie_print_link_status(dev, true);
6542 }
6543 EXPORT_SYMBOL(pcie_print_link_status);
6544 
6545 /**
6546  * pci_select_bars - Make BAR mask from the type of resource
6547  * @dev: the PCI device for which BAR mask is made
6548  * @flags: resource type mask to be selected
6549  *
6550  * This helper routine makes a BAR mask from the given resource type.
6551  */
6552 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6553 {
6554 	int i, bars = 0;

6555 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6556 		if (pci_resource_flags(dev, i) & flags)
6557 			bars |= (1 << i);
6558 	return bars;
6559 }
6560 EXPORT_SYMBOL(pci_select_bars);
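
/*
 * Typical use (illustrative): claim only the memory BARs of a device:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	rc = pci_request_selected_regions(pdev, bars, "mydrv");
 */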
6561 
6562 /* Some architectures require additional programming to enable VGA */
6563 static arch_set_vga_state_t arch_set_vga_state;
6564 
6565 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6566 {
6567 	arch_set_vga_state = func;	/* NULL disables */
6568 }
6569 
6570 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6571 				  unsigned int command_bits, u32 flags)
6572 {
6573 	if (arch_set_vga_state)
6574 		return arch_set_vga_state(dev, decode, command_bits, flags);
6576 	return 0;
6577 }
6578 
6579 /**
6580  * pci_set_vga_state - set VGA decode state on device and parents if requested
6581  * @dev: the PCI device
6582  * @decode: true = enable decoding, false = disable decoding
6583  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6584  * @flags: PCI_VGA_STATE_CHANGE_DECODES to update the device's own decoding,
6585  * PCI_VGA_STATE_CHANGE_BRIDGE to also traverse ancestors and change bridges
6586  */
6587 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6588 		      unsigned int command_bits, u32 flags)
6589 {
6590 	struct pci_bus *bus;
6591 	struct pci_dev *bridge;
6592 	u16 cmd;
6593 	int rc;
6594 
6595 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));
6596 
6597 	/* ARCH specific VGA enables */
6598 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6599 	if (rc)
6600 		return rc;
6601 
6602 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6603 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6604 		if (decode)
6605 			cmd |= command_bits;
6606 		else
6607 			cmd &= ~command_bits;
6608 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6609 	}
6610 
6611 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6612 		return 0;
6613 
6614 	bus = dev->bus;
6615 	while (bus) {
6616 		bridge = bus->self;
6617 		if (bridge) {
6618 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6619 					     &cmd);
6620 			if (decode)
6621 				cmd |= PCI_BRIDGE_CTL_VGA;
6622 			else
6623 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6624 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6625 					      cmd);
6626 		}
6627 		bus = bus->parent;
6628 	}
6629 	return 0;
6630 }
6631 
6632 #ifdef CONFIG_ACPI
6633 bool pci_pr3_present(struct pci_dev *pdev)
6634 {
6635 	struct acpi_device *adev;
6636 
6637 	if (acpi_disabled)
6638 		return false;
6639 
6640 	adev = ACPI_COMPANION(&pdev->dev);
6641 	if (!adev)
6642 		return false;
6643 
6644 	return adev->power.flags.power_resources &&
6645 		acpi_has_method(adev->handle, "_PR3");
6646 }
6647 EXPORT_SYMBOL_GPL(pci_pr3_present);
6648 #endif
6649 
6650 /**
6651  * pci_add_dma_alias - Add a DMA devfn alias for a device
6652  * @dev: the PCI device for which alias is added
6653  * @devfn_from: alias slot and function
6654  * @nr_devfns: number of subsequent devfns to alias
6655  *
6656  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6657  * which is used to program permissible bus-devfn source addresses for DMA
6658  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6659  * and are useful for devices generating DMA requests beyond or different
6660  * from their logical bus-devfn.  Examples include device quirks where the
6661  * device simply uses the wrong devfn, as well as non-transparent bridges
6662  * where the alias may be a proxy for devices in another domain.
6663  *
6664  * IOMMU group creation is performed during device discovery or addition,
6665  * prior to any potential DMA mapping and therefore prior to driver probing
6666  * (especially for userspace assigned devices where IOMMU group definition
6667  * cannot be left as a userspace activity).  DMA aliases should therefore
6668  * be configured via quirks, such as the PCI fixup header quirk.
6669  */
6670 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6671 		       unsigned int nr_devfns)
6672 {
6673 	int devfn_to;
6674 
6675 	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6676 	devfn_to = devfn_from + nr_devfns - 1;
6677 
6678 	if (!dev->dma_alias_mask)
6679 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6680 	if (!dev->dma_alias_mask) {
6681 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6682 		return;
6683 	}
6684 
6685 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6686 
6687 	if (nr_devfns == 1)
6688 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6689 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6690 	else if (nr_devfns > 1)
6691 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6692 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6693 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6694 }
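
/*
 * Illustrative quirk (vendor/device IDs hypothetical): a device that
 * emits DMA as function 0 regardless of its own devfn would register
 * the alias from a header fixup, as described above:
 *
 *	static void quirk_fn0_dma_alias(struct pci_dev *pdev)
 *	{
 *		pci_add_dma_alias(pdev,
 *				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_fn0_dma_alias);
 */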
6695 
6696 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6697 {
6698 	return (dev1->dma_alias_mask &&
6699 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6700 	       (dev2->dma_alias_mask &&
6701 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6702 	       pci_real_dma_dev(dev1) == dev2 ||
6703 	       pci_real_dma_dev(dev2) == dev1;
6704 }
6705 
6706 bool pci_device_is_present(struct pci_dev *pdev)
6707 {
6708 	u32 v;
6709 
6710 	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6711 	pdev = pci_physfn(pdev);
6712 	if (pci_dev_is_disconnected(pdev))
6713 		return false;
6714 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6715 }
6716 EXPORT_SYMBOL_GPL(pci_device_is_present);
6717 
6718 void pci_ignore_hotplug(struct pci_dev *dev)
6719 {
6720 	struct pci_dev *bridge = dev->bus->self;
6721 
6722 	dev->ignore_hotplug = 1;
6723 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6724 	if (bridge)
6725 		bridge->ignore_hotplug = 1;
6726 }
6727 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6728 
6729 /**
6730  * pci_real_dma_dev - Get PCI DMA device for PCI device
6731  * @dev: the PCI device that may have a PCI DMA alias
6732  *
6733  * Permits the platform to provide architecture-specific functionality to
6734  * devices needing to alias DMA to another PCI device on another PCI bus. If
6735  * the PCI device is on the same bus, it is recommended to use
6736  * pci_add_dma_alias(). This is the default implementation. Architecture
6737  * implementations can override this.
6738  */
6739 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6740 {
6741 	return dev;
6742 }
6743 
6744 resource_size_t __weak pcibios_default_alignment(void)
6745 {
6746 	return 0;
6747 }
6748 
6749 /*
6750  * Arches that don't want to expose struct resource to userland as-is in
6751  * sysfs and /proc can implement their own pci_resource_to_user().
6752  */
6753 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6754 				 const struct resource *rsrc,
6755 				 resource_size_t *start, resource_size_t *end)
6756 {
6757 	*start = rsrc->start;
6758 	*end = rsrc->end;
6759 }
6760 
6761 static char *resource_alignment_param;
6762 static DEFINE_SPINLOCK(resource_alignment_lock);
6763 
6764 /**
6765  * pci_specified_resource_alignment - get resource alignment specified by user.
6766  * @dev: the PCI device to check
6767  * @resize: whether or not to change resources' size when reassigning alignment
6768  *
6769  * RETURNS: Resource alignment if it is specified.
6770  *          Zero if it is not specified.
6771  */
6772 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6773 							bool *resize)
6774 {
6775 	int align_order, count;
6776 	resource_size_t align = pcibios_default_alignment();
6777 	const char *p;
6778 	int ret;
6779 
6780 	spin_lock(&resource_alignment_lock);
6781 	p = resource_alignment_param;
6782 	if (!p || !*p)
6783 		goto out;
6784 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6785 		align = 0;
6786 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6787 		goto out;
6788 	}
6789 
6790 	while (*p) {
6791 		count = 0;
6792 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6793 		    p[count] == '@') {
6794 			p += count + 1;
6795 			if (align_order > 63) {
6796 				pr_err("PCI: Invalid requested alignment (order %d)\n",
6797 				       align_order);
6798 				align_order = PAGE_SHIFT;
6799 			}
6800 		} else {
6801 			align_order = PAGE_SHIFT;
6802 		}
6803 
6804 		ret = pci_dev_str_match(dev, p, &p);
6805 		if (ret == 1) {
6806 			*resize = true;
6807 			align = 1ULL << align_order;
6808 			break;
6809 		} else if (ret < 0) {
6810 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6811 			       p);
6812 			break;
6813 		}
6814 
6815 		if (*p != ';' && *p != ',') {
6816 			/* End of param or invalid format */
6817 			break;
6818 		}
6819 		p++;
6820 	}
6821 out:
6822 	spin_unlock(&resource_alignment_lock);
6823 	return align;
6824 }
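
/*
 * The strings parsed above take the same form on the kernel command
 * line and when written to the resource_alignment sysfs attribute
 * below, e.g. (addresses illustrative):
 *
 *	pci=resource_alignment=20@0000:02:00.0	(2^20, i.e., 1 MiB)
 *	pci=resource_alignment=pci:8086:9c22	(default PAGE_SHIFT order)
 */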
6825 
6826 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6827 					   resource_size_t align, bool resize)
6828 {
6829 	struct resource *r = &dev->resource[bar];
6830 	const char *r_name = pci_resource_name(dev, bar);
6831 	resource_size_t size;
6832 
6833 	if (!(r->flags & IORESOURCE_MEM))
6834 		return;
6835 
6836 	if (r->flags & IORESOURCE_PCI_FIXED) {
6837 		pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
6838 			 r_name, r, (unsigned long long)align);
6839 		return;
6840 	}
6841 
6842 	size = resource_size(r);
6843 	if (size >= align)
6844 		return;
6845 
6846 	/*
6847 	 * Increase the alignment of the resource.  There are two ways we
6848 	 * can do this:
6849 	 *
6850 	 * 1) Increase the size of the resource.  BARs are aligned on their
6851 	 *    size, so when we reallocate space for this resource, we'll
6852 	 *    allocate it with the larger alignment.  This also prevents
6853 	 *    assignment of any other BARs inside the alignment region, so
6854 	 *    if we're requesting page alignment, this means no other BARs
6855 	 *    will share the page.
6856 	 *
6857 	 *    The disadvantage is that this makes the resource larger than
6858 	 *    the hardware BAR, which may break drivers that compute things
6859 	 *    based on the resource size, e.g., to find registers at a
6860 	 *    fixed offset before the end of the BAR.
6861 	 *
6862 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6863 	 *    set r->start to the desired alignment.  By itself this
6864 	 *    doesn't prevent other BARs being put inside the alignment
6865 	 *    region, but if we realign *every* resource of every device in
6866 	 *    the system, none of them will share an alignment region.
6867 	 *
6868 	 * When the user has requested alignment for only some devices via
6869 	 * the "pci=resource_alignment" argument, "resize" is true and we
6870 	 * use the first method.  Otherwise we assume we're aligning all
6871 	 * devices and we use the second.
6872 	 */
6873 
6874 	pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
6875 		 r_name, r, (unsigned long long)align);
6876 
6877 	if (resize) {
6878 		r->start = 0;
6879 		r->end = align - 1;
6880 	} else {
6881 		r->flags &= ~IORESOURCE_SIZEALIGN;
6882 		r->flags |= IORESOURCE_STARTALIGN;
6883 		r->start = align;
6884 		r->end = r->start + size - 1;
6885 	}
6886 	r->flags |= IORESOURCE_UNSET;
6887 }
6888 
6889 /*
6890  * This function disables memory decoding and releases memory resources
6891  * of any device matched by the kernel boot parameter 'pci=resource_alignment='.
6892  * It also rounds each resource size up to the specified alignment.
6893  * Later on, the kernel will reassign suitably aligned memory resources
6894  * back to the device.
6895  */
6896 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6897 {
6898 	int i;
6899 	struct resource *r;
6900 	resource_size_t align;
6901 	u16 command;
6902 	bool resize = false;
6903 
6904 	/*
6905 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6906 	 * 3.4.1.11.  Their resources are allocated from the space
6907 	 * described by the VF BARx register in the PF's SR-IOV capability.
6908 	 * We can't influence their alignment here.
6909 	 */
6910 	if (dev->is_virtfn)
6911 		return;
6912 
6913 	/* Check if the specified device is a target of resource reassignment */
6914 	align = pci_specified_resource_alignment(dev, &resize);
6915 	if (!align)
6916 		return;
6917 
6918 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6919 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6920 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6921 		return;
6922 	}
6923 
6924 	pci_read_config_word(dev, PCI_COMMAND, &command);
6925 	command &= ~PCI_COMMAND_MEMORY;
6926 	pci_write_config_word(dev, PCI_COMMAND, command);
6927 
6928 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6929 		pci_request_resource_alignment(dev, i, align, resize);
6930 
6931 	/*
6932 	 * Need to disable the bridge's resource windows so that
6933 	 * the kernel can reassign them later on.
6934 	 */
6936 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6937 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6938 			r = &dev->resource[i];
6939 			if (!(r->flags & IORESOURCE_MEM))
6940 				continue;
6941 			r->flags |= IORESOURCE_UNSET;
6942 			r->end = resource_size(r) - 1;
6943 			r->start = 0;
6944 		}
6945 		pci_disable_bridge_window(dev);
6946 	}
6947 }
6948 
6949 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6950 {
6951 	size_t count = 0;
6952 
6953 	spin_lock(&resource_alignment_lock);
6954 	if (resource_alignment_param)
6955 		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6956 	spin_unlock(&resource_alignment_lock);
6957 
6958 	return count;
6959 }
6960 
6961 static ssize_t resource_alignment_store(const struct bus_type *bus,
6962 					const char *buf, size_t count)
6963 {
6964 	char *param, *old, *end;
6965 
6966 	if (count >= (PAGE_SIZE - 1))
6967 		return -EINVAL;
6968 
6969 	param = kstrndup(buf, count, GFP_KERNEL);
6970 	if (!param)
6971 		return -ENOMEM;
6972 
6973 	end = strchr(param, '\n');
6974 	if (end)
6975 		*end = '\0';
6976 
6977 	spin_lock(&resource_alignment_lock);
6978 	old = resource_alignment_param;
6979 	if (strlen(param)) {
6980 		resource_alignment_param = param;
6981 	} else {
6982 		kfree(param);
6983 		resource_alignment_param = NULL;
6984 	}
6985 	spin_unlock(&resource_alignment_lock);
6986 
6987 	kfree(old);
6988 
6989 	return count;
6990 }
6991 
6992 static BUS_ATTR_RW(resource_alignment);
6993 
6994 static int __init pci_resource_alignment_sysfs_init(void)
6995 {
6996 	return bus_create_file(&pci_bus_type,
6997 					&bus_attr_resource_alignment);
6998 }
6999 late_initcall(pci_resource_alignment_sysfs_init);
7000 
7001 static void pci_no_domains(void)
7002 {
7003 #ifdef CONFIG_PCI_DOMAINS
7004 	pci_domains_supported = 0;
7005 #endif
7006 }
7007 
7008 #ifdef CONFIG_PCI_DOMAINS_GENERIC
7009 static DEFINE_IDA(pci_domain_nr_static_ida);
7010 static DEFINE_IDA(pci_domain_nr_dynamic_ida);
7011 
7012 static void of_pci_reserve_static_domain_nr(void)
7013 {
7014 	struct device_node *np;
7015 	int domain_nr;
7016 
7017 	for_each_node_by_type(np, "pci") {
7018 		domain_nr = of_get_pci_domain_nr(np);
7019 		if (domain_nr < 0)
7020 			continue;
7021 		/*
7022 		 * Permanently allocate domain_nr in dynamic_ida
7023 		 * to prevent it from being allocated dynamically.
7024 		 */
7025 		ida_alloc_range(&pci_domain_nr_dynamic_ida,
7026 				domain_nr, domain_nr, GFP_KERNEL);
7027 	}
7028 }
7029 
7030 static int of_pci_bus_find_domain_nr(struct device *parent)
7031 {
7032 	static bool static_domains_reserved;
7033 	int domain_nr;
7034 
7035 	/* On the first call, scan the device tree for static allocations. */
7036 	if (!static_domains_reserved) {
7037 		of_pci_reserve_static_domain_nr();
7038 		static_domains_reserved = true;
7039 	}
7040 
7041 	if (parent) {
7042 		/*
7043 		 * If domain is in DT, allocate it in static IDA.  This
7044 		 * prevents duplicate static allocations in case of errors
7045 		 * in DT.
7046 		 */
7047 		domain_nr = of_get_pci_domain_nr(parent->of_node);
7048 		if (domain_nr >= 0)
7049 			return ida_alloc_range(&pci_domain_nr_static_ida,
7050 					       domain_nr, domain_nr,
7051 					       GFP_KERNEL);
7052 	}
7053 
7054 	/*
7055 	 * If domain was not specified in DT, choose a free ID from dynamic
7056 	 * allocations.  All domain numbers from DT are permanently reserved
7057 	 * in the dynamic IDA to prevent them from being assigned to other
7058 	 * DT nodes that lack a static domain.
7059 	 */
7060 	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
7061 }
7062 
7063 static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
7064 {
7065 	if (bus->domain_nr < 0)
7066 		return;
7067 
7068 	/* Release domain from IDA where it was allocated. */
7069 	if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
7070 		ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
7071 	else
7072 		ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
7073 }
7074 
7075 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
7076 {
7077 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
7078 			       acpi_pci_bus_find_domain_nr(bus);
7079 }
7080 
7081 void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
7082 {
7083 	if (!acpi_disabled)
7084 		return;
7085 	of_pci_bus_release_domain_nr(bus, parent);
7086 }
7087 #endif
7088 
7089 /**
7090  * pci_ext_cfg_avail - can we access extended PCI config space?
7091  *
7092  * Returns 1 if we can access PCI extended config space (offsets
7093  * greater than 0xff). This is the default implementation. Architecture
7094  * implementations can override this.
7095  */
7096 int __weak pci_ext_cfg_avail(void)
7097 {
7098 	return 1;
7099 }
7100 
7101 void __weak pci_fixup_cardbus(struct pci_bus *bus)
7102 {
7103 }
7104 EXPORT_SYMBOL(pci_fixup_cardbus);
7105 
7106 static int __init pci_setup(char *str)
7107 {
7108 	while (str) {
7109 		char *k = strchr(str, ',');
7110 		if (k)
7111 			*k++ = 0;
7112 		if (*str && (str = pcibios_setup(str)) && *str) {
7113 			if (!strcmp(str, "nomsi")) {
7114 				pci_no_msi();
7115 			} else if (!strncmp(str, "noats", 5)) {
7116 				pr_info("PCIe: ATS is disabled\n");
7117 				pcie_ats_disabled = true;
7118 			} else if (!strcmp(str, "noaer")) {
7119 				pci_no_aer();
7120 			} else if (!strcmp(str, "earlydump")) {
7121 				pci_early_dump = true;
7122 			} else if (!strncmp(str, "realloc=", 8)) {
7123 				pci_realloc_get_opt(str + 8);
7124 			} else if (!strncmp(str, "realloc", 7)) {
7125 				pci_realloc_get_opt("on");
7126 			} else if (!strcmp(str, "nodomains")) {
7127 				pci_no_domains();
7128 			} else if (!strncmp(str, "noari", 5)) {
7129 				pcie_ari_disabled = true;
7130 			} else if (!strncmp(str, "cbiosize=", 9)) {
7131 				pci_cardbus_io_size = memparse(str + 9, &str);
7132 			} else if (!strncmp(str, "cbmemsize=", 10)) {
7133 				pci_cardbus_mem_size = memparse(str + 10, &str);
7134 			} else if (!strncmp(str, "resource_alignment=", 19)) {
7135 				resource_alignment_param = str + 19;
7136 			} else if (!strncmp(str, "ecrc=", 5)) {
7137 				pcie_ecrc_get_policy(str + 5);
7138 			} else if (!strncmp(str, "hpiosize=", 9)) {
7139 				pci_hotplug_io_size = memparse(str + 9, &str);
7140 			} else if (!strncmp(str, "hpmmiosize=", 11)) {
7141 				pci_hotplug_mmio_size = memparse(str + 11, &str);
7142 			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
7143 				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
7144 			} else if (!strncmp(str, "hpmemsize=", 10)) {
7145 				pci_hotplug_mmio_size = memparse(str + 10, &str);
7146 				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
7147 			} else if (!strncmp(str, "hpbussize=", 10)) {
7148 				pci_hotplug_bus_size =
7149 					simple_strtoul(str + 10, &str, 0);
7150 				if (pci_hotplug_bus_size > 0xff)
7151 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
7152 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
7153 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
7154 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
7155 				pcie_bus_config = PCIE_BUS_SAFE;
7156 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
7157 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
7158 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
7159 				pcie_bus_config = PCIE_BUS_PEER2PEER;
7160 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
7161 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
7162 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
7163 				disable_acs_redir_param = str + 18;
7164 			} else {
7165 				pr_err("PCI: Unknown option `%s'\n", str);
7166 			}
7167 		}
7168 		str = k;
7169 	}
7170 	return 0;
7171 }
7172 early_param("pci", pci_setup);
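
/*
 * Example command lines handled by pci_setup() above (values
 * illustrative):
 *
 *	pci=nomsi,noaer
 *	pci=hpmemsize=128M,hpbussize=8
 *	pci=resource_alignment=12@0000:01:00.0
 */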
7173 
7174 /*
7175  * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
7176  * in pci_setup(), above, to point to data in the __initdata section which
7177  * will be freed after the init sequence is complete. We can't allocate memory
7178  * in pci_setup() because some architectures do not have any memory allocation
7179  * service available during an early_param() call. So we allocate memory and
7180  * copy the variable here before the init section is freed.
7182  */
7183 static int __init pci_realloc_setup_params(void)
7184 {
7185 	resource_alignment_param = kstrdup(resource_alignment_param,
7186 					   GFP_KERNEL);
7187 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
7188 
7189 	return 0;
7190 }
7191 pure_initcall(pci_realloc_setup_params);
7192